From f1035bba1ecf18181697a5ab6329c5c6c43a4c3b Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Tue, 5 Mar 2024 11:41:07 -0500 Subject: [PATCH 001/248] Adjust randomization in ResolveClusterActionResponseTests (#105932) to avoid failures in `testEqualsAndHashcode` tests. Fixes https://github.com/elastic/elasticsearch/issues/105898 --- .../ResolveClusterActionResponseTests.java | 29 +++++++++++++++---- 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java index 322600fdeedff..33d4f0edf3450 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java @@ -20,22 +20,39 @@ public class ResolveClusterActionResponseTests extends AbstractWireSerializingTe @Override protected ResolveClusterActionResponse createTestInstance() { - return new ResolveClusterActionResponse(randomResolveClusterInfoMap()); + return new ResolveClusterActionResponse(randomResolveClusterInfoMap(null)); } - private Map randomResolveClusterInfoMap() { + private ResolveClusterInfo randomResolveClusterInfo(ResolveClusterInfo existing) { + if (existing == null) { + return randomResolveClusterInfo(); + } else { + return randomValueOtherThan(existing, () -> randomResolveClusterInfo()); + } + } + + private ResolveClusterInfo getResolveClusterInfoFromResponse(String key, ResolveClusterActionResponse response) { + if (response == null || response.getResolveClusterInfo() == null) { + return null; + } + return response.getResolveClusterInfo().get(key); + } + + private Map randomResolveClusterInfoMap(ResolveClusterActionResponse existingResponse) { Map infoMap = new HashMap<>(); int numClusters = randomIntBetween(0, 50); if (randomBoolean() || numClusters == 0) { - infoMap.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, randomResolveClusterInfo()); + String key = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + infoMap.put(key, randomResolveClusterInfo(getResolveClusterInfoFromResponse(key, existingResponse))); } for (int i = 0; i < numClusters; i++) { - infoMap.put("remote_" + i, randomResolveClusterInfo()); + String key = "remote_" + i; + infoMap.put(key, randomResolveClusterInfo(getResolveClusterInfoFromResponse(key, existingResponse))); } return infoMap; } - private ResolveClusterInfo randomResolveClusterInfo() { + static ResolveClusterInfo randomResolveClusterInfo() { int val = randomIntBetween(1, 3); return switch (val) { case 1 -> new ResolveClusterInfo(false, randomBoolean()); @@ -52,6 +69,6 @@ protected Writeable.Reader instanceReader() { @Override protected ResolveClusterActionResponse mutateInstance(ResolveClusterActionResponse response) { - return new ResolveClusterActionResponse(randomResolveClusterInfoMap()); + return new ResolveClusterActionResponse(randomResolveClusterInfoMap(response)); } } From 85cd2043176ee2cfb8078ad685be2e1156b0c71d Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 5 Mar 2024 18:04:09 +0100 Subject: [PATCH 002/248] Fix automatic generation of spatial function types files (#105766) * Fix automatic generation of spatial function types files The automatic mapping of spatial function names from class names was not working for spatial types, so the automatic generation of these files did not happen, and in 
fact existing files were deleted. In addition, the generation of aggregation functions types does not yet exist at all, so the st_centroid.asciidoc file was always deleted. Until such support exists, this files contents will be moved back into the function definition file. The railroad diagrams for syntax are now also created, however, not all functions in the documentation actually use these, and certainly none of the `TO_*` type-casting functions do, so we'll not include links to them from the docs, and leave that to the docs team to decide. Personally, while these diagrams are pretty, they contain no additional informational content, and in fact give a cluttered impression to the documentation visual appeal. * Refined to use an annotation which is more generic --- .../functions/signature/to_cartesianpoint.svg | 1 + .../functions/signature/to_cartesianshape.svg | 1 + .../esql/functions/signature/to_geopoint.svg | 1 + .../esql/functions/signature/to_geoshape.svg | 1 + .../esql/functions/st_centroid.asciidoc | 7 ++++- .../esql/functions/types/st_centroid.asciidoc | 6 ---- .../function/AbstractFunctionTestCase.java | 13 ++++++-- .../expression/function/FunctionName.java | 30 +++++++++++++++++++ .../scalar/convert/ToCartesianPointTests.java | 2 ++ .../scalar/convert/ToCartesianShapeTests.java | 2 ++ .../scalar/convert/ToGeoPointTests.java | 2 ++ .../scalar/convert/ToGeoShapeTests.java | 2 ++ 12 files changed, 58 insertions(+), 10 deletions(-) create mode 100644 docs/reference/esql/functions/signature/to_cartesianpoint.svg create mode 100644 docs/reference/esql/functions/signature/to_cartesianshape.svg create mode 100644 docs/reference/esql/functions/signature/to_geopoint.svg create mode 100644 docs/reference/esql/functions/signature/to_geoshape.svg delete mode 100644 docs/reference/esql/functions/types/st_centroid.asciidoc create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java diff --git a/docs/reference/esql/functions/signature/to_cartesianpoint.svg b/docs/reference/esql/functions/signature/to_cartesianpoint.svg new file mode 100644 index 0000000000000..44484e8321e2f --- /dev/null +++ b/docs/reference/esql/functions/signature/to_cartesianpoint.svg @@ -0,0 +1 @@ +TO_CARTESIANPOINT(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_cartesianshape.svg b/docs/reference/esql/functions/signature/to_cartesianshape.svg new file mode 100644 index 0000000000000..c16ce9a6c15bc --- /dev/null +++ b/docs/reference/esql/functions/signature/to_cartesianshape.svg @@ -0,0 +1 @@ +TO_CARTESIANSHAPE(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_geopoint.svg b/docs/reference/esql/functions/signature/to_geopoint.svg new file mode 100644 index 0000000000000..444817aa388b9 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_geopoint.svg @@ -0,0 +1 @@ +TO_GEOPOINT(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_geoshape.svg b/docs/reference/esql/functions/signature/to_geoshape.svg new file mode 100644 index 0000000000000..91b02332ad806 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_geoshape.svg @@ -0,0 +1 @@ +TO_GEOSHAPE(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/st_centroid.asciidoc b/docs/reference/esql/functions/st_centroid.asciidoc index abed1e71eab8f..cee0c85d5cb45 100644 --- a/docs/reference/esql/functions/st_centroid.asciidoc +++ 
b/docs/reference/esql/functions/st_centroid.asciidoc @@ -15,4 +15,9 @@ include::{esql-specs}/spatial.csv-spec[tag=st_centroid-airports-result] Supported types: -include::types/st_centroid.asciidoc[] +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +geo_point | geo_point +cartesian_point | cartesian_point +|=== diff --git a/docs/reference/esql/functions/types/st_centroid.asciidoc b/docs/reference/esql/functions/types/st_centroid.asciidoc deleted file mode 100644 index cbafb9d0fa6dc..0000000000000 --- a/docs/reference/esql/functions/types/st_centroid.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -[%header.monospaced.styled,format=dsv,separator=|] -|=== -v | result -geo_point | geo_point -cartesian_point | cartesian_point -|=== diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index dded86fdd8aee..612861b2889a4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -1088,7 +1088,7 @@ public static void renderTypesTable() throws IOException { renderTypesTable(EsqlFunctionRegistry.description(definition).argNames()); return; } - LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); + LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function '" + name + "' isn't registered"); } private static void renderTypesTable(List argNames) throws IOException { @@ -1116,12 +1116,18 @@ private static void renderTypesTable(List argNames) throws IOException { [%header.monospaced.styled,format=dsv,separator=|] |=== """ + header + "\n" + table.stream().collect(Collectors.joining("\n")) + "\n|===\n"; - LogManager.getLogger(getTestClass()).info("Writing function types:\n{}", rendered); + LogManager.getLogger(getTestClass()).info("Writing function types for [{}]:\n{}", functionName(), rendered); writeToTempDir("types", rendered, "asciidoc"); } private static String functionName() { - return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + Class testClass = getTestClass(); + if (testClass.isAnnotationPresent(FunctionName.class)) { + FunctionName functionNameAnnotation = testClass.getAnnotation(FunctionName.class); + return functionNameAnnotation.value(); + } else { + return StringUtils.camelCaseToUnderscore(testClass.getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + } } private static FunctionDefinition definition(String name) { @@ -1178,6 +1184,7 @@ private static void writeToTempDir(String subdir, String str, String extension) Files.createDirectories(dir); Path file = dir.resolve(functionName() + "." 
+ extension); Files.writeString(file, str); + LogManager.getLogger(getTestClass()).info("Wrote function types for [{}] to file: {}", functionName(), file); } private final List breakers = Collections.synchronizedList(new ArrayList<>()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java new file mode 100644 index 0000000000000..b4a5d3bdc2b92 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Tests that extend AbstractFunctionTestCase can use this annotation to specify the name of the function + * to use when generating documentation files while running tests. + * If this is not used, the name will be deduced from the test class name, by removing the "Test" suffix, and converting + * the class name to snake case. This annotation can be used to override that behavior, for cases where the deduced name + * is not correct. For example, in Elasticsearch the class name for `GeoPoint` capitalizes the `P` in `Point`, but the + * function name is `to_geopoint`, not `to_geo_point`. In some cases, even when compatible class names are used, + * like `StX` for the function `st_x`, the annotation is needed because the name deduction does not allow only a single + * character after the underscore. 
+ */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface FunctionName { + /** The function name to use in generating documentation files while running tests */ + String value(); +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java index 88910320c962e..4eadf88992582 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +@FunctionName("to_cartesianpoint") public class ToCartesianPointTests extends AbstractFunctionTestCase { public ToCartesianPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java index 117968de5148f..ad92b6578d71b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +@FunctionName("to_cartesianshape") public class ToCartesianShapeTests extends AbstractFunctionTestCase { public ToCartesianShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java index 4a5534e1d5d1a..342325a63d96e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import 
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +@FunctionName("to_geopoint") public class ToGeoPointTests extends AbstractFunctionTestCase { public ToGeoPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java index 15db74d71d21f..290d0a08db725 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; @@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +@FunctionName("to_geoshape") public class ToGeoShapeTests extends AbstractFunctionTestCase { public ToGeoShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); From aba546d023cbb5ea88082e1fad4d837b43827880 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:10:58 -0700 Subject: [PATCH 003/248] (API+) CAT Nodes alias for shard header to match CAT Allocation (#105847) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy, team! Will you kindly consider adding `shards` as an alias to `shard_stats.total_count` column for CAT Nodes to match its naming from CAT Allocation? (The tests returned clean without changes which I hadn't expected, so please let me know if I missed something.) **Example**: To avoid running the default CAT Nodes & CAT Allocation separately, you can run CAT Nodes ``` GET _cat/nodes?v&s=master,name&h=name,id,master,node.role,cpu,heap.percent,disk.*,sstc,uptime ``` Where `sstc` is `shards` from CAT Allocation. This is a 👶 API (+ its doc) change to make the output more intuitive. 
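With the new alias in place, the example above can name the column directly (an illustrative request only; it assumes the `shards` alias registered in the diff below, and the column list is otherwise the same as in the example above):

```
GET _cat/nodes?v&s=master,name&h=name,id,master,node.role,cpu,heap.percent,disk.*,shards,uptime
```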
--- docs/changelog/105847.yaml | 5 +++++ docs/reference/cat/nodes.asciidoc | 2 +- .../org/elasticsearch/rest/action/cat/RestNodesAction.java | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/105847.yaml diff --git a/docs/changelog/105847.yaml b/docs/changelog/105847.yaml new file mode 100644 index 0000000000000..a731395bc9a81 --- /dev/null +++ b/docs/changelog/105847.yaml @@ -0,0 +1,5 @@ +pr: 105847 +summary: (API+) CAT Nodes alias for shard header to match CAT Allocation +area: Stats +type: enhancement +issues: [] diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index b670ee26a20a9..da1ed532e41fa 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -318,7 +318,7 @@ Time spent in suggest, such as `0`. `suggest.total`, `suto`, `suggestTotal`:: Number of suggest operations, such as `0`. -`shard_stats.total_count`, `sstc`, `shardStatsTotalCount`:: +`shard_stats.total_count`, `sstc`, `shards`, `shardStatsTotalCount`:: Number of shards assigned. `mappings.total_count`, `mtc`, `mappingsTotalCount`:: diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 39045a99aa4a2..9b70776551ba6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -316,7 +316,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell( "shard_stats.total_count", - "alias:sstc,shardStatsTotalCount;default:false;text-align:right;desc:number of shards assigned" + "alias:sstc,shards,shardStatsTotalCount;default:false;text-align:right;desc:number of shards assigned" ); table.addCell("mappings.total_count", "alias:mtc,mappingsTotalCount;default:false;text-align:right;desc:number of mappings"); From 2fbdc33dcf2cb1efbbd893557b4c64003a4087f7 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 5 Mar 2024 18:20:00 +0100 Subject: [PATCH 004/248] Wait forever for IndexTemplateRegistry asset installation (#105985) Previously we would wait one minute for templates, ILM policies, and pipelines to be installed. This commit changes the timeout to use `TimeValue.MAX_VALUE` so that they should continue to wait until either the asset is install, or the master fails over. 
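The change is identical at every call site; here is a minimal sketch of the values being swapped (the `org.elasticsearch.core.TimeValue` import path and the demo class are assumptions made for illustration, while the timeout values and the `masterNodeTimeout` call come from the diff below):

```java
import org.elasticsearch.core.TimeValue;

// Minimal sketch, not the actual registry code: the per-request master node timeout
// moves from a one-minute bound to an effectively unbounded value.
public class MasterNodeTimeoutSketch {
    public static void main(String[] args) {
        TimeValue previous = TimeValue.timeValueMinutes(1); // old: give up after one minute
        TimeValue updated = TimeValue.MAX_VALUE;            // new: wait until installed or master failover
        // IndexTemplateRegistry now configures each install request as:
        //     request.masterNodeTimeout(TimeValue.MAX_VALUE);
        System.out.println("previous=" + previous + ", updated=" + updated);
    }
}
```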
--- docs/changelog/105985.yaml | 5 +++++ .../xpack/core/template/IndexTemplateRegistry.java | 12 ++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/105985.yaml diff --git a/docs/changelog/105985.yaml b/docs/changelog/105985.yaml new file mode 100644 index 0000000000000..2f2a8c1394070 --- /dev/null +++ b/docs/changelog/105985.yaml @@ -0,0 +1,5 @@ +pr: 105985 +summary: Wait forever for `IndexTemplateRegistry` asset installation +area: Indices APIs +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index db5746f5c1b47..e189116b0179c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -465,7 +465,7 @@ private void putLegacyTemplate(final IndexTemplateConfig config, final AtomicBoo final String templateName = config.getTemplateName(); PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.loadBytes(), XContentType.JSON); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -498,7 +498,7 @@ private void putComponentTemplate(final String templateName, final ComponentTemp final Executor executor = threadPool.generic(); executor.execute(() -> { PutComponentTemplateAction.Request request = new PutComponentTemplateAction.Request(templateName).componentTemplate(template); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -538,7 +538,7 @@ private void putComposableTemplate( executor.execute(() -> { TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(templateName) .indexTemplate(indexTemplate); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -615,7 +615,7 @@ private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creatio final Executor executor = threadPool.generic(); executor.execute(() -> { PutLifecycleRequest request = new PutLifecycleRequest(policy); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), @@ -727,7 +727,7 @@ private void putIngestPipeline(final IngestPipelineConfig pipelineConfig, final pipelineConfig.loadConfig(), pipelineConfig.getXContentType() ); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), @@ -815,7 +815,7 @@ public void onFailure(Exception e) { ); RolloverRequest request = new RolloverRequest(rolloverTarget, null); request.lazy(true); - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + request.masterNodeTimeout(TimeValue.MAX_VALUE); executeAsyncWithOrigin( client.threadPool().getThreadContext(), getOrigin(), From 71ffd35b6bed3fa87b5c645907f45e824defcfc2 Mon Sep 17 00:00:00 2001 From: Nik 
Everett Date: Tue, 5 Mar 2024 12:33:17 -0500 Subject: [PATCH 005/248] ESQL: fix single valued query tests (#105986) In some cases the tests for our lucene query that makes sure a field is single-valued was asserting incorrect things about the stats that come from the query. That was failing the test from time to time. This fixes the assertion in those cases. Closes #105918 --- .../querydsl/query/SingleValueQueryTests.java | 97 ++++++++++++------- 1 file changed, 62 insertions(+), 35 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 55e8ba164ba70..6465e73417ae2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -50,7 +50,7 @@ interface Setup { List> build(RandomIndexWriter iw) throws IOException; - void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase); + void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase); } @ParametersFactory @@ -74,7 +74,7 @@ public SingleValueQueryTests(Setup setup) { } public void testMatchAll() throws IOException { - testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), false, false, this::runCase); + testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), YesNoSometimes.NO, YesNoSometimes.NO, this::runCase); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105952") @@ -82,8 +82,8 @@ public void testMatchSome() throws IOException { int max = between(1, 100); testCase( new SingleValueQuery.Builder(new RangeQueryBuilder("i").lt(max), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - false, - false, + YesNoSometimes.SOMETIMES, + YesNoSometimes.NO, (fieldValues, count) -> runCase(fieldValues, count, null, max, false) ); } @@ -96,8 +96,8 @@ public void testSubPhrase() throws IOException { new SingleValueQuery.Stats(), Source.EMPTY ), - false, - true, + YesNoSometimes.NO, + YesNoSometimes.YES, this::runCase ); } @@ -105,8 +105,8 @@ public void testSubPhrase() throws IOException { public void testMatchNone() throws IOException { testCase( new SingleValueQuery.Builder(new MatchNoneQueryBuilder(), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -114,8 +114,8 @@ public void testMatchNone() throws IOException { public void testRewritesToMatchNone() throws IOException { testCase( new SingleValueQuery.Builder(new TermQueryBuilder("missing", 0), "foo", new SingleValueQuery.Stats(), Source.EMPTY), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -123,8 +123,8 @@ public void testRewritesToMatchNone() throws IOException { public void testNotMatchAll() throws IOException { testCase( new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").negate(Source.EMPTY).asBuilder(), - true, - false, + YesNoSometimes.YES, + YesNoSometimes.NO, (fieldValues, count) -> assertThat(count, equalTo(0)) ); } @@ -132,19 +132,18 @@ public void testNotMatchAll() throws IOException { public void testNotMatchNone() throws IOException { testCase( new SingleValueQuery(new MatchAll(Source.EMPTY).negate(Source.EMPTY), 
"foo").negate(Source.EMPTY).asBuilder(), - false, - false, + YesNoSometimes.NO, + YesNoSometimes.NO, this::runCase ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105918") public void testNotMatchSome() throws IOException { int max = between(1, 100); testCase( new SingleValueQuery(new RangeQuery(Source.EMPTY, "i", null, false, max, false, null), "foo").negate(Source.EMPTY).asBuilder(), - false, - true, + YesNoSometimes.SOMETIMES, + YesNoSometimes.SOMETIMES, (fieldValues, count) -> runCase(fieldValues, count, max, 100, true) ); } @@ -193,8 +192,18 @@ private void runCase(List> fieldValues, int count) { runCase(fieldValues, count, null, null, false); } - private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchNone, boolean subHasTwoPhase, TestCase testCase) - throws IOException { + enum YesNoSometimes { + YES, + NO, + SOMETIMES; + } + + private void testCase( + SingleValueQuery.Builder builder, + YesNoSometimes rewritesToMatchNone, + YesNoSometimes subHasTwoPhase, + TestCase testCase + ) throws IOException { MapperService mapper = createMapperService(mapping(setup::mapping)); try (Directory d = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), d)) { List> fieldValues = setup.build(iw); @@ -203,7 +212,7 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN QueryBuilder rewritten = builder.rewrite(ctx); Query query = rewritten.toQuery(ctx); testCase.run(fieldValues, ctx.searcher().count(query)); - if (rewritesToMatchNone) { + if (rewritesToMatchNone == YesNoSometimes.YES) { assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); assertThat(builder.stats().missingField(), equalTo(0)); assertThat(builder.stats().rewrittenToMatchNone(), equalTo(1)); @@ -219,7 +228,9 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN assertThat(builder.stats().rewrittenToMatchNone(), equalTo(0)); setup.assertStats(builder, subHasTwoPhase); } - assertThat(builder.stats().noNextScorer(), equalTo(0)); + if (rewritesToMatchNone != YesNoSometimes.SOMETIMES) { + assertThat(builder.stats().noNextScorer(), equalTo(0)); + } } } } @@ -302,7 +313,7 @@ private List docFor(int i, Iterable values) { } @Override - public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase) { + public void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase) { assertThat(builder.stats().missingField(), equalTo(0)); switch (fieldType) { case "long", "integer", "short", "byte", "double", "float" -> { @@ -314,12 +325,20 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase if (multivaluedField || empty) { assertThat(builder.stats().numericSingle(), greaterThanOrEqualTo(0)); - if (subHasTwoPhase) { - assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); - assertThat(builder.stats().numericMultiApprox(), greaterThan(0)); - } else { - assertThat(builder.stats().numericMultiNoApprox(), greaterThan(0)); - assertThat(builder.stats().numericMultiApprox(), equalTo(0)); + switch (subHasTwoPhase) { + case YES -> { + assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); + assertThat(builder.stats().numericMultiApprox(), greaterThan(0)); + } + case NO -> { + assertThat(builder.stats().numericMultiNoApprox(), greaterThan(0)); + assertThat(builder.stats().numericMultiApprox(), equalTo(0)); + } + case SOMETIMES -> { + assertThat(builder.stats().numericMultiNoApprox() + builder.stats().numericMultiApprox(), greaterThan(0)); + 
assertThat(builder.stats().numericMultiNoApprox(), greaterThanOrEqualTo(0)); + assertThat(builder.stats().numericMultiApprox(), greaterThanOrEqualTo(0)); + } } } else { assertThat(builder.stats().numericSingle(), greaterThan(0)); @@ -335,12 +354,20 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().bytesNoApprox(), equalTo(0)); if (multivaluedField || empty) { assertThat(builder.stats().ordinalsSingle(), greaterThanOrEqualTo(0)); - if (subHasTwoPhase) { - assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); - assertThat(builder.stats().ordinalsMultiApprox(), greaterThan(0)); - } else { - assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThan(0)); - assertThat(builder.stats().ordinalsMultiApprox(), equalTo(0)); + switch (subHasTwoPhase) { + case YES -> { + assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); + assertThat(builder.stats().ordinalsMultiApprox(), greaterThan(0)); + } + case NO -> { + assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThan(0)); + assertThat(builder.stats().ordinalsMultiApprox(), equalTo(0)); + } + case SOMETIMES -> { + assertThat(builder.stats().ordinalsMultiNoApprox() + builder.stats().ordinalsMultiApprox(), greaterThan(0)); + assertThat(builder.stats().ordinalsMultiNoApprox(), greaterThanOrEqualTo(0)); + assertThat(builder.stats().ordinalsMultiApprox(), greaterThanOrEqualTo(0)); + } } } else { assertThat(builder.stats().ordinalsSingle(), greaterThan(0)); @@ -373,7 +400,7 @@ public List> build(RandomIndexWriter iw) throws IOException { } @Override - public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase) { + public void assertStats(SingleValueQuery.Builder builder, YesNoSometimes subHasTwoPhase) { assertThat(builder.stats().missingField(), equalTo(1)); assertThat(builder.stats().numericSingle(), equalTo(0)); assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); From 680774e7f276ae403e62b9af9c610483ccb25344 Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:42:08 -0500 Subject: [PATCH 006/248] [ML] Refactor inference service retry and queuing logic (#105526) * Refactoring http sender factory to be a nested class * Moving retrier and adding in request manager logic * Working tests * additional renaming and tests * Fixing merge issues * Cleaning up code * Removing interfaces --------- Co-authored-by: Elastic Machine --- .../xpack/inference/InferencePlugin.java | 13 +- .../action/cohere/CohereActionCreator.java | 2 +- .../action/cohere/CohereEmbeddingsAction.java | 46 +-- .../action/huggingface/HuggingFaceAction.java | 40 +-- .../action/openai/OpenAiEmbeddingsAction.java | 31 +- .../external/http/HttpClientManager.java | 1 - .../external/http/RequestExecutor.java | 11 +- .../{Retrier.java => RequestSender.java} | 15 +- .../external/http/retry/RetrySettings.java | 112 +++++- .../http/retry/RetryingHttpSender.java | 111 ++++-- ...ereEmbeddingsExecutableRequestCreator.java | 55 +++ .../sender/ExecutableInferenceRequest.java | 44 +++ .../http/sender/ExecutableRequestCreator.java | 29 ++ .../http/sender/HttpRequestSender.java | 192 ++++++++++ .../http/sender/HttpRequestSenderFactory.java | 175 --------- .../HuggingFaceExecutableRequestCreator.java | 64 ++++ .../http/sender/InferenceRequest.java | 45 +++ .../external/http/sender/NoopTask.java | 36 +- ...nAiEmbeddingsExecutableRequestCreator.java | 67 ++++ .../external/http/sender/RejectableTask.java | 12 + 
.../http/sender/RequestExecutorService.java | 80 ++--- .../external/http/sender/RequestTask.java | 163 +++------ .../external/http/sender/Sender.java | 13 +- .../http/sender/SingleRequestManager.java | 47 +++ .../external/openai/OpenAiClient.java | 47 --- .../inference/services/SenderService.java | 4 +- .../services/cohere/CohereService.java | 4 +- .../huggingface/HuggingFaceBaseService.java | 4 +- .../huggingface/HuggingFaceService.java | 4 +- .../elser/HuggingFaceElserService.java | 4 +- .../services/openai/OpenAiService.java | 4 +- .../elasticsearch/xpack/inference/Utils.java | 4 +- .../cohere/CohereActionCreatorTests.java | 4 +- .../cohere/CohereEmbeddingsActionTests.java | 23 +- .../HuggingFaceActionCreatorTests.java | 55 ++- .../huggingface/HuggingFaceActionTests.java | 9 +- .../openai/OpenAiActionCreatorTests.java | 178 +++++++++- .../openai/OpenAiEmbeddingsActionTests.java | 24 +- .../http/retry/RetrySettingsTests.java | 4 +- .../http/retry/RetryingHttpSenderTests.java | 335 ++++++++---------- .../sender/ExecutableRequestCreatorTests.java | 49 +++ ...Tests.java => HttpRequestSenderTests.java} | 134 +++++-- ...beddingsExecutableRequestCreatorTests.java | 40 +++ .../sender/RequestExecutorServiceTests.java | 148 ++++---- .../http/sender/RequestTaskTests.java | 208 ++--------- .../sender/SingleRequestManagerTests.java | 27 ++ .../external/openai/OpenAiClientTests.java | 297 ---------------- .../openai/OpenAiEmbeddingsRequestTests.java | 2 +- .../services/SenderServiceTests.java | 8 +- .../services/ServiceComponentsTests.java | 4 + .../services/cohere/CohereServiceTests.java | 21 +- .../HuggingFaceBaseServiceTests.java | 7 +- .../huggingface/HuggingFaceServiceTests.java | 12 +- .../services/openai/OpenAiServiceTests.java | 21 +- .../OpenAiEmbeddingsModelTests.java | 18 + 55 files changed, 1685 insertions(+), 1422 deletions(-) rename x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/{Retrier.java => RequestSender.java} (53%) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java delete mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java delete mode 100644 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java rename x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/{HttpRequestSenderFactoryTests.java => HttpRequestSenderTests.java} (53%) create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java delete mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 1c5e5d4e9ef94..c598a58d014f9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.registry.ModelRegistryImpl; @@ -95,7 +95,7 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; private final Settings settings; - private final SetOnce httpFactory = new SetOnce<>(); + private final SetOnce httpFactory = new SetOnce<>(); private final SetOnce serviceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); @@ -144,11 +144,10 @@ public Collection createComponents(PluginServices services) { var truncator = new Truncator(settings, services.clusterService()); serviceComponents.set(new ServiceComponents(services.threadPool(), throttlerManager, settings, truncator)); - var httpRequestSenderFactory = new HttpRequestSenderFactory( - services.threadPool(), + var httpRequestSenderFactory = new HttpRequestSender.Factory( + serviceComponents.get(), HttpClientManager.create(settings, services.threadPool(), services.clusterService(), throttlerManager), - services.clusterService(), - settings + services.clusterService() ); httpFactory.set(httpRequestSenderFactory); @@ -241,7 +240,7 @@ public List> getSettings() { return Stream.of( HttpSettings.getSettings(), HttpClientManager.getSettings(), - HttpRequestSenderFactory.HttpRequestSender.getSettings(), + HttpRequestSender.getSettings(), ThrottlerManager.getSettings(), RetrySettings.getSettingsDefinitions(), Truncator.getSettings(), diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java index 0fb5ca9283fae..91db5e691cb61 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreator.java @@ -32,6 +32,6 @@ public CohereActionCreator(Sender sender, ServiceComponents serviceComponents) { public ExecutableAction create(CohereEmbeddingsModel model, Map taskSettings, InputType inputType) { var overriddenModel = CohereEmbeddingsModel.of(model, taskSettings, inputType); - return new CohereEmbeddingsAction(sender, overriddenModel, serviceComponents); + return new CohereEmbeddingsAction(sender, overriddenModel); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java index ae66496abbb1f..1f50f0ae6bc57 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsAction.java @@ -7,21 +7,12 @@ package org.elasticsearch.xpack.inference.external.action.cohere; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; -import org.elasticsearch.xpack.inference.external.cohere.CohereAccount; -import org.elasticsearch.xpack.inference.external.cohere.CohereResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.external.http.sender.CohereEmbeddingsExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.request.cohere.CohereEmbeddingsRequest; -import org.elasticsearch.xpack.inference.external.response.cohere.CohereEmbeddingsResponseEntity; -import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel; import java.util.List; @@ -32,51 +23,32 @@ import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class CohereEmbeddingsAction implements ExecutableAction { - private static final Logger logger = LogManager.getLogger(CohereEmbeddingsAction.class); - private static final ResponseHandler HANDLER = createEmbeddingsHandler(); - - private final CohereAccount account; - private final CohereEmbeddingsModel model; private final String failedToSendRequestErrorMessage; - private final RetryingHttpSender sender; + private final Sender sender; + private final CohereEmbeddingsExecutableRequestCreator requestCreator; - public CohereEmbeddingsAction(Sender sender, CohereEmbeddingsModel model, ServiceComponents serviceComponents) { - this.model = 
Objects.requireNonNull(model); - this.account = new CohereAccount( - this.model.getServiceSettings().getCommonSettings().getUri(), - this.model.getSecretSettings().apiKey() - ); + public CohereEmbeddingsAction(Sender sender, CohereEmbeddingsModel model) { + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage( - this.model.getServiceSettings().getCommonSettings().getUri(), + model.getServiceSettings().getCommonSettings().getUri(), "Cohere embeddings" ); - this.sender = new RetryingHttpSender( - Objects.requireNonNull(sender), - serviceComponents.throttlerManager(), - logger, - new RetrySettings(serviceComponents.settings()), - serviceComponents.threadPool() - ); + requestCreator = new CohereEmbeddingsExecutableRequestCreator(model); } @Override public void execute(List input, ActionListener listener) { try { - CohereEmbeddingsRequest request = new CohereEmbeddingsRequest(account, input, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException( failedToSendRequestErrorMessage, listener ); - - sender.send(request, HANDLER, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { listener.onFailure(createInternalServerError(e, failedToSendRequestErrorMessage)); } } - - private static ResponseHandler createEmbeddingsHandler() { - return new CohereResponseHandler("cohere text embedding", CohereEmbeddingsResponseEntity::fromResponse); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java index 67c5fda5f83a0..928d396c991f8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceAction.java @@ -7,19 +7,13 @@ package org.elasticsearch.xpack.inference.external.action.huggingface; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.xpack.inference.common.Truncator; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; -import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.external.http.sender.HuggingFaceExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; -import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceInferenceRequest; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; @@ -27,19 +21,13 @@ import java.util.Objects; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.common.Truncator.truncate; import static 
org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class HuggingFaceAction implements ExecutableAction { - private static final Logger logger = LogManager.getLogger(HuggingFaceAction.class); - - private final HuggingFaceAccount account; private final String errorMessage; - private final RetryingHttpSender sender; - private final ResponseHandler responseHandler; - private final Truncator truncator; - private final HuggingFaceModel model; + private final Sender sender; + private final HuggingFaceExecutableRequestCreator requestCreator; public HuggingFaceAction( Sender sender, @@ -50,34 +38,20 @@ public HuggingFaceAction( ) { Objects.requireNonNull(serviceComponents); Objects.requireNonNull(requestType); - - this.model = Objects.requireNonNull(model); - this.responseHandler = Objects.requireNonNull(responseHandler); - this.sender = new RetryingHttpSender( - Objects.requireNonNull(sender), - serviceComponents.throttlerManager(), - logger, - new RetrySettings(serviceComponents.settings()), - serviceComponents.threadPool() - ); - this.account = new HuggingFaceAccount(model.getUri(), model.getApiKey()); - this.errorMessage = format( + this.sender = Objects.requireNonNull(sender); + requestCreator = new HuggingFaceExecutableRequestCreator(model, responseHandler, serviceComponents.truncator()); + errorMessage = format( "Failed to send Hugging Face %s request from inference entity id [%s]", requestType, model.getInferenceEntityId() ); - this.truncator = Objects.requireNonNull(serviceComponents.truncator()); } @Override public void execute(List input, ActionListener listener) { try { - var truncatedInput = truncate(input, model.getTokenLimit()); - - HuggingFaceInferenceRequest request = new HuggingFaceInferenceRequest(truncator, account, truncatedInput, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - - sender.send(request, responseHandler, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java index 2e804dfeb6a4f..d5f083ac8aa90 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java @@ -10,52 +10,39 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.xpack.inference.common.Truncator; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.OpenAiEmbeddingsExecutableRequestCreator; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; -import org.elasticsearch.xpack.inference.external.openai.OpenAiClient; -import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; import 
org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xpack.inference.common.Truncator.truncate; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; public class OpenAiEmbeddingsAction implements ExecutableAction { - private final OpenAiAccount account; - private final OpenAiClient client; - private final OpenAiEmbeddingsModel model; private final String errorMessage; - private final Truncator truncator; + private final OpenAiEmbeddingsExecutableRequestCreator requestCreator; + private final Sender sender; public OpenAiEmbeddingsAction(Sender sender, OpenAiEmbeddingsModel model, ServiceComponents serviceComponents) { - this.model = Objects.requireNonNull(model); - this.account = new OpenAiAccount( - this.model.getServiceSettings().uri(), - this.model.getServiceSettings().organizationId(), - this.model.getSecretSettings().apiKey() - ); - this.client = new OpenAiClient(Objects.requireNonNull(sender), Objects.requireNonNull(serviceComponents)); - this.errorMessage = constructFailedToSendRequestMessage(this.model.getServiceSettings().uri(), "OpenAI embeddings"); - this.truncator = Objects.requireNonNull(serviceComponents.truncator()); + Objects.requireNonNull(serviceComponents); + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + requestCreator = new OpenAiEmbeddingsExecutableRequestCreator(model, serviceComponents.truncator()); + errorMessage = constructFailedToSendRequestMessage(model.getServiceSettings().uri(), "OpenAI embeddings"); } @Override public void execute(List input, ActionListener listener) { try { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); - - OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(truncator, account, truncatedInput, model); ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - client.send(request, wrappedListener); + sender.send(requestCreator, input, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index 7cc4a3cb24502..ab3a8a8c0e043 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -37,7 +37,6 @@ public class HttpClientManager implements Closeable { */ public static final Setting MAX_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_connections", - // TODO pick a reasonable values here 20, // default 1, // min Setting.Property.NodeScope, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java index 5c8fa62ba88f9..77b4d49d62b9f 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/RequestExecutor.java @@ -10,8 +10,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.sender.ExecutableRequestCreator; +import java.util.List; import java.util.concurrent.TimeUnit; public interface RequestExecutor { @@ -25,5 +27,10 @@ public interface RequestExecutor { boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException; - void execute(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener); + void execute( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java similarity index 53% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java index 2e2ba03345a3b..8244e5ad29e95 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java @@ -7,10 +7,21 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.request.Request; -public interface Retrier { - void send(Request request, ResponseHandler responseHandler, ActionListener listener); +import java.util.function.Supplier; + +public interface RequestSender { + void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java index 040903a35ab08..35e50e557cc83 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -15,43 +16,128 @@ public class RetrySettings { - public static final Setting RETRY_INITIAL_DELAY_SETTING = Setting.timeSetting( + static final Setting RETRY_INITIAL_DELAY_SETTING = Setting.timeSetting( 
"xpack.inference.http.retry.initial_delay", TimeValue.timeValueSeconds(1), Setting.Property.NodeScope, Setting.Property.Dynamic ); - public static final Setting RETRY_MAX_DELAY_BOUND_SETTING = Setting.timeSetting( + static final Setting RETRY_MAX_DELAY_BOUND_SETTING = Setting.timeSetting( "xpack.inference.http.retry.max_delay_bound", TimeValue.timeValueSeconds(5), Setting.Property.NodeScope, Setting.Property.Dynamic ); - public static final Setting RETRY_TIMEOUT_SETTING = Setting.timeSetting( + static final Setting RETRY_TIMEOUT_SETTING = Setting.timeSetting( "xpack.inference.http.retry.timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope, Setting.Property.Dynamic ); - private final InternalSettings internalSettings; + static final Setting RETRY_DEBUG_FREQUENCY_MODE_SETTING = Setting.enumSetting( + DebugFrequencyMode.class, + "xpack.inference.http.retry.debug_frequency_mode", + DebugFrequencyMode.OFF, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + static final Setting RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING = Setting.timeSetting( + "xpack.inference.http.retry.debug_frequency_amount", + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile TimeValue initialDelay; + private volatile TimeValue maxDelayBound; + private volatile TimeValue timeout; + private volatile DebugFrequencyMode debugMode; + private volatile TimeValue debugFrequency; + + public RetrySettings(Settings settings, ClusterService clusterService) { + initialDelay = RETRY_INITIAL_DELAY_SETTING.get(settings); + maxDelayBound = RETRY_MAX_DELAY_BOUND_SETTING.get(settings); + timeout = RETRY_TIMEOUT_SETTING.get(settings); + debugMode = RETRY_DEBUG_FREQUENCY_MODE_SETTING.get(settings); + debugFrequency = RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING.get(settings); + + addSettingsUpdateConsumers(clusterService); + } + + private void addSettingsUpdateConsumers(ClusterService clusterService) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_INITIAL_DELAY_SETTING, this::setInitialDelay); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_MAX_DELAY_BOUND_SETTING, this::setMaxDelayBound); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_TIMEOUT_SETTING, this::setTimeout); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_DEBUG_FREQUENCY_MODE_SETTING, this::setDebugMode); + clusterService.getClusterSettings().addSettingsUpdateConsumer(RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING, this::setDebugFrequencyAmount); + } + + private void setInitialDelay(TimeValue initialDelay) { + this.initialDelay = initialDelay; + } - public RetrySettings(Settings settings) { - var initialDelay = RETRY_INITIAL_DELAY_SETTING.get(settings); - var maxDelayBound = RETRY_MAX_DELAY_BOUND_SETTING.get(settings); - var timeoutValue = RETRY_TIMEOUT_SETTING.get(settings); - this.internalSettings = new InternalSettings(initialDelay, maxDelayBound, timeoutValue); + private void setMaxDelayBound(TimeValue maxDelayBound) { + this.maxDelayBound = maxDelayBound; } - public record InternalSettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeoutValue) {} + private void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + private void setDebugMode(DebugFrequencyMode debugMode) { + this.debugMode = debugMode; + } - public InternalSettings getSettings() { - return internalSettings; + private void setDebugFrequencyAmount(TimeValue debugFrequency) { + this.debugFrequency = debugFrequency; } public 
static List> getSettingsDefinitions() { - return List.of(RETRY_INITIAL_DELAY_SETTING, RETRY_MAX_DELAY_BOUND_SETTING, RETRY_TIMEOUT_SETTING); + return List.of( + RETRY_INITIAL_DELAY_SETTING, + RETRY_MAX_DELAY_BOUND_SETTING, + RETRY_TIMEOUT_SETTING, + RETRY_DEBUG_FREQUENCY_MODE_SETTING, + RETRY_DEBUG_FREQUENCY_AMOUNT_SETTING + ); + } + + TimeValue getInitialDelay() { + return initialDelay; + } + + TimeValue getMaxDelayBound() { + return maxDelayBound; + } + + TimeValue getTimeout() { + return timeout; + } + + DebugFrequencyMode getDebugMode() { + return debugMode; + } + + TimeValue getDebugFrequency() { + return debugFrequency; + } + + enum DebugFrequencyMode { + /** + * Indicates that the debug messages should be logged every time + */ + ON, + /** + * Indicates that the debug messages should never be logged + */ + OFF, + /** + * Indicates that the debug messages should be logged on an interval + */ + INTERVAL } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java index d8476c7c583d5..ffe10ffe3b6ae 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java @@ -7,7 +7,9 @@ package org.elasticsearch.xpack.inference.external.http.retry; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -15,8 +17,8 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.Request; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -24,40 +26,37 @@ import java.net.UnknownHostException; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.function.Supplier; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; -public class RetryingHttpSender implements Retrier { - private final Sender sender; +public class RetryingHttpSender implements RequestSender { + private final HttpClient httpClient; private final ThrottlerManager throttlerManager; - private final Logger logger; private final RetrySettings retrySettings; private final ThreadPool threadPool; private final Executor executor; public RetryingHttpSender( - Sender sender, + HttpClient httpClient, ThrottlerManager throttlerManager, - Logger logger, RetrySettings retrySettings, ThreadPool threadPool ) { - this(sender, throttlerManager, logger, retrySettings, threadPool, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + this(httpClient, throttlerManager, retrySettings, threadPool, threadPool.executor(UTILITY_THREAD_POOL_NAME)); } // For testing only RetryingHttpSender( - Sender sender, + HttpClient httpClient, ThrottlerManager throttlerManager, - Logger 
logger, RetrySettings retrySettings, ThreadPool threadPool, Executor executor ) { - this.sender = Objects.requireNonNull(sender); + this.httpClient = Objects.requireNonNull(httpClient); this.throttlerManager = Objects.requireNonNull(throttlerManager); - this.logger = Objects.requireNonNull(logger); this.retrySettings = Objects.requireNonNull(retrySettings); this.threadPool = Objects.requireNonNull(threadPool); this.executor = Objects.requireNonNull(executor); @@ -66,23 +65,41 @@ public RetryingHttpSender( private class InternalRetrier extends RetryableAction { private Request request; private final ResponseHandler responseHandler; - - InternalRetrier(Request request, ResponseHandler responseHandler, ActionListener listener) { + private final Logger logger; + private final HttpClientContext context; + private final Supplier hasRequestCompletedFunction; + + InternalRetrier( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestCompletedFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { super( - logger, + Objects.requireNonNull(logger), threadPool, - retrySettings.getSettings().initialDelay(), - retrySettings.getSettings().maxDelayBound(), - retrySettings.getSettings().timeoutValue(), + retrySettings.getInitialDelay(), + retrySettings.getMaxDelayBound(), + retrySettings.getTimeout(), listener, executor ); - this.request = request; - this.responseHandler = responseHandler; + this.logger = logger; + this.request = Objects.requireNonNull(request); + this.context = Objects.requireNonNull(context); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.hasRequestCompletedFunction = Objects.requireNonNull(hasRequestCompletedFunction); } @Override public void tryAction(ActionListener listener) { + // A timeout likely occurred so let's stop attempting to execute the request + if (hasRequestCompletedFunction.get()) { + return; + } + ActionListener responseListener = ActionListener.wrap(result -> { try { responseHandler.validateResponse(throttlerManager, logger, request, result); @@ -90,25 +107,21 @@ public void tryAction(ActionListener listener) { listener.onResponse(inferenceResults); } catch (Exception e) { - logException(request, result, responseHandler.getRequestType(), e); + logException(logger, request, result, responseHandler.getRequestType(), e); listener.onFailure(e); } }, e -> { - logException(request, responseHandler.getRequestType(), e); + logException(logger, request, responseHandler.getRequestType(), e); listener.onFailure(transformIfRetryable(e)); }); - sender.send(request.createHttpRequest(), responseListener); - } + try { + httpClient.send(request.createHttpRequest(), context, responseListener); + } catch (Exception e) { + logException(logger, request, responseHandler.getRequestType(), e); - @Override - public boolean shouldRetry(Exception e) { - if (e instanceof Retryable retry) { - request = retry.rebuildRequest(request); - return retry.shouldRetry(); + listener.onFailure(wrapWithElasticsearchException(e, request.getInferenceEntityId())); } - - return false; } /** @@ -135,15 +148,45 @@ private Exception transformIfRetryable(Exception e) { return exceptionToReturn; } + + private Exception wrapWithElasticsearchException(Exception e, String inferenceEntityId) { + var transformedException = transformIfRetryable(e); + + if (transformedException instanceof ElasticsearchException) { + return transformedException; + } + + return new ElasticsearchException( + format("Http client failed to send request from inference 
entity id [%s]", inferenceEntityId), + transformedException + ); + } + + @Override + public boolean shouldRetry(Exception e) { + if (e instanceof Retryable retry) { + request = retry.rebuildRequest(request); + return retry.shouldRetry(); + } + + return false; + } } @Override - public void send(Request request, ResponseHandler responseHandler, ActionListener listener) { - InternalRetrier retrier = new InternalRetrier(request, responseHandler, listener); + public void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { + InternalRetrier retrier = new InternalRetrier(logger, request, context, hasRequestTimedOutFunction, responseHandler, listener); retrier.run(); } - private void logException(Request request, String requestType, Exception exception) { + private void logException(Logger logger, Request request, String requestType, Exception exception) { var causeException = ExceptionsHelper.unwrapCause(exception); throttlerManager.warn( @@ -153,7 +196,7 @@ private void logException(Request request, String requestType, Exception excepti ); } - private void logException(Request request, HttpResult result, String requestType, Exception exception) { + private void logException(Logger logger, Request request, HttpResult result, String requestType, Exception exception) { var causeException = ExceptionsHelper.unwrapCause(exception); throttlerManager.warn( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java new file mode 100644 index 0000000000000..b0fdc800a64da --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsExecutableRequestCreator.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.cohere.CohereAccount; +import org.elasticsearch.xpack.inference.external.cohere.CohereResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.cohere.CohereEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.cohere.CohereEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class CohereEmbeddingsExecutableRequestCreator implements ExecutableRequestCreator { + private static final Logger logger = LogManager.getLogger(CohereEmbeddingsExecutableRequestCreator.class); + private static final ResponseHandler HANDLER = createEmbeddingsHandler(); + + private static ResponseHandler createEmbeddingsHandler() { + return new CohereResponseHandler("cohere text embedding", CohereEmbeddingsResponseEntity::fromResponse); + } + + private final CohereAccount account; + private final CohereEmbeddingsModel model; + + public CohereEmbeddingsExecutableRequestCreator(CohereEmbeddingsModel model) { + this.model = Objects.requireNonNull(model); + account = new CohereAccount(this.model.getServiceSettings().getCommonSettings().getUri(), this.model.getSecretSettings().apiKey()); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + CohereEmbeddingsRequest request = new CohereEmbeddingsRequest(account, input, model); + + return new ExecutableInferenceRequest(requestSender, logger, request, context, HANDLER, hasRequestCompletedFunction, listener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java new file mode 100644 index 0000000000000..53f30773cbfe3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.util.function.Supplier; + +record ExecutableInferenceRequest( + RequestSender requestSender, + Logger logger, + Request request, + HttpClientContext context, + ResponseHandler responseHandler, + Supplier hasFinished, + ActionListener listener +) implements Runnable { + + @Override + public void run() { + var inferenceEntityId = request.createHttpRequest().inferenceEntityId(); + + try { + requestSender.send(logger, request, context, hasFinished, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format("Failed to send request from inference entity id [%s]", inferenceEntityId); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java new file mode 100644 index 0000000000000..96455ca4b1cb1 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreator.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; + +import java.util.List; +import java.util.function.Supplier; + +/** + * A contract for constructing a {@link Runnable} to handle sending an inference request to a 3rd party service. + */ +public interface ExecutableRequestCreator { + Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java new file mode 100644 index 0000000000000..0131bf2989f6f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSender.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
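The two types above split describing a provider request from executing it: an ExecutableRequestCreator turns the caller's input into an ExecutableInferenceRequest, and that record simply runs the RequestSender with the right response handler. A minimal sketch of the composition follows; it mirrors what SingleRequestManager (added later in this patch) does for every dequeued task, and it assumes a CohereEmbeddingsModel `model`, a RetryingHttpSender `requestSender`, and an ActionListener `listener` already exist.

// Sketch only: compose a creator with the retrying sender by hand.
ExecutableRequestCreator creator = new CohereEmbeddingsExecutableRequestCreator(model);
Runnable executable = creator.create(
    List.of("some text to embed"),  // raw string input for the provider
    requestSender,                  // performs the HTTP call with retry handling
    () -> false,                    // completion check; the real caller passes the task's hasCompleted
    HttpClientContext.create(),     // per-executor Apache HTTP context
    listener                        // receives InferenceServiceResults or the failure
);
executable.run();                   // builds the CohereEmbeddingsRequest and sends it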
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +/** + * A class for providing a more friendly interface for sending an inference request to a 3rd party service. + */ +public class HttpRequestSender implements Sender { + + /** + * A helper class for constructing a {@link HttpRequestSender}. + */ + public static class Factory { + private final ServiceComponents serviceComponents; + private final HttpClientManager httpClientManager; + private final ClusterService clusterService; + private final SingleRequestManager requestManager; + + public Factory(ServiceComponents serviceComponents, HttpClientManager httpClientManager, ClusterService clusterService) { + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.httpClientManager = Objects.requireNonNull(httpClientManager); + this.clusterService = Objects.requireNonNull(clusterService); + + var requestSender = new RetryingHttpSender( + this.httpClientManager.getHttpClient(), + serviceComponents.throttlerManager(), + new RetrySettings(serviceComponents.settings(), clusterService), + serviceComponents.threadPool() + ); + requestManager = new SingleRequestManager(requestSender); + } + + public Sender createSender(String serviceName) { + return new HttpRequestSender( + serviceName, + serviceComponents.threadPool(), + httpClientManager, + clusterService, + serviceComponents.settings(), + requestManager + ); + } + } + + private static final Logger logger = LogManager.getLogger(HttpRequestSender.class); + private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); + + /** + * The maximum time a request can take. The timer starts once a request is enqueued and continues until a response is + * received from the 3rd party service. This encompasses the time the request might just sit in the queue waiting to be sent + * if another request is already waiting for a connection lease from the connection pool. 
+ */ + public static final Setting MAX_REQUEST_TIMEOUT = Setting.timeSetting( + "xpack.inference.http.max_request_timeout", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final ThreadPool threadPool; + private final HttpClientManager manager; + private final RequestExecutorService service; + private final AtomicBoolean started = new AtomicBoolean(false); + private volatile TimeValue maxRequestTimeout; + private final CountDownLatch startCompleted = new CountDownLatch(2); + + private HttpRequestSender( + String serviceName, + ThreadPool threadPool, + HttpClientManager httpClientManager, + ClusterService clusterService, + Settings settings, + SingleRequestManager requestManager + ) { + this.threadPool = Objects.requireNonNull(threadPool); + this.manager = Objects.requireNonNull(httpClientManager); + service = new RequestExecutorService( + serviceName, + threadPool, + startCompleted, + new RequestExecutorServiceSettings(settings, clusterService), + requestManager + ); + + this.maxRequestTimeout = MAX_REQUEST_TIMEOUT.get(settings); + addSettingsUpdateConsumers(clusterService); + } + + private void addSettingsUpdateConsumers(ClusterService clusterService) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_REQUEST_TIMEOUT, this::setMaxRequestTimeout); + } + + // Default for testing + void setMaxRequestTimeout(TimeValue maxRequestTimeout) { + logger.debug(() -> format("Max request timeout updated to [%s] for service [%s]", maxRequestTimeout, service)); + this.maxRequestTimeout = maxRequestTimeout; + } + + /** + * Start various internal services. This is required before sending requests. + */ + public void start() { + if (started.compareAndSet(false, true)) { + // The manager must be started before the executor service. That way we guarantee that the http client + // is ready prior to the service attempting to use the http client to send a request + manager.start(); + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(service::start); + startCompleted.countDown(); + } + } + + @Override + public void close() throws IOException { + manager.close(); + service.shutdown(); + } + + /** + * Send a request at some point in the future. The timeout used is retrieved from the settings. + * @param requestCreator a factory for creating a request to be sent to a 3rd party service + * @param input the list of string input to send in the request + * @param timeout the maximum time the request should wait for a response before timing out. If null, the timeout is ignored. + * The queuing logic may still throw a timeout if it fails to send the request because it couldn't get a leased + * @param listener a listener to handle the response + */ + public void send( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ) { + assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); + service.execute(requestCreator, input, timeout, listener); + } + + private void waitForStartToComplete() { + try { + if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { + throw new IllegalStateException("Http sender startup did not complete in time"); + } + } catch (InterruptedException e) { + throw new IllegalStateException("Http sender interrupted while waiting for startup to complete"); + } + } + + /** + * Send a request at some point in the future. The timeout used is retrieved from the settings. 
+ * @param requestCreator a factory for creating a request to be sent to a 3rd party service + * @param input the list of string input to send in the request + * @param listener a listener to handle the response + */ + public void send(ExecutableRequestCreator requestCreator, List input, ActionListener listener) { + assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); + service.execute(requestCreator, input, maxRequestTimeout, listener); + } + + public static List> getSettings() { + return List.of(MAX_REQUEST_TIMEOUT); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java deleted file mode 100644 index c773f57933415..0000000000000 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference.external.http.sender; - -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; - -/** - * A helper class for constructing a {@link HttpRequestSender}. - */ -public class HttpRequestSenderFactory { - private final ThreadPool threadPool; - private final HttpClientManager httpClientManager; - private final ClusterService clusterService; - private final Settings settings; - - public HttpRequestSenderFactory( - ThreadPool threadPool, - HttpClientManager httpClientManager, - ClusterService clusterService, - Settings settings - ) { - this.threadPool = Objects.requireNonNull(threadPool); - this.httpClientManager = Objects.requireNonNull(httpClientManager); - this.clusterService = Objects.requireNonNull(clusterService); - this.settings = Objects.requireNonNull(settings); - } - - public Sender createSender(String serviceName) { - return new HttpRequestSender(serviceName, threadPool, httpClientManager, clusterService, settings); - } - - /** - * A class for providing a more friendly interface for sending an {@link HttpUriRequest}. This leverages the queuing logic for sending - * a request. 
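The factory deleted below is replaced by the nested HttpRequestSender.Factory shown above, so the expected lifecycle is: build the factory once from the plugin's components, create one named sender per service, start it before sending, and close it on shutdown. A hedged sketch, assuming `serviceComponents`, `httpClientManager`, `clusterService` and a `requestCreator` are provided by the plugin wiring (close() may throw IOException):

// Sketch only: lifecycle of the new sender. close() stops the HTTP client manager and the executor.
var factory = new HttpRequestSender.Factory(serviceComponents, httpClientManager, clusterService);
try (Sender sender = factory.createSender("openai")) {
    sender.start();  // must be called before send(); starts the client manager and the request queue

    sender.send(
        requestCreator,                         // e.g. an OpenAiEmbeddingsExecutableRequestCreator
        List.of("first input", "second input"),
        TimeValue.timeValueSeconds(10),         // explicit per-request timeout; pass null to wait indefinitely
        ActionListener.wrap(
            results -> { /* use the InferenceServiceResults */ },
            e -> { /* an ElasticsearchTimeoutException if the 10 seconds elapse first */ }
        )
    );
}

The overload without a timeout argument uses the dynamic xpack.inference.http.max_request_timeout setting (default 30s) instead of an explicit value.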
- */ - public static final class HttpRequestSender implements Sender { - private static final Logger logger = LogManager.getLogger(HttpRequestSender.class); - private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); - - /** - * The maximum time a request can take. The timer starts once a request is enqueued and continues until a response is - * received from the 3rd party service. This encompasses the time the request might just sit in the queue waiting to be sent - * if another request is already waiting for a connection lease from the connection pool. - */ - public static final Setting MAX_REQUEST_TIMEOUT = Setting.timeSetting( - "xpack.inference.http.max_request_timeout", - TimeValue.timeValueSeconds(30), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - private final ThreadPool threadPool; - private final HttpClientManager manager; - private final RequestExecutorService service; - private final AtomicBoolean started = new AtomicBoolean(false); - private volatile TimeValue maxRequestTimeout; - private final CountDownLatch startCompleted = new CountDownLatch(2); - - private HttpRequestSender( - String serviceName, - ThreadPool threadPool, - HttpClientManager httpClientManager, - ClusterService clusterService, - Settings settings - ) { - this.threadPool = Objects.requireNonNull(threadPool); - this.manager = Objects.requireNonNull(httpClientManager); - service = new RequestExecutorService( - serviceName, - manager.getHttpClient(), - threadPool, - startCompleted, - new RequestExecutorServiceSettings(settings, clusterService) - ); - - this.maxRequestTimeout = MAX_REQUEST_TIMEOUT.get(settings); - addSettingsUpdateConsumers(clusterService); - } - - private void addSettingsUpdateConsumers(ClusterService clusterService) { - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_REQUEST_TIMEOUT, this::setMaxRequestTimeout); - } - - // Default for testing - void setMaxRequestTimeout(TimeValue maxRequestTimeout) { - logger.debug(() -> format("Max request timeout updated to [%s] for service [%s]", maxRequestTimeout, service)); - this.maxRequestTimeout = maxRequestTimeout; - } - - /** - * Start various internal services. This is required before sending requests. - */ - public void start() { - if (started.compareAndSet(false, true)) { - // The manager must be started before the executor service. That way we guarantee that the http client - // is ready prior to the service attempting to use the http client to send a request - manager.start(); - threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(service::start); - startCompleted.countDown(); - } - } - - @Override - public void close() throws IOException { - manager.close(); - service.shutdown(); - } - - /** - * Send a request at some point in the future with a timeout specified. - * @param request the http request to send - * @param timeout the maximum time the request should wait for a response before timing out. If null, the timeout is ignored. 
- * The queuing logic may still throw a timeout if it fails to send the request because it couldn't get a leased - * connection from the connection pool - * @param listener a listener to handle the response - */ - public void send(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener) { - assert started.get() : "call start() before sending a request"; - waitForStartToComplete(); - service.execute(request, timeout, listener); - } - - private void waitForStartToComplete() { - try { - if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { - throw new IllegalStateException("Http sender startup did not complete in time"); - } - } catch (InterruptedException e) { - throw new IllegalStateException("Http sender interrupted while waiting for startup to complete"); - } - } - - /** - * Send a request at some point in the future. The timeout used is retrieved from the settings. - * @param request the http request to send - * @param listener a listener to handle the response - */ - public void send(HttpRequest request, ActionListener listener) { - assert started.get() : "call start() before sending a request"; - waitForStartToComplete(); - service.execute(request, maxRequestTimeout, listener); - } - - public static List> getSettings() { - return List.of(MAX_REQUEST_TIMEOUT); - } - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java new file mode 100644 index 0000000000000..62558fe6071ac --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceExecutableRequestCreator.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; +import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceInferenceRequest; +import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class HuggingFaceExecutableRequestCreator implements ExecutableRequestCreator { + private static final Logger logger = LogManager.getLogger(HuggingFaceExecutableRequestCreator.class); + + private final HuggingFaceModel model; + private final HuggingFaceAccount account; + private final ResponseHandler responseHandler; + private final Truncator truncator; + + public HuggingFaceExecutableRequestCreator(HuggingFaceModel model, ResponseHandler responseHandler, Truncator truncator) { + this.model = Objects.requireNonNull(model); + account = new HuggingFaceAccount(model.getUri(), model.getApiKey()); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + var truncatedInput = truncate(input, model.getTokenLimit()); + var request = new HuggingFaceInferenceRequest(truncator, account, truncatedInput, model); + + return new ExecutableInferenceRequest( + requestSender, + logger, + request, + context, + responseHandler, + hasRequestCompletedFunction, + listener + ); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java new file mode 100644 index 0000000000000..ed77e4b207a94 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; + +import java.util.List; +import java.util.function.Supplier; + +/** + * A contract for defining a request sent to a 3rd party service. + */ +public interface InferenceRequest { + + /** + * Returns the creator that handles building an executable request based on the input provided. + */ + ExecutableRequestCreator getRequestCreator(); + + /** + * Returns the text input associated with this request. 
+ */ + List getInput(); + + /** + * Returns the listener to notify of the results. + */ + ActionListener getListener(); + + /** + * Returns whether the request has completed. Returns true if from a failure, success, or a timeout. + */ + boolean hasCompleted(); + + /** + * Returns a {@link Supplier} to determine if the request has completed. + */ + Supplier getRequestCompletedFunction(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java index c5e533eb7d8fe..6cdcd38d224a9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/NoopTask.java @@ -7,13 +7,41 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; -class NoopTask extends AbstractRunnable { +import java.util.List; +import java.util.function.Supplier; + +class NoopTask implements RejectableTask { + + @Override + public ExecutableRequestCreator getRequestCreator() { + return null; + } + + @Override + public List getInput() { + return null; + } @Override - public void onFailure(Exception e) {} + public ActionListener getListener() { + return null; + } @Override - protected void doRun() throws Exception {} + public boolean hasCompleted() { + return true; + } + + @Override + public Supplier getRequestCompletedFunction() { + return () -> true; + } + + @Override + public void onRejection(Exception e) { + + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java new file mode 100644 index 0000000000000..708e67944441c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreator.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.openai.OpenAiResponseHandler; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class OpenAiEmbeddingsExecutableRequestCreator implements ExecutableRequestCreator { + + private static final Logger logger = LogManager.getLogger(OpenAiEmbeddingsExecutableRequestCreator.class); + + private static final ResponseHandler HANDLER = createEmbeddingsHandler(); + + private static ResponseHandler createEmbeddingsHandler() { + return new OpenAiResponseHandler("openai text embedding", OpenAiEmbeddingsResponseEntity::fromResponse); + } + + private final Truncator truncator; + private final OpenAiEmbeddingsModel model; + private final OpenAiAccount account; + + public OpenAiEmbeddingsExecutableRequestCreator(OpenAiEmbeddingsModel model, Truncator truncator) { + this.model = Objects.requireNonNull(model); + this.account = new OpenAiAccount( + this.model.getServiceSettings().uri(), + this.model.getServiceSettings().organizationId(), + this.model.getSecretSettings().apiKey() + ); + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public Runnable create( + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + HttpClientContext context, + ActionListener listener + ) { + var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(truncator, account, truncatedInput, model); + + return new ExecutableInferenceRequest(requestSender, logger, request, context, HANDLER, hasRequestCompletedFunction, listener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java new file mode 100644 index 0000000000000..3da64d5491a60 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RejectableTask.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +interface RejectableTask extends InferenceRequest { + void onRejection(Exception e); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java index f844787455290..00c28e8afc069 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java @@ -11,17 +11,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.common.AdjustableCapacityBlockingQueue; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.http.RequestExecutor; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; import java.util.ArrayList; import java.util.List; @@ -36,8 +33,7 @@ import static org.elasticsearch.core.Strings.format; /** - * An {@link java.util.concurrent.ExecutorService} for queuing and executing {@link RequestTask} containing - * {@link org.apache.http.client.methods.HttpUriRequest}. This class is useful because the + * A service for queuing and executing {@link RequestTask}. This class is useful because the * {@link org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager} will block when leasing a connection if no * connections are available. To avoid blocking the inference transport threads, this executor will queue up the * requests until connections are available. @@ -48,12 +44,11 @@ * {@link org.apache.http.client.config.RequestConfig.Builder#setConnectionRequestTimeout} for more info. 
*/ class RequestExecutorService implements RequestExecutor { - - private static final AdjustableCapacityBlockingQueue.QueueCreator QUEUE_CREATOR = + private static final AdjustableCapacityBlockingQueue.QueueCreator QUEUE_CREATOR = new AdjustableCapacityBlockingQueue.QueueCreator<>() { @Override - public BlockingQueue create(int capacity) { - BlockingQueue queue; + public BlockingQueue create(int capacity) { + BlockingQueue queue; if (capacity <= 0) { queue = create(); } else { @@ -64,41 +59,30 @@ public BlockingQueue create(int capacity) { } @Override - public BlockingQueue create() { + public BlockingQueue create() { return new LinkedBlockingQueue<>(); } }; private static final Logger logger = LogManager.getLogger(RequestExecutorService.class); private final String serviceName; - private final AdjustableCapacityBlockingQueue queue; + private final AdjustableCapacityBlockingQueue queue; private final AtomicBoolean running = new AtomicBoolean(true); private final CountDownLatch terminationLatch = new CountDownLatch(1); private final HttpClientContext httpContext; - private final HttpClient httpClient; private final ThreadPool threadPool; private final CountDownLatch startupLatch; private final BlockingQueue controlQueue = new LinkedBlockingQueue<>(); + private final SingleRequestManager requestManager; RequestExecutorService( String serviceName, - HttpClient httpClient, ThreadPool threadPool, @Nullable CountDownLatch startupLatch, - RequestExecutorServiceSettings settings + RequestExecutorServiceSettings settings, + SingleRequestManager requestManager ) { - this(serviceName, httpClient, threadPool, QUEUE_CREATOR, startupLatch, settings); - } - - private static BlockingQueue buildQueue(int capacity) { - BlockingQueue queue; - if (capacity <= 0) { - queue = new LinkedBlockingQueue<>(); - } else { - queue = new LinkedBlockingQueue<>(capacity); - } - - return queue; + this(serviceName, threadPool, QUEUE_CREATOR, startupLatch, settings, requestManager); } /** @@ -106,18 +90,18 @@ private static BlockingQueue buildQueue(int capacity) { */ RequestExecutorService( String serviceName, - HttpClient httpClient, ThreadPool threadPool, - AdjustableCapacityBlockingQueue.QueueCreator createQueue, + AdjustableCapacityBlockingQueue.QueueCreator createQueue, @Nullable CountDownLatch startupLatch, - RequestExecutorServiceSettings settings + RequestExecutorServiceSettings settings, + SingleRequestManager requestManager ) { this.serviceName = Objects.requireNonNull(serviceName); - this.httpClient = Objects.requireNonNull(httpClient); this.threadPool = Objects.requireNonNull(threadPool); this.httpContext = HttpClientContext.create(); this.queue = new AdjustableCapacityBlockingQueue<>(createQueue, settings.getQueueCapacity()); this.startupLatch = startupLatch; + this.requestManager = Objects.requireNonNull(requestManager); Objects.requireNonNull(settings); settings.registerQueueCapacityCallback(this::onCapacityChange); @@ -179,7 +163,7 @@ private void signalStartInitiated() { */ private void handleTasks() throws InterruptedException { try { - AbstractRunnable task = queue.take(); + RejectableTask task = queue.take(); var command = controlQueue.poll(); if (command != null) { @@ -200,9 +184,9 @@ private void handleTasks() throws InterruptedException { } } - private void executeTask(AbstractRunnable task) { + private void executeTask(RejectableTask task) { try { - task.run(); + requestManager.execute(task, httpContext); } catch (Exception e) { logger.warn(format("Http executor service [%s] failed to execute request 
[%s]", serviceName, task), e); } @@ -212,7 +196,7 @@ private synchronized void notifyRequestsOfShutdown() { assert isShutdown() : "Requests should only be notified if the executor is shutting down"; try { - List notExecuted = new ArrayList<>(); + List notExecuted = new ArrayList<>(); queue.drainTo(notExecuted); rejectTasks(notExecuted, this::rejectTaskBecauseOfShutdown); @@ -221,7 +205,7 @@ private synchronized void notifyRequestsOfShutdown() { } } - private void rejectTaskBecauseOfShutdown(AbstractRunnable task) { + private void rejectTaskBecauseOfShutdown(RejectableTask task) { try { task.onRejection( new EsRejectedExecutionException( @@ -236,7 +220,7 @@ private void rejectTaskBecauseOfShutdown(AbstractRunnable task) { } } - private void rejectTasks(List tasks, Consumer rejectionFunction) { + private void rejectTasks(List tasks, Consumer rejectionFunction) { for (var task : tasks) { rejectionFunction.accept(task); } @@ -270,16 +254,22 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE } /** - * Send the request at some point in the future. + * Execute the request at some point in the future. * - * @param request the http request to send - * @param timeout the maximum time to wait for this request to complete (failing or succeeding). Once the time elapses, the - * listener::onFailure is called with a {@link org.elasticsearch.ElasticsearchTimeoutException}. - * If null, then the request will wait forever - * @param listener an {@link ActionListener} for the response or failure + * @param requestCreator the http request to send + * @param input the text to perform inference on + * @param timeout the maximum time to wait for this request to complete (failing or succeeding). Once the time elapses, the + * listener::onFailure is called with a {@link org.elasticsearch.ElasticsearchTimeoutException}. 
+ * If null, then the request will wait forever + * @param listener an {@link ActionListener} for the response or failure */ - public void execute(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener) { - RequestTask task = new RequestTask(request, httpClient, httpContext, timeout, threadPool, listener); + public void execute( + ExecutableRequestCreator requestCreator, + List input, + @Nullable TimeValue timeout, + ActionListener listener + ) { + var task = new RequestTask(requestCreator, input, timeout, threadPool, listener); if (isShutdown()) { EsRejectedExecutionException rejected = new EsRejectedExecutionException( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java index cc65d16af652c..970366f7765dd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java @@ -7,157 +7,96 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.action.support.ListenerTimeouts; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; -import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; -class RequestTask extends AbstractRunnable { - private static final Logger logger = LogManager.getLogger(RequestTask.class); - private static final Scheduler.Cancellable NOOP_TIMEOUT_HANDLER = createDefaultHandler(); +class RequestTask implements RejectableTask { - private final HttpRequest request; - private final ActionListener listener; - private final Scheduler.Cancellable timeoutHandler; - private final AtomicBoolean notified = new AtomicBoolean(); - private final TimeValue timeout; - private final Runnable command; + private final AtomicBoolean finished = new AtomicBoolean(); + private final ExecutableRequestCreator requestCreator; + private final List input; + private final ActionListener listener; RequestTask( - HttpRequest request, - HttpClient httpClient, - HttpClientContext context, + ExecutableRequestCreator requestCreator, + List input, @Nullable TimeValue timeout, ThreadPool threadPool, - ActionListener listener + ActionListener listener ) { - this.request = Objects.requireNonNull(request); - this.listener = Objects.requireNonNull(listener); - this.timeout = timeout; - this.timeoutHandler 
= startTimer(threadPool, timeout); - this.command = threadPool.getThreadContext() - .preserveContext( - new Command( - Objects.requireNonNull(httpClient), - this.request, - Objects.requireNonNull(context), - ActionListener.wrap(this::onSuccess, this::onFailure) - ) - ); + this.requestCreator = Objects.requireNonNull(requestCreator); + this.input = Objects.requireNonNull(input); + this.listener = getListener(Objects.requireNonNull(listener), timeout, Objects.requireNonNull(threadPool)); } - private Scheduler.Cancellable startTimer(ThreadPool threadPool, TimeValue timeout) { - Objects.requireNonNull(threadPool); + private ActionListener getListener( + ActionListener origListener, + @Nullable TimeValue timeout, + ThreadPool threadPool + ) { + ActionListener notificationListener = ActionListener.wrap(result -> { + finished.set(true); + origListener.onResponse(result); + }, e -> { + finished.set(true); + origListener.onFailure(e); + }); if (timeout == null) { - return NOOP_TIMEOUT_HANDLER; + return notificationListener; } - return threadPool.schedule(this::onTimeout, timeout, threadPool.executor(UTILITY_THREAD_POOL_NAME)); - } - - private void onTimeout() { - assert timeout != null : "timeout must be defined to use a timeout handler"; - logger.debug( - () -> format( - "Request from inference entity id [%s] timed out after [%s] while waiting to be executed", - request.inferenceEntityId(), - timeout + return ListenerTimeouts.wrapWithTimeout( + threadPool, + timeout, + threadPool.executor(UTILITY_THREAD_POOL_NAME), + notificationListener, + (ignored) -> notificationListener.onFailure( + new ElasticsearchTimeoutException(Strings.format("Request timed out waiting to be sent after [%s]", timeout)) ) ); - notifyOfResult( - () -> listener.onFailure( - new ElasticsearchTimeoutException(format("Request timed out waiting to be executed after [%s]", timeout)) - ) - ); - } - - private void notifyOfResult(Runnable runnable) { - if (notified.compareAndSet(false, true)) { - runnable.run(); - } } @Override - public void onFailure(Exception e) { - timeoutHandler.cancel(); - notifyOfResult(() -> listener.onFailure(e)); + public boolean hasCompleted() { + return finished.get(); } @Override - protected void doRun() { - try { - command.run(); - } catch (Exception e) { - String message = format("Failed while executing request from inference entity id [%s]", request.inferenceEntityId()); - logger.warn(message, e); - onFailure(new ElasticsearchException(message, e)); - } + public Supplier getRequestCompletedFunction() { + return this::hasCompleted; } - private void onSuccess(HttpResult result) { - timeoutHandler.cancel(); - notifyOfResult(() -> listener.onResponse(result)); + @Override + public List getInput() { + return input; } @Override - public String toString() { - return request.inferenceEntityId(); + public ActionListener getListener() { + return listener; } - private static Scheduler.Cancellable createDefaultHandler() { - return new Scheduler.Cancellable() { - @Override - public boolean cancel() { - return true; - } - - @Override - public boolean isCancelled() { - return true; - } - }; + @Override + public void onRejection(Exception e) { + listener.onFailure(e); } - private record Command( - HttpClient httpClient, - HttpRequest requestToSend, - HttpClientContext context, - ActionListener resultListener - ) implements Runnable { - - @Override - public void run() { - try { - httpClient.send(requestToSend, context, resultListener); - } catch (Exception e) { - logger.warn( - format("Failed to send request from 
inference entity id [%s] via the http client", requestToSend.inferenceEntityId()),
-                    e
-                );
-                resultListener.onFailure(
-                    new ElasticsearchException(
-                        format("Failed to send request from inference entity id [%s]", requestToSend.inferenceEntityId()),
-                        e
-                    )
-                );
-            }
-        }
+    @Override
+    public ExecutableRequestCreator getRequestCreator() {
+        return requestCreator;
+    }
 }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java
index f1a0e112219fd..0272f4b0e351c 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/Sender.java
@@ -10,15 +10,20 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.xpack.inference.external.http.HttpResult;
-import org.elasticsearch.xpack.inference.external.request.HttpRequest;
+import org.elasticsearch.inference.InferenceServiceResults;

 import java.io.Closeable;
+import java.util.List;

 public interface Sender extends Closeable {
     void start();

-    void send(HttpRequest request, ActionListener listener);
+    void send(
+        ExecutableRequestCreator requestCreator,
+        List input,
+        @Nullable TimeValue timeout,
+        ActionListener listener
+    );

-    void send(HttpRequest request, @Nullable TimeValue timeout, ActionListener listener);
+    void send(ExecutableRequestCreator requestCreator, List input, ActionListener listener);
 }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java
new file mode 100644
index 0000000000000..ecd12814d0877
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManager.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.http.sender;
+
+import org.apache.http.client.protocol.HttpClientContext;
+import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender;
+
+import java.util.Objects;
+
+/**
+ * Handles executing a single inference request at a time.
+ */
+public class SingleRequestManager {
+
+    protected RetryingHttpSender requestSender;
+
+    public SingleRequestManager(RetryingHttpSender requestSender) {
+        this.requestSender = Objects.requireNonNull(requestSender);
+    }
+
+    public void execute(InferenceRequest inferenceRequest, HttpClientContext context) {
+        if (isNoopRequest(inferenceRequest) || inferenceRequest.hasCompleted()) {
+            return;
+        }
+
+        inferenceRequest.getRequestCreator()
+            .create(
+                inferenceRequest.getInput(),
+                requestSender,
+                inferenceRequest.getRequestCompletedFunction(),
+                context,
+                inferenceRequest.getListener()
+            )
+            .run();
+    }
+
+    private static boolean isNoopRequest(InferenceRequest inferenceRequest) {
+        return inferenceRequest.getRequestCreator() == null
+            || inferenceRequest.getInput() == null
+            || inferenceRequest.getListener() == null;
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java
deleted file mode 100644
index cb82616587091..0000000000000
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.inference.external.openai;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.inference.InferenceServiceResults;
-import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler;
-import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings;
-import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender;
-import org.elasticsearch.xpack.inference.external.http.sender.Sender;
-import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest;
-import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity;
-import org.elasticsearch.xpack.inference.services.ServiceComponents;
-
-import java.io.IOException;
-
-public class OpenAiClient {
-    private static final Logger logger = LogManager.getLogger(OpenAiClient.class);
-    private static final ResponseHandler EMBEDDINGS_HANDLER = createEmbeddingsHandler();
-
-    private final RetryingHttpSender sender;
-
-    public OpenAiClient(Sender sender, ServiceComponents serviceComponents) {
-        this.sender = new RetryingHttpSender(
-            sender,
-            serviceComponents.throttlerManager(),
-            logger,
-            new RetrySettings(serviceComponents.settings()),
-            serviceComponents.threadPool()
-        );
-    }
-
-    public void send(OpenAiEmbeddingsRequest request, ActionListener listener) throws IOException {
-        sender.send(request, EMBEDDINGS_HANDLER, listener);
-    }
-
-    private static ResponseHandler createEmbeddingsHandler() {
-        return new OpenAiResponseHandler("openai text embedding", OpenAiEmbeddingsResponseEntity::fromResponse);
-    }
-}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java
index 96378b109ae2d..98b004cd1aa7f 100644
---
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -15,7 +15,7 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import java.io.IOException; @@ -27,7 +27,7 @@ public abstract class SenderService implements InferenceService { private final Sender sender; private final ServiceComponents serviceComponents; - public SenderService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public SenderService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { Objects.requireNonNull(factory); sender = factory.createSender(name()); this.serviceComponents = Objects.requireNonNull(serviceComponents); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index 9502acdaf93e5..172a71bd45434 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -23,7 +23,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionCreator; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -42,7 +42,7 @@ public class CohereService extends SenderService { public static final String NAME = "cohere"; - public CohereService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public CohereService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java index 9cd8c285b406e..20994f3cc8e1e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseService.java @@ -17,7 +17,7 @@ import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceActionCreator; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.SenderService; import 
org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -32,7 +32,7 @@ public abstract class HuggingFaceBaseService extends SenderService { - public HuggingFaceBaseService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public HuggingFaceBaseService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index 2d2f4667478d5..838d3dc857fbc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -15,7 +15,7 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; @@ -26,7 +26,7 @@ public class HuggingFaceService extends HuggingFaceBaseService { public static final String NAME = "hugging_face"; - public HuggingFaceService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public HuggingFaceService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index 3cc2ca5ed60a5..2587b2737e164 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -13,7 +13,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceBaseService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceModel; @@ -23,7 +23,7 @@ public class HuggingFaceElserService extends HuggingFaceBaseService { public static final String NAME = "hugging_face_elser"; - public HuggingFaceElserService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public HuggingFaceElserService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 5062bba8e7eac..234328de67efe 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -24,7 +24,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -45,7 +45,7 @@ public class OpenAiService extends SenderService { public static final String NAME = "openai"; - public OpenAiService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + public OpenAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 5b7ffb3c8153e..96650bcca565e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -39,7 +39,7 @@ public static ClusterService mockClusterService(Settings settings) { var registeredSettings = Stream.of( HttpSettings.getSettings(), HttpClientManager.getSettings(), - HttpRequestSenderFactory.HttpRequestSender.getSettings(), + HttpRequestSender.getSettings(), ThrottlerManager.getSettings(), RetrySettings.getSettingsDefinitions(), Truncator.getSettings(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java index e7cfc784db117..66ef9910a2649 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import 
org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.cohere.CohereTruncation; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; @@ -67,7 +67,7 @@ public void shutdown() throws IOException { } public void testCreate_CohereEmbeddingsModel() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java index 7fd33f7bba58f..b504744bfe5f3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereEmbeddingsActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.cohere.CohereUtils; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -47,7 +47,6 @@ import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; -import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -77,9 +76,9 @@ public void shutdown() throws IOException { } public void testExecute_ReturnsSuccessfulResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var sender = senderFactory.createSender("test_service")) { + try (var sender = HttpRequestSenderTests.createSenderWithSingleRequestManager(senderFactory, "test_service")) { sender.start(); String responseJson = """ @@ -158,9 +157,9 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { } public void testExecute_ReturnsSuccessfulResponse_ForInt8ResponseType() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); - try (var sender = senderFactory.createSender("test_service")) { + try (var sender = 
HttpRequestSenderTests.createSenderWithSingleRequestManager(senderFactory, "test_service")) { sender.start(); String responseJson = """ @@ -253,7 +252,7 @@ public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOExcept public void testExecute_ThrowsElasticsearchException() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -274,7 +273,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -298,7 +297,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(null, "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -312,7 +311,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -329,7 +328,7 @@ public void testExecute_ThrowsException() { public void testExecute_ThrowsExceptionWithNullUrl() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(null, "secret", CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, null, null, sender); @@ -351,7 +350,7 @@ private CohereEmbeddingsAction createAction( ) { var model = CohereEmbeddingsModelTests.createModel(url, apiKey, taskSettings, 1024, 1024, modelName, embeddingType); - return new CohereEmbeddingsAction(sender, model, createWithEmptySettings(threadPool)); + return new CohereEmbeddingsAction(sender, model); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java index 95b69f1231e9d..6334c669d0c1f 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionCreatorTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.common.TruncatorTests; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import 
org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; import org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests; @@ -31,7 +31,6 @@ import org.junit.Before; import java.io.IOException; -import java.net.URISyntaxException; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -72,7 +71,7 @@ public void shutdown() throws IOException { @SuppressWarnings("unchecked") public void testExecute_ReturnsSuccessfulResponse_ForElserAction() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -121,8 +120,14 @@ public void testExecute_ReturnsSuccessfulResponse_ForElserAction() throws IOExce } @SuppressWarnings("unchecked") - public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -147,17 +152,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOEx var model = HuggingFaceElserModelTests.createModel(getUrl(webServer), "secret"); var actionCreator = new HuggingFaceActionCreator( sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) + new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()) ); var action = actionCreator.create(model); @@ -188,7 +183,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForElserAction() throws IOEx @SuppressWarnings("unchecked") public void testExecute_ReturnsSuccessfulResponse_ForEmbeddingsAction() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -233,8 +228,14 @@ public void testExecute_ReturnsSuccessfulResponse_ForEmbeddingsAction() throws I } @SuppressWarnings("unchecked") - public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + 
TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -257,17 +258,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws var model = HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret"); var actionCreator = new HuggingFaceActionCreator( sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) + new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()) ); var action = actionCreator.create(model); @@ -297,7 +288,7 @@ public void testSend_FailsFromInvalidResponseFormat_ForEmbeddingsAction() throws } public void testExecute_ReturnsSuccessfulResponse_AfterTruncating() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -362,7 +353,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating() throws IOExc } public void testExecute_TruncatesInputBeforeSending() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java index 25b05327a21b7..7b332e8c6634d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceActionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.common.TruncatorTests; -import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.http.retry.AlwaysRetryingResponseHandler; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -54,7 +53,7 @@ public void shutdown() throws IOException { public void testExecute_ThrowsElasticsearchException_WhenSenderThrows() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(URL, sender); @@ -71,11 +70,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) 
invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(URL, sender, "inferenceEntityId"); @@ -92,7 +91,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(URL, sender, "inferenceEntityId"); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java index cf1a569548143..a844061fa48e1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.external.action.openai; import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -19,7 +20,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.After; import org.junit.Before; @@ -28,10 +29,12 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; @@ -63,7 +66,7 @@ public void shutdown() throws IOException { } public void testCreate_OpenAiEmbeddingsModel() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -115,8 +118,173 @@ public void testCreate_OpenAiEmbeddingsModel() throws IOException { } } + public void 
testCreate_OpenAiEmbeddingsModel_WithoutUser() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), "org", "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap(null); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testCreate_OpenAiEmbeddingsModel_WithoutOrganization() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), 
is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } + + public void testCreate_OpenAiEmbeddingsModel_FailsFromInvalidResponseFormat() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data_does_not_exist": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), null, "secret", "model", null); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap("overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + assertThat(thrownException.getCause().getMessage(), is("Failed to find required field [data] in OpenAI embeddings response")); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } + public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From413StatusCode() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -199,7 +367,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From413StatusC } public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From400StatusCode() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -282,7 +450,7 @@ public void testExecute_ReturnsSuccessfulResponse_AfterTruncating_From400StatusC } public void testExecute_TruncatesInputBeforeSending() throws 
IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var sender = senderFactory.createSender("test_service")) { sender.start(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java index 6bc8e2d61d579..c803121e6da79 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; import org.junit.After; import org.junit.Before; @@ -70,7 +70,11 @@ public void shutdown() throws IOException { } public void testExecute_ReturnsSuccessfulResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + clientManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { sender.start(); @@ -131,7 +135,7 @@ public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOExcept public void testExecute_ThrowsElasticsearchException() { var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); @@ -148,11 +152,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); @@ -169,11 +173,11 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; listener.onFailure(new IllegalStateException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); 
var action = createAction(null, "org", "secret", "model", "user", sender); @@ -187,7 +191,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled public void testExecute_ThrowsException() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); @@ -201,7 +205,7 @@ public void testExecute_ThrowsException() { public void testExecute_ThrowsExceptionWithNullUrl() { var sender = mock(Sender.class); - doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any()); var action = createAction(null, "org", "secret", "model", "user", sender); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java index 940205a663337..2c63e085a9937 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; + public class RetrySettingsTests extends ESTestCase { /** @@ -24,7 +26,7 @@ public static RetrySettings createDefaultRetrySettings() { public static RetrySettings createRetrySettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { var settings = buildSettingsWithRetryFields(initialDelay, maxDelayBound, timeout); - return new RetrySettings(settings); + return new RetrySettings(settings, mockClusterServiceEmpty()); } public static Settings buildSettingsWithRetryFields(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java index 8d60c2f5bfa48..30bd40bdcc111 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java @@ -10,7 +10,9 @@ import org.apache.http.ConnectionClosedException; import org.apache.http.HttpResponse; import org.apache.http.StatusLine; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -19,14 +21,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; -import 
org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; import org.elasticsearch.xpack.inference.external.request.Request; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.Before; import org.mockito.stubbing.Answer; +import java.io.IOException; import java.net.UnknownHostException; import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.createDefaultRetrySettings; @@ -50,17 +53,17 @@ public void init() throws Exception { taskQueue = new DeterministicTaskQueue(); } - public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -71,72 +74,58 @@ public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() { // bounded wild card list, thenAnswer must be used instead. when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() { + public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() throws IOException { var statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(300).thenReturn(200); var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(statusLine); - var sender = mock(Sender.class); + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); var handler = new AlwaysRetryingResponseHandler("test", result -> inferenceResults); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - 
EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenParsingFailsOnce() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenParsingFailsOnce() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -144,34 +133,27 @@ public void testSend_CallsSenderAgain_WhenParsingFailsOnce() { var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenThrow(new RetryException(true, "failed")).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableException() { - var sender = mock(Sender.class); + public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableException() throws IOException { + var httpClient = mock(HttpClient.class); var httpResponse = mockHttpResponse(); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -179,41 +161,34 @@ public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableExce var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenThrow(new IllegalStateException("failed")).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - 
EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); - verify(sender, times(1)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new RetryException(true, "failed")); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -221,39 +196,32 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_WithContentTooLargeException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_WithContentTooLargeException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new ContentTooLargeException(new IllegalStateException("failed"))); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new 
HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -261,39 +229,32 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_W var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWithConnectionClosedException() { - var sender = mock(Sender.class); + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWithConnectionClosedException() throws IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new ConnectionClosedException("failed")); return Void.TYPE; }).doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -301,33 +262,26 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWi var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWithUnknownHostException() { - var sender = mock(Sender.class); + public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWithUnknownHostException() throws 
IOException { + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new UnknownHostException("failed")); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -335,37 +289,55 @@ public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWith var handler = mock(ResponseHandler.class); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("Invalid host [null], please check that the URL is correct.")); - verify(sender, times(1)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); + } + + public void testSend_ReturnsElasticsearchExceptionFailure_WhenTheHttpClientThrowsAnIllegalStateException() throws IOException { + var httpClient = mock(HttpClient.class); + + doAnswer(invocation -> { throw new IllegalStateException("failed"); }).when(httpClient).send(any(), any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any(), any())).thenAnswer(answer); + + var retrier = createRetrier(httpClient); + + var listener = new PlainActionFuture(); + executeTasks( + () -> retrier.send(mock(Logger.class), mockRequest("id"), HttpClientContext.create(), () -> false, handler, listener), + 0 + ); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("Http client failed to send request from inference entity id [id]")); + verify(httpClient, times(1)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterOneRetry() { + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterOneRetry() throws IOException { var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - var sender = mock(Sender.class); + var sender = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(sender).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer 
= (invocation) -> inferenceResults; @@ -376,40 +348,33 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterO .validateResponse(any(), any(), any(), any()); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(sender); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); assertThat(thrownException.getSuppressed().length, is(1)); assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); - verify(sender, times(2)).send(any(), any()); + verify(sender, times(2)).send(any(), any(), any()); verifyNoMoreInteractions(sender); } - public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchException_AfterOneRetry() { + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchException_AfterOneRetry() throws IOException { var httpResponse = mock(HttpResponse.class); when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - var sender = mock(Sender.class); + var httpClient = mock(HttpClient.class); doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new HttpResult(httpResponse, new byte[0])); return Void.TYPE; - }).when(sender).send(any(), any()); + }).when(httpClient).send(any(), any(), any()); var inferenceResults = mock(InferenceServiceResults.class); Answer answer = (invocation) -> inferenceResults; @@ -420,101 +385,74 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchExc .validateResponse(any(), any(), any(), any()); when(handler.parseResult(any(), any())).thenAnswer(answer); - var retrier = new RetryingHttpSender( - sender, - mock(ThrottlerManager.class), - mock(Logger.class), - createDefaultRetrySettings(), - taskQueue.getThreadPool(), - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + var retrier = createRetrier(httpClient); var listener = new PlainActionFuture(); - executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1); + executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1); var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed again")); assertThat(thrownException.getSuppressed().length, is(1)); assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); - verify(sender, times(2)).send(any(), any()); - verifyNoMoreInteractions(sender); + verify(httpClient, times(2)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); } - public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterOneRetry() { - var httpResponse = mock(HttpResponse.class); - when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); - - var sender = mock(Sender.class); + public void 
testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterOneRetry() throws IOException {
+        var httpClient = mock(HttpClient.class);
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
-            ActionListener listener = (ActionListener) invocation.getArguments()[1];
+            ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(new RetryException(true, "failed"));
             return Void.TYPE;
         }).doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
-            ActionListener listener = (ActionListener) invocation.getArguments()[1];
+            ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(new RetryException(false, "failed again"));
             return Void.TYPE;
-        }).when(sender).send(any(), any());
+        }).when(httpClient).send(any(), any(), any());
         var handler = mock(ResponseHandler.class);
-        var retrier = new RetryingHttpSender(
-            sender,
-            mock(ThrottlerManager.class),
-            mock(Logger.class),
-            createDefaultRetrySettings(),
-            taskQueue.getThreadPool(),
-            EsExecutors.DIRECT_EXECUTOR_SERVICE
-        );
+        var retrier = createRetrier(httpClient);
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mockRequest(), handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
         var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed again"));
         assertThat(thrownException.getSuppressed().length, is(1));
         assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed"));
-        verify(sender, times(2)).send(any(), any());
-        verifyNoMoreInteractions(sender);
+        verify(httpClient, times(2)).send(any(), any(), any());
+        verifyNoMoreInteractions(httpClient);
     }
-    public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNonRetryableException() {
-        var httpResponse = mock(HttpResponse.class);
-        when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class));
-
-        var sender = mock(Sender.class);
+    public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNonRetryableException() throws IOException {
+        var httpClient = mock(HttpClient.class);
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
-            ActionListener listener = (ActionListener) invocation.getArguments()[1];
+            ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(new IllegalStateException("failed"));
             return Void.TYPE;
-        }).when(sender).send(any(), any());
+        }).when(httpClient).send(any(), any(), any());
         var handler = mock(ResponseHandler.class);
-        var retrier = new RetryingHttpSender(
-            sender,
-            mock(ThrottlerManager.class),
-            mock(Logger.class),
-            createDefaultRetrySettings(),
-            taskQueue.getThreadPool(),
-            EsExecutors.DIRECT_EXECUTOR_SERVICE
-        );
+        var retrier = createRetrier(httpClient);
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mockRequest(), handler, listener), 0);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0);
         var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed"));
         assertThat(thrownException.getSuppressed().length, is(0));
-        verify(sender, times(1)).send(any(), any());
-        verifyNoMoreInteractions(sender);
+        verify(httpClient, times(1)).send(any(), any(), any());
+        verifyNoMoreInteractions(httpClient);
     }
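The tests above all change the same thing: the two-argument Sender.send(request, listener) stub becomes a three-argument HttpClient.send(request, context, listener) stub, so every doAnswer now pulls the listener out of argument index 2 instead of 1. The following is a minimal, self-contained Mockito sketch of that stubbing pattern; the AsyncSender interface, Consumer-based listener, and String payloads are illustrative stand-ins, not the real inference classes in this patch.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.function.Consumer;

// Editor's illustrative sketch of the doAnswer-on-a-callback pattern; not part of the patch.
public class ListenerStubbingSketch {

    // Hypothetical stand-in for the three-argument send(request, context, listener) shape used above.
    public interface AsyncSender {
        void send(Object request, Object context, Consumer<String> listener);
    }

    public static void main(String[] args) {
        AsyncSender sender = mock(AsyncSender.class);

        // Complete the callback handed in as the third argument (index 2), mirroring how the
        // tests drive the listener's onResponse/onFailure from inside doAnswer.
        doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            Consumer<String> listener = (Consumer<String>) invocation.getArguments()[2];
            listener.accept("stubbed response");
            return null;
        }).when(sender).send(any(), any(), any());

        StringBuilder received = new StringBuilder();
        sender.send("request", "context", received::append);

        // The stub runs synchronously, so the callback has already fired exactly once.
        verify(sender, times(1)).send(any(), any(), any());
        System.out.println("listener received: " + received);
    }
}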
 private static HttpResponse mockHttpResponse() {
@@ -540,10 +478,25 @@ private void executeTasks(Runnable runnable, int retries) {
     }
 
     private static Request mockRequest() {
+        return mockRequest("inferenceEntityId");
+    }
+
+    private static Request mockRequest(String inferenceEntityId) {
         var request = mock(Request.class);
         when(request.truncate()).thenReturn(request);
-        when(request.createHttpRequest()).thenReturn(HttpRequestTests.createMock("inferenceEntityId"));
+        when(request.createHttpRequest()).thenReturn(HttpRequestTests.createMock(inferenceEntityId));
+        when(request.getInferenceEntityId()).thenReturn(inferenceEntityId);
         return request;
     }
+
+    private RetryingHttpSender createRetrier(HttpClient httpClient) {
+        return new RetryingHttpSender(
+            httpClient,
+            mock(ThrottlerManager.class),
+            createDefaultRetrySettings(),
+            taskQueue.getThreadPool(),
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+    }
 }
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java
new file mode 100644
index 0000000000000..24f930a818134
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.http.sender;
+
+import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.xpack.inference.external.http.retry.RequestSender;
+import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler;
+import org.elasticsearch.xpack.inference.external.request.RequestTests;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class ExecutableRequestCreatorTests {
+    public static ExecutableRequestCreator createMock() {
+        var mockCreator = mock(ExecutableRequestCreator.class);
+        when(mockCreator.create(anyList(), any(), any(), any(), any())).thenReturn(() -> {});
+
+        return mockCreator;
+    }
+
+    public static ExecutableRequestCreator createMock(RequestSender requestSender) {
+        return createMock(requestSender, "id");
+    }
+
+    public static ExecutableRequestCreator createMock(RequestSender requestSender, String modelId) {
+        var mockCreator = mock(ExecutableRequestCreator.class);
+        when(mockCreator.create(anyList(), any(), any(), any(), any())).thenReturn(() -> {
+            requestSender.send(
+                mock(Logger.class),
+                RequestTests.mockRequest(modelId),
+                HttpClientContext.create(),
+                () -> false,
+                mock(ResponseHandler.class),
+                new PlainActionFuture<>()
+            );
+        });
+
+        return mockCreator;
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java
similarity index 53%
rename from
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java index 6b085f8dd80a7..79b17f8dff29d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; @@ -21,14 +22,13 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -36,7 +36,10 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -47,7 +50,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class HttpRequestSenderFactoryTests extends ESTestCase { +public class HttpRequestSenderTests extends ESTestCase { private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); private final MockWebServer webServer = new MockWebServer(); private ThreadPool threadPool; @@ -79,36 +82,63 @@ public void testCreateSender_SendsRequestAndReceivesResponse() throws Exception try (var sender = senderFactory.createSender("test_service")) { sender.start(); - int responseCode = randomIntBetween(200, 203); - String body = randomAlphaOfLengthBetween(2, 8096); - webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(httpPost, null, 
listener); + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator(getUrl(webServer), null, "key", "model", null), + List.of("abc"), + listener + ); var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); - assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); assertThat(webServer.requests(), hasSize(1)); - assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.httpRequestBase().getURI().getPath())); - assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); + assertNull(webServer.requests().get(0).getUri().getQuery()); assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer key")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); } } public void testHttpRequestSender_Throws_WhenCallingSendBeforeStart() throws Exception { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + clientManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); var thrownException = expectThrows( AssertionError.class, - () -> sender.send(HttpRequestTests.createMock("inferenceEntityId"), listener) + () -> sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), listener) ); assertThat(thrownException.getMessage(), is("call start() before sending a request")); } @@ -118,23 +148,27 @@ public void testHttpRequestSender_Throws_WhenATimeoutOccurs() throws Exception { var mockManager = mock(HttpClientManager.class); when(mockManager.getHttpClient()).thenReturn(mock(HttpClient.class)); - var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + mockManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { - assertThat(sender, instanceOf(HttpRequestSenderFactory.HttpRequestSender.class)); + assertThat(sender, instanceOf(HttpRequestSender.class)); // hack to get around the sender interface so we can set the timeout directly - var httpSender = (HttpRequestSenderFactory.HttpRequestSender) sender; + var httpSender = (HttpRequestSender) sender; 
httpSender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); sender.start(); - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(HttpRequestTests.createMock("inferenceEntityId"), TimeValue.timeValueNanos(1), listener); + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } } @@ -143,24 +177,28 @@ public void testHttpRequestSenderWithTimeout_Throws_WhenATimeoutOccurs() throws var mockManager = mock(HttpClientManager.class); when(mockManager.getHttpClient()).thenReturn(mock(HttpClient.class)); - var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + mockManager, + mockClusterServiceEmpty() + ); try (var sender = senderFactory.createSender("test_service")) { sender.start(); - PlainActionFuture listener = new PlainActionFuture<>(); - sender.send(HttpRequestTests.createMock("id"), TimeValue.timeValueNanos(1), listener); + PlainActionFuture listener = new PlainActionFuture<>(); + sender.send(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } } - private static HttpRequestSenderFactory createSenderFactory(HttpClientManager clientManager, AtomicReference threadRef) { + private static HttpRequestSender.Factory createSenderFactory(HttpClientManager clientManager, AtomicReference threadRef) { var mockExecutorService = mock(ExecutorService.class); doAnswer(invocation -> { Runnable runnable = (Runnable) invocation.getArguments()[0]; @@ -175,6 +213,34 @@ private static HttpRequestSenderFactory createSenderFactory(HttpClientManager cl when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(mockThreadPool.schedule(any(Runnable.class), any(), any())).thenReturn(mock(Scheduler.ScheduledCancellable.class)); - return new HttpRequestSenderFactory(mockThreadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(mockThreadPool), + clientManager, + mockClusterServiceEmpty() + ); + } + + public static HttpRequestSender.Factory createSenderFactory(ThreadPool threadPool, HttpClientManager httpClientManager) { + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithEmptySettings(threadPool), + httpClientManager, + mockClusterServiceEmpty() + ); + } + + public static HttpRequestSender.Factory createSenderFactory( + ThreadPool threadPool, + HttpClientManager httpClientManager, + Settings settings + ) { + return new HttpRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, settings), + httpClientManager, + 
mockClusterServiceEmpty() + ); + } + + public static Sender createSenderWithSingleRequestManager(HttpRequestSender.Factory factory, String serviceName) { + return factory.createSender(serviceName); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java new file mode 100644 index 0000000000000..53537a3ff77c2 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.inference.common.TruncatorTests; + +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; + +public class OpenAiEmbeddingsExecutableRequestCreatorTests { + public static OpenAiEmbeddingsExecutableRequestCreator makeCreator( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user + ) { + var model = createModel(url, org, apiKey, modelName, user); + + return new OpenAiEmbeddingsExecutableRequestCreator(model, TruncatorTests.createTruncator()); + } + + public static OpenAiEmbeddingsExecutableRequestCreator makeCreator( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user, + String inferenceEntityId + ) { + var model = createModel(url, org, apiKey, modelName, user, inferenceEntityId); + + return new OpenAiEmbeddingsExecutableRequestCreator(model, TruncatorTests.createTruncator()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java index ef8731746e187..ebad28095294b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java @@ -11,20 +11,19 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; import org.junit.After; import 
org.junit.Before; import org.mockito.ArgumentCaptor; import java.io.IOException; +import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -36,7 +35,6 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.common.AdjustableCapacityBlockingQueueTests.mockQueueCreator; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; import static org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings; import static org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettingsEmpty; import static org.hamcrest.Matchers.instanceOf; @@ -70,7 +68,7 @@ public void testQueueSize_IsEmpty() { public void testQueueSize_IsOne() { var service = createRequestExecutorServiceWithMocks(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); } @@ -83,7 +81,7 @@ public void testIsTerminated_IsFalse() { public void testIsTerminated_IsTrue() throws InterruptedException { var latch = new CountDownLatch(1); - var service = createRequestExecutorService(null, latch); + var service = createRequestExecutorService(latch, mock(RetryingHttpSender.class)); service.shutdown(); service.start(); @@ -96,19 +94,24 @@ public void testIsTerminated_AfterStopFromSeparateThread() throws Exception { var waitToShutdown = new CountDownLatch(1); var waitToReturnFromSend = new CountDownLatch(1); - var mockHttpClient = mock(HttpClient.class); + var requestSender = mock(RetryingHttpSender.class); doAnswer(invocation -> { waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(mockHttpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); - var service = createRequestExecutorService(mockHttpClient, null); + var service = createRequestExecutorService(null, requestSender); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null), + List.of(), + null, + listener + ); service.start(); @@ -127,8 +130,8 @@ public void testSend_AfterShutdown_Throws() { service.shutdown(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); @@ -142,15 +145,15 @@ public void testSend_AfterShutdown_Throws() { public void testSend_Throws_WhenQueueIsFull() { var service = new RequestExecutorService( "test_service", - mock(HttpClient.class), threadPool, null, - 
RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings(1) + RequestExecutorServiceSettingsTests.createRequestExecutorServiceSettings(1), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, new PlainActionFuture<>()); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); @@ -161,27 +164,28 @@ public void testSend_Throws_WhenQueueIsFull() { assertFalse(thrownException.isExecutorShutdown()); } - public void testTaskThrowsError_CallsOnFailure() throws Exception { - var httpClient = mock(HttpClient.class); + public void testTaskThrowsError_CallsOnFailure() { + var requestSender = mock(RetryingHttpSender.class); - var service = createRequestExecutorService(httpClient, null); + var service = createRequestExecutorService(null, requestSender); doAnswer(invocation -> { service.shutdown(); throw new IllegalArgumentException("failed"); - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); - var request = createHttpPost(0, "a", "b"); - service.execute(request, null, listener); + service.execute( + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null), + List.of(), + null, + listener + ); service.start(); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat( - thrownException.getMessage(), - is(format("Failed to send request from inference entity id [%s]", request.inferenceEntityId())) - ); + assertThat(thrownException.getMessage(), is(format("Failed to send request from inference entity id [%s]", "id"))); assertThat(thrownException.getCause(), instanceOf(IllegalArgumentException.class)); assertTrue(service.isTerminated()); } @@ -200,22 +204,23 @@ public void testShutdown_AllowsMultipleCalls() { public void testSend_CallsOnFailure_WhenRequestTimesOut() { var service = createRequestExecutorServiceWithMocks(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), TimeValue.timeValueNanos(1), listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), TimeValue.timeValueNanos(1), listener); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueNanos(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueNanos(1))) ); } public void testSend_NotifiesTasksOfShutdown() { var service = createRequestExecutorServiceWithMocks(); - var listener = new PlainActionFuture(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + var listener = new PlainActionFuture(); + service.execute(ExecutableRequestCreatorTests.createMock(), List.of(), null, listener); + 
service.shutdown(); service.start(); @@ -231,15 +236,15 @@ public void testSend_NotifiesTasksOfShutdown() { public void testQueueTake_DoesNotCauseServiceToTerminate_WhenItThrows() throws InterruptedException { @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); var service = new RequestExecutorService( getTestName(), - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); when(queue.take()).thenThrow(new ElasticsearchException("failed")).thenAnswer(invocation -> { @@ -254,16 +259,16 @@ public void testQueueTake_DoesNotCauseServiceToTerminate_WhenItThrows() throws I public void testQueueTake_ThrowingInterruptedException_TerminatesService() throws Exception { @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); when(queue.take()).thenThrow(new InterruptedException("failed")); var service = new RequestExecutorService( getTestName(), - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); Future executorTermination = threadPool.generic().submit(() -> { @@ -281,17 +286,17 @@ public void testQueueTake_ThrowingInterruptedException_TerminatesService() throw } public void testQueueTake_RejectsTask_WhenServiceShutsDown() throws Exception { - var mockTask = mock(AbstractRunnable.class); + var mockTask = mock(RejectableTask.class); @SuppressWarnings("unchecked") - BlockingQueue queue = mock(LinkedBlockingQueue.class); + BlockingQueue queue = mock(LinkedBlockingQueue.class); var service = new RequestExecutorService( "test_service", - mock(HttpClient.class), threadPool, mockQueueCreator(queue), null, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(mock(RetryingHttpSender.class)) ); doAnswer(invocation -> { @@ -316,17 +321,17 @@ public void testQueueTake_RejectsTask_WhenServiceShutsDown() throws Exception { assertTrue(rejectionException.isExecutorShutdown()); } - public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, InterruptedException, TimeoutException, IOException { - var httpClient = mock(HttpClient.class); + public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, InterruptedException, TimeoutException { + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(1); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), 
List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); assertThat( @@ -343,7 +348,7 @@ public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -354,18 +359,18 @@ public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException, assertThat(service.remainingQueueCapacity(), is(2)); } - public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull() throws IOException, ExecutionException, - InterruptedException, TimeoutException { - var httpClient = mock(HttpClient.class); + public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull() throws ExecutionException, InterruptedException, + TimeoutException { + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(3); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); assertThat(service.queueSize(), is(3)); settings.setQueueCapacity(1); @@ -377,7 +382,7 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull( waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -401,16 +406,16 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull( public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IOException, ExecutionException, InterruptedException, TimeoutException { - var httpClient = mock(HttpClient.class); + var requestSender = mock(RetryingHttpSender.class); var settings = createRequestExecutorServiceSettings(1); - var service = new RequestExecutorService("test_service", httpClient, threadPool, null, settings); + var service = new RequestExecutorService("test_service", threadPool, null, settings, new SingleRequestManager(requestSender)); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, new PlainActionFuture<>()); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, new PlainActionFuture<>()); 
assertThat(service.queueSize(), is(1)); - PlainActionFuture listener = new PlainActionFuture<>(); - service.execute(HttpRequestTests.createMock("inferenceEntityId"), null, listener); + PlainActionFuture listener = new PlainActionFuture<>(); + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); var thrownException = expectThrows(EsRejectedExecutionException.class, () -> listener.actionGet(TIMEOUT)); assertThat( @@ -427,7 +432,7 @@ public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IO waitToShutdown.countDown(); waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); @@ -458,17 +463,16 @@ private Future submitShutdownRequest( } private RequestExecutorService createRequestExecutorServiceWithMocks() { - return createRequestExecutorService(null, null); + return createRequestExecutorService(null, mock(RetryingHttpSender.class)); } - private RequestExecutorService createRequestExecutorService(@Nullable HttpClient httpClient, @Nullable CountDownLatch startupLatch) { - var httpClientToUse = httpClient == null ? mock(HttpClient.class) : httpClient; + private RequestExecutorService createRequestExecutorService(@Nullable CountDownLatch startupLatch, RetryingHttpSender requestSender) { return new RequestExecutorService( "test_service", - httpClientToUse, threadPool, startupLatch, - createRequestExecutorServiceSettingsEmpty() + createRequestExecutorServiceSettingsEmpty(), + new SingleRequestManager(requestSender) ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java index eaf1a0ac267cf..5c35d8ce49b60 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java @@ -7,30 +7,19 @@ package org.elasticsearch.xpack.inference.external.http.sender; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; -import org.apache.http.client.protocol.HttpClientContext; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.http.MockResponse; -import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.external.http.HttpClient; -import org.elasticsearch.xpack.inference.external.http.HttpResult; -import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; -import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; -import java.io.IOException; -import java.nio.charset.StandardCharsets; +import 
java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -38,16 +27,9 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createConnectionManager; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; -import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.emptyHttpSettings; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -57,134 +39,65 @@ public class RequestTaskTests extends ESTestCase { private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - private final MockWebServer webServer = new MockWebServer(); private ThreadPool threadPool; @Before public void init() throws Exception { - webServer.start(); threadPool = createThreadPool(inferenceUtilityPool()); } @After public void shutdown() { terminate(threadPool); - webServer.close(); } - public void testDoRun_SendsRequestAndReceivesResponse() throws Exception { - int responseCode = randomIntBetween(200, 203); - String body = randomAlphaOfLengthBetween(2, 8096); - webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - try (var httpClient = HttpClient.create(emptyHttpSettings(), threadPool, createConnectionManager(), mock(ThrottlerManager.class))) { - httpClient.start(); - - PlainActionFuture listener = new PlainActionFuture<>(); - var requestTask = new RequestTask(httpPost, httpClient, HttpClientContext.create(), null, threadPool, listener); - requestTask.doRun(); - var result = listener.actionGet(TIMEOUT); - - assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); - assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); - assertThat(webServer.requests(), hasSize(1)); - assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.httpRequestBase().getURI().getPath())); - assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - } - } - - public void testDoRun_SendThrowsIOException() throws Exception { - var httpClient = mock(HttpClient.class); - doThrow(new IOException("exception")).when(httpClient).send(any(), any(), any()); - - String paramKey = randomAlphaOfLength(3); - String paramValue = randomAlphaOfLength(3); - var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - - PlainActionFuture listener = new PlainActionFuture<>(); - var requestTask = new RequestTask(httpPost, httpClient, HttpClientContext.create(), null, threadPool, listener); - requestTask.doRun(); - - var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat( - 
thrownException.getMessage(), - is(format("Failed to send request from inference entity id [%s]", httpPost.inferenceEntityId())) - ); - } - - public void testRequest_DoesNotCallOnFailureForTimeout_AfterSendThrowsIllegalArgumentException() throws Exception { + public void testExecuting_DoesNotCallOnFailureForTimeout_AfterIllegalArgumentException() { AtomicReference onTimeout = new AtomicReference<>(); var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - var httpClient = mock(HttpClient.class); - doThrow(new IllegalArgumentException("failed")).when(httpClient).send(any(), any(), any()); - - var httpPost = createHttpPost(webServer.getPort(), "a", "b"); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - httpPost, - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), mockThreadPool, listener ); - requestTask.doRun(); - - ArgumentCaptor argument = ArgumentCaptor.forClass(Exception.class); - verify(listener, times(1)).onFailure(argument.capture()); - assertThat( - argument.getValue().getMessage(), - is(format("Failed to send request from inference entity id [%s]", httpPost.inferenceEntityId())) - ); - assertThat(argument.getValue(), instanceOf(ElasticsearchException.class)); - assertThat(argument.getValue().getCause(), instanceOf(IllegalArgumentException.class)); + requestTask.getListener().onFailure(new IllegalArgumentException("failed")); + verify(listener, times(1)).onFailure(any()); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); onTimeout.get().run(); verifyNoMoreInteractions(listener); } public void testRequest_ReturnsTimeoutException() { - var httpClient = mock(HttpClient.class); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener ); - requestTask.doRun(); var thrownException = expectThrows(ElasticsearchTimeoutException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); } public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exception { - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onFailure(new ElasticsearchException("failed")); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var calledOnFailureLatch = new CountDownLatch(1); doAnswer(invocation -> { calledOnFailureLatch.countDown(); @@ -192,9 +105,8 @@ public void 
testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exceptio }).when(listener).onFailure(any()); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener @@ -206,25 +118,18 @@ public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exceptio verify(listener, times(1)).onFailure(argument.capture()); assertThat( argument.getValue().getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); - requestTask.doRun(); + requestTask.getListener().onFailure(new IllegalArgumentException("failed")); verifyNoMoreInteractions(listener); } public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - var result = new HttpResult(mock(HttpResponse.class), new byte[0]); - listener.onResponse(result); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var calledOnFailureLatch = new CountDownLatch(1); doAnswer(invocation -> { calledOnFailureLatch.countDown(); @@ -232,9 +137,8 @@ public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { }).when(listener).onFailure(any()); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), threadPool, listener @@ -246,44 +150,12 @@ public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { verify(listener, times(1)).onFailure(argument.capture()); assertThat( argument.getValue().getMessage(), - is(format("Request timed out waiting to be executed after [%s]", TimeValue.timeValueMillis(1))) - ); - - requestTask.doRun(); - verifyNoMoreInteractions(listener); - } - - public void testRequest_DoesNotCallOnFailureForTimeout_AfterAlreadyCallingOnFailure() throws Exception { - AtomicReference onTimeout = new AtomicReference<>(); - var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onFailure(new ElasticsearchException("failed")); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); - - var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), - TimeValue.timeValueMillis(1), - mockThreadPool, - listener + is(format("Request timed out waiting to be sent after [%s]", TimeValue.timeValueMillis(1))) ); + assertTrue(requestTask.hasCompleted()); + 
assertTrue(requestTask.getRequestCompletedFunction().get()); - requestTask.doRun(); - - ArgumentCaptor argument = ArgumentCaptor.forClass(Exception.class); - verify(listener, times(1)).onFailure(argument.capture()); - assertThat(argument.getValue().getMessage(), is("failed")); - - onTimeout.get().run(); + requestTask.getListener().onResponse(mock(InferenceServiceResults.class)); verifyNoMoreInteractions(listener); } @@ -291,29 +163,21 @@ public void testRequest_DoesNotCallOnFailureForTimeout_AfterAlreadyCallingOnResp AtomicReference onTimeout = new AtomicReference<>(); var mockThreadPool = mockThreadPoolForTimeout(onTimeout); - var httpClient = mock(HttpClient.class); - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[0])); - return Void.TYPE; - }).when(httpClient).send(any(), any(), any()); - @SuppressWarnings("unchecked") - ActionListener listener = mock(ActionListener.class); + ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - HttpRequestTests.createMock("inferenceEntityId"), - httpClient, - HttpClientContext.create(), + OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id"), + List.of("abc"), TimeValue.timeValueMillis(1), mockThreadPool, listener ); - requestTask.doRun(); - + requestTask.getListener().onResponse(mock(InferenceServiceResults.class)); verify(listener, times(1)).onResponse(any()); + assertTrue(requestTask.hasCompleted()); + assertTrue(requestTask.getRequestCompletedFunction().get()); onTimeout.get().run(); verifyNoMoreInteractions(listener); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java new file mode 100644 index 0000000000000..ab8bf244a4d2c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/SingleRequestManagerTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class SingleRequestManagerTests extends ESTestCase { + public void testExecute_DoesNotCallRequestCreatorCreate_WhenInputIsNull() { + var requestCreator = mock(ExecutableRequestCreator.class); + var request = mock(InferenceRequest.class); + when(request.getRequestCreator()).thenReturn(requestCreator); + + new SingleRequestManager(mock(RetryingHttpSender.class)).execute(mock(InferenceRequest.class), HttpClientContext.create()); + verifyNoInteractions(requestCreator); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java deleted file mode 100644 index bb9612f01d8ff..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference.external.openai; - -import org.apache.http.HttpHeaders; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.http.MockResponse; -import org.elasticsearch.test.http.MockWebServer; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.common.TruncatorTests; -import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; -import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.services.ServiceComponents; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; -import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; -import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; -import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; -import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; -import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestTests.createRequest; -import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; -import static 
org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; -import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; -import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; - -public class OpenAiClientTests extends ESTestCase { - private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - private final MockWebServer webServer = new MockWebServer(); - private ThreadPool threadPool; - private HttpClientManager clientManager; - - @Before - public void init() throws Exception { - webServer.start(); - threadPool = createThreadPool(inferenceUtilityPool()); - clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mockThrottlerManager()); - } - - @After - public void shutdown() throws IOException { - clientManager.close(); - terminate(threadPool); - webServer.close(); - } - - public void testSend_SuccessfulResponse() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(3)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); - } - } - - public void testSend_SuccessfulResponse_WithoutUser() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; 
- webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", null), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(2)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - } - } - - public void testSend_SuccessfulResponse_WithoutOrganization() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), null, "secret", "abc", "model", null), listener); - - var result = listener.actionGet(TIMEOUT); - - assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(2)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - } - } - - public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyntaxException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); - - try (var sender = senderFactory.createSender("test_service")) { - sender.start(); - - String responseJson = """ - { - "object": "list", - "data_does_not_exist": [ - { - "object": "embedding", - "index": 0, - "embedding": [ - 0.0123, - -0.0123 - ] - } - ], - "model": "text-embedding-ada-002-v2", - "usage": { - "prompt_tokens": 8, - "total_tokens": 8 - } - } - """; - webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - - OpenAiClient openAiClient = 
new OpenAiClient( - sender, - new ServiceComponents( - threadPool, - mockThrottlerManager(), - // timeout as zero for no retries - buildSettingsWithRetryFields( - TimeValue.timeValueMillis(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueSeconds(0) - ), - TruncatorTests.createTruncator() - ) - ); - - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); - assertThat(thrownException.getMessage(), is(format("Failed to find required field [data] in OpenAI embeddings response"))); - - assertThat(webServer.requests(), hasSize(1)); - assertNull(webServer.requests().get(0).getUri().getQuery()); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); - assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); - assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); - - var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(3)); - assertThat(requestMap.get("input"), is(List.of("abc"))); - assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); - } - } - - public void testSend_ThrowsException() throws URISyntaxException, IOException { - var sender = mock(Sender.class); - doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); - - OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); - PlainActionFuture listener = new PlainActionFuture<>(); - openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); - - var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat(thrownException.getMessage(), is("failed")); - } -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java index 4c4c40e9c1056..ebff1c5e096e8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java @@ -116,7 +116,7 @@ public static OpenAiEmbeddingsRequest createRequest( String model, @Nullable String user ) { - var embeddingsModel = OpenAiEmbeddingsModelTests.createModel(url, org, apiKey, model, user, null); + var embeddingsModel = OpenAiEmbeddingsModelTests.createModel(url, org, apiKey, model, user, (Integer) null); var account = new OpenAiAccount(embeddingsModel.getServiceSettings().uri(), org, embeddingsModel.getSecretSettings().apiKey()); return new OpenAiEmbeddingsRequest( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java index 82d53cfb09037..5c438644a18c5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.junit.After; import org.junit.Before; @@ -57,7 +57,7 @@ public void shutdown() throws IOException { public void testStart_InitializesTheSender() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); try (var service = new TestSenderService(factory, createWithEmptySettings(threadPool))) { @@ -77,7 +77,7 @@ public void testStart_InitializesTheSender() throws IOException { public void testStart_CallingStartTwiceKeepsSameSenderReference() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); try (var service = new TestSenderService(factory, createWithEmptySettings(threadPool))) { @@ -98,7 +98,7 @@ public void testStart_CallingStartTwiceKeepsSameSenderReference() throws IOExcep } private static final class TestSenderService extends SenderService { - TestSenderService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + TestSenderService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java index 77713fbfc30a5..fd568bf7f15da 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java @@ -18,4 +18,8 @@ public class ServiceComponentsTests extends ESTestCase { public static ServiceComponents createWithEmptySettings(ThreadPool threadPool) { return new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY, TruncatorTests.createTruncator()); } + + public static ServiceComponents createWithSettings(ThreadPool threadPool, Settings settings) { + return new ServiceComponents(threadPool, mockThrottlerManager(), settings, TruncatorTests.createTruncator()); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 9c2722e68efd6..356da0ece08af 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -28,7 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import 
org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; @@ -93,7 +94,6 @@ public void shutdown() throws IOException { public void testParseRequestConfig_CreatesACohereEmbeddingsModel() throws IOException { try (var service = createCohereService()) { - ActionListener modelListener = ActionListener.wrap(model -> { MatcherAssert.assertThat(model, instanceOf(CohereEmbeddingsModel.class)); @@ -125,7 +125,6 @@ public void testParseRequestConfig_CreatesACohereEmbeddingsModel() throws IOExce public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { try (var service = createCohereService()) { - var failureListener = getModelListenerForException( ElasticsearchStatusException.class, "The [cohere] service does not support task type [sparse_embedding]" @@ -577,7 +576,7 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings( public void testInfer_ThrowsErrorWhenModelIsNotCohereModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); @@ -602,7 +601,7 @@ public void testInfer_ThrowsErrorWhenModelIsNotCohereModel() throws IOException } public void testInfer_SendsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ -662,7 +661,7 @@ public void testInfer_SendsRequest() throws IOException { } public void testCheckModelConfig_UpdatesDimensions() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ -725,7 +724,7 @@ public void testCheckModelConfig_UpdatesDimensions() throws IOException { } public void testInfer_UnauthorisedResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ -756,7 +755,7 @@ public void testInfer_UnauthorisedResponse() throws IOException { } public void testInfer_SetsInputTypeToIngest_FromInferParameter_WhenTaskSettingsAreEmpty() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ 
-817,7 +816,7 @@ public void testInfer_SetsInputTypeToIngest_FromInferParameter_WhenTaskSettingsA public void testInfer_SetsInputTypeToIngestFromInferParameter_WhenModelSettingIsNull_AndRequestTaskSettingsIsSearch() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ -883,7 +882,7 @@ public void testInfer_SetsInputTypeToIngestFromInferParameter_WhenModelSettingIs } public void testInfer_DoesNotSetInputType_WhenNotPresentInTaskSettings_AndUnspecifiedIsPassedInRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { @@ -957,7 +956,7 @@ private Map getRequestConfigMap( } private CohereService createCohereService() { - return new CohereService(mock(HttpRequestSenderFactory.class), createWithEmptySettings(threadPool)); + return new CohereService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } private PeristedConfig getPersistedConfigMap( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java index 345aa1a80e5bd..cd896cb18440a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceBaseServiceTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.junit.After; @@ -57,7 +57,7 @@ public void shutdown() throws IOException { public void testInfer_ThrowsErrorWhenModelIsNotHuggingFaceModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); @@ -82,8 +82,7 @@ public void testInfer_ThrowsErrorWhenModelIsNotHuggingFaceModel() throws IOExcep } private static final class TestService extends HuggingFaceBaseService { - - TestService(HttpRequestSenderFactory factory, ServiceComponents serviceComponents) { + TestService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { super(factory, serviceComponents); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java 
index 23d6bd17e48d1..c4c49065cd79c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -28,7 +28,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; @@ -83,7 +84,6 @@ public void shutdown() throws IOException { public void testParseRequestConfig_CreatesAnEmbeddingsModel() throws IOException { try (var service = createHuggingFaceService()) { - ActionListener modelVerificationActionListener = ActionListener.wrap((model) -> { assertThat(model, instanceOf(HuggingFaceEmbeddingsModel.class)); @@ -408,7 +408,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInTaskSetti } public void testInfer_SendsEmbeddingsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { @@ -446,7 +446,7 @@ public void testInfer_SendsEmbeddingsRequest() throws IOException { } public void testInfer_SendsElserRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { @@ -488,7 +488,7 @@ public void testInfer_SendsElserRequest() throws IOException { } public void testCheckModelConfig_IncludesMaxTokens() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { @@ -513,7 +513,7 @@ public void testCheckModelConfig_IncludesMaxTokens() throws IOException { } private HuggingFaceService createHuggingFaceService() { - return new HuggingFaceService(mock(HttpRequestSenderFactory.class), createWithEmptySettings(threadPool)); + return new HuggingFaceService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } private Map getRequestConfigMap(Map serviceSettings, Map secretSettings) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index e97040ed7d795..d819b2b243872 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -29,7 +29,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.ServiceFields; @@ -604,7 +605,7 @@ public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings( public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException { var sender = mock(Sender.class); - var factory = mock(HttpRequestSenderFactory.class); + var factory = mock(HttpRequestSender.Factory.class); when(factory.createSender(anyString())).thenReturn(sender); var mockModel = getInvalidModel("model_id", "service_name"); @@ -629,7 +630,7 @@ public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException } public void testInfer_SendsRequest() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -677,7 +678,7 @@ public void testInfer_SendsRequest() throws IOException { } public void testCheckModelConfig_IncludesMaxTokens() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -718,7 +719,7 @@ public void testCheckModelConfig_IncludesMaxTokens() throws IOException { } public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -769,7 +770,7 @@ public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser public void testCheckModelConfig_ReturnsModelWithDimensionsSetTo2_AndDocProductSet_IfDimensionsSetByUser_ButSetToNull() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -827,7 +828,7 @@ public void testCheckModelConfig_ReturnsModelWithDimensionsSetTo2_AndDocProductS public void testCheckModelConfig_ReturnsModelWithSameDimensions_AndDocProductSet_IfDimensionsSetByUser_AndTheyMatchReturnedSize() throws 
IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -886,7 +887,7 @@ public void testCheckModelConfig_ReturnsModelWithSameDimensions_AndDocProductSet } public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensionsField_WhenNotSetByUser() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -952,7 +953,7 @@ public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensio } public void testInfer_UnauthorisedResponse() throws IOException { - var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { @@ -1007,7 +1008,7 @@ public void testMoveModelFromTaskToServiceSettings_AlreadyMoved() { } private OpenAiService createOpenAiService() { - return new OpenAiService(mock(HttpRequestSenderFactory.class), createWithEmptySettings(threadPool)); + return new OpenAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } private Map getRequestConfigMap( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java index 01b60fdb896d0..db5febef1dab2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java @@ -47,6 +47,24 @@ public void testOverrideWith_NullMap() { assertThat(overriddenModel, sameInstance(model)); } + public static OpenAiEmbeddingsModel createModel( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user, + String inferenceEntityId + ) { + return new OpenAiEmbeddingsModel( + inferenceEntityId, + TaskType.TEXT_EMBEDDING, + "service", + new OpenAiEmbeddingsServiceSettings(modelName, url, org, SimilarityMeasure.DOT_PRODUCT, 1536, null, false), + new OpenAiEmbeddingsTaskSettings(user), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } + public static OpenAiEmbeddingsModel createModel( String url, @Nullable String org, From d72665a207a9ae3074ed391ab0407fd397b7dfd9 Mon Sep 17 00:00:00 2001 From: John Verwolf Date: Tue, 5 Mar 2024 10:47:47 -0800 Subject: [PATCH 007/248] Bugfix: Disable eager loading BitSetFilterCache on Stateless Indexing Nodes (#105791) The BitSetFilterCache is used for search traffic, which is not served on stateless indexing nodes. Thus, we can disable it and save memory. 
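In effect, eager loading of the random-access filter cache is now gated on node role and the index fast-refresh setting when the cluster runs stateless, while non-stateless deployments keep the existing behaviour driven by the index setting alone. A minimal sketch of that decision with plain booleans follows; the class and parameter names here are illustrative only, and the real check is shouldLoadRandomAccessFiltersEagerly(IndexSettings) in the diff below, which reads these flags from the index and node settings.

    // Illustrative sketch of the eager-loading decision, assuming plain boolean inputs;
    // the production code derives these from IndexSettings and the node's roles.
    public class EagerLoadDecisionSketch {
        static boolean shouldLoadEagerly(boolean eagerSetting, boolean stateless, boolean hasIndexRole, boolean fastRefresh) {
            if (stateless) {
                // stateless search nodes never serve this cache; indexing nodes only need it for fast-refresh indices
                return hasIndexRole && eagerSetting && fastRefresh;
            }
            // non-stateless deployments: the index setting alone decides, as before
            return eagerSetting;
        }

        public static void main(String[] args) {
            System.out.println(shouldLoadEagerly(true, true, false, true));   // false: stateless search-only node
            System.out.println(shouldLoadEagerly(true, true, true, false));   // false: stateless indexing node without fast refresh
            System.out.println(shouldLoadEagerly(true, false, false, false)); // true: classic deployment keeps the old behaviour
        }
    }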
--- docs/changelog/105791.yaml | 5 ++ .../index/cache/bitset/BitsetFilterCache.java | 18 +++++- .../cache/bitset/BitSetFilterCacheTests.java | 57 +++++++++++++++++++ 3 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/105791.yaml diff --git a/docs/changelog/105791.yaml b/docs/changelog/105791.yaml new file mode 100644 index 0000000000000..f18b5e6b8fdd7 --- /dev/null +++ b/docs/changelog/105791.yaml @@ -0,0 +1,5 @@ +pr: 105791 +summary: "Bugfix: Disable eager loading `BitSetFilterCache` on Indexing Nodes" +area: Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index f1f03eff88d08..f8bc40a395472 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -24,6 +24,8 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.cache.RemovalListener; @@ -55,6 +57,8 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; + /** * This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time. *

@@ -92,10 +96,22 @@ public BitsetFilterCache(IndexSettings indexSettings, Listener listener) { throw new IllegalArgumentException("listener must not be null"); } this.index = indexSettings.getIndex(); - this.loadRandomAccessFiltersEagerly = indexSettings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + this.loadRandomAccessFiltersEagerly = shouldLoadRandomAccessFiltersEagerly(indexSettings); this.listener = listener; } + static boolean shouldLoadRandomAccessFiltersEagerly(IndexSettings settings) { + boolean loadFiltersEagerlySetting = settings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + boolean isStateless = DiscoveryNode.isStateless(settings.getNodeSettings()); + if (isStateless) { + return DiscoveryNode.hasRole(settings.getNodeSettings(), DiscoveryNodeRole.INDEX_ROLE) + && loadFiltersEagerlySetting + && INDEX_FAST_REFRESH_SETTING.get(settings.getSettings()); + } else { + return loadFiltersEagerlySetting; + } + } + public static BitSet bitsetFromQuery(Query query, LeafReaderContext context) throws IOException { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); diff --git a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index 1c164e898426d..6d72649e90764 100644 --- a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -28,19 +28,27 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BitSet; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.lucene.util.MatchAllBitSet; +import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.cluster.node.DiscoveryNode.STATELESS_ENABLED_SETTING_NAME; +import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; +import static org.elasticsearch.index.cache.bitset.BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -259,4 +267,53 @@ public void onRemoval(ShardId shardId, Accountable accountable) { } } + public void testShouldLoadRandomAccessFiltersEagerly() { + var values = List.of(true, false); + for (var hasIndexRole : values) { + for (var indexFastRefresh : values) { + for (var loadFiltersEagerly : values) { + for (var isStateless : values) { + if (isStateless) { + assertEquals( + loadFiltersEagerly && indexFastRefresh && hasIndexRole, + BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( + bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly, indexFastRefresh) + ) + ); + } 
else { + assertEquals( + loadFiltersEagerly, + BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( + bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly, indexFastRefresh) + ) + ); + } + } + } + } + } + } + + private IndexSettings bitsetFilterCacheSettings( + boolean isStateless, + boolean hasIndexRole, + boolean loadFiltersEagerly, + boolean indexFastRefresh + ) { + var indexSettingsBuilder = Settings.builder().put(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), loadFiltersEagerly); + if (isStateless) indexSettingsBuilder.put(INDEX_FAST_REFRESH_SETTING.getKey(), indexFastRefresh); + + var nodeSettingsBuilder = Settings.builder() + .putList( + NodeRoleSettings.NODE_ROLES_SETTING.getKey(), + hasIndexRole ? DiscoveryNodeRole.INDEX_ROLE.roleName() : DiscoveryNodeRole.SEARCH_ROLE.roleName() + ) + .put(STATELESS_ENABLED_SETTING_NAME, isStateless); + + return IndexSettingsModule.newIndexSettings( + new Index("index", IndexMetadata.INDEX_UUID_NA_VALUE), + indexSettingsBuilder.build(), + nodeSettingsBuilder.build() + ); + } } From e8039b9ecb2451752ac5377c44a6a0c662087a9f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 5 Mar 2024 16:56:28 -0500 Subject: [PATCH 008/248] ESQL: Reenable svq tests (#105996) We fixed the test failure in #105986 but this snuck in. Closes #105952 --- .../xpack/esql/querydsl/query/SingleValueQueryTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 6465e73417ae2..1324b3977786a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -77,7 +77,6 @@ public void testMatchAll() throws IOException { testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), YesNoSometimes.NO, YesNoSometimes.NO, this::runCase); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105952") public void testMatchSome() throws IOException { int max = between(1, 100); testCase( From c0d35f1e77dd7031abf039ad7d8f3a686e1fe954 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Wed, 6 Mar 2024 09:33:11 +0100 Subject: [PATCH 009/248] [Transform] Cleanup and simplify reading transform settings (#105554) --- .../transform/transforms/SettingsConfig.java | 36 +++-- .../transform/transforms/TransformConfig.java | 2 +- .../TransformEffectiveSettings.java | 86 +++++++++++ .../transforms/SettingsConfigTests.java | 32 +++-- .../TransformEffectiveSettingsTests.java | 135 ++++++++++++++++++ .../integration/TransformProgressIT.java | 4 +- .../TransportPreviewTransformAction.java | 3 +- .../action/TransportStartTransformAction.java | 7 +- .../TimeBasedCheckpointProvider.java | 3 +- .../transform/persistence/TransformIndex.java | 5 +- .../transforms/ClientTransformIndexer.java | 10 +- .../transforms/TransformFailureHandler.java | 38 ++--- .../transforms/TransformIndexer.java | 5 +- .../transform/transforms/pivot/Pivot.java | 10 +- .../transforms/pivot/SchemaUtil.java | 9 +- .../action/TransformConfigLinterTests.java | 4 +- .../ClientTransformIndexerTests.java | 3 +- .../AggregationSchemaAndResultTests.java | 4 +- .../transforms/pivot/PivotTests.java | 16 +-- 19 files changed, 305 insertions(+), 107 deletions(-) create mode 100644 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 9b0fa3876819b..1557f2843b6af 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -31,6 +31,9 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class SettingsConfig implements Writeable, ToXContentObject { + + public static final SettingsConfig EMPTY = new SettingsConfig(null, null, null, null, null, null, null, (Integer) null); + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); @@ -110,10 +113,6 @@ private static ConstructingObjectParser createParser(boole private final Integer numFailureRetries; private final Integer unattended; - public SettingsConfig() { - this(null, null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null); - } - public SettingsConfig( Integer maxPageSearchSize, Float docsPerSecond, @@ -136,7 +135,7 @@ public SettingsConfig( ); } - SettingsConfig( + private SettingsConfig( Integer maxPageSearchSize, Float docsPerSecond, Integer datesAsEpochMillis, @@ -188,51 +187,51 @@ public Float getDocsPerSecond() { return docsPerSecond; } - public Boolean getDatesAsEpochMillis() { + Boolean getDatesAsEpochMillis() { return datesAsEpochMillis != null ? datesAsEpochMillis > 0 : null; } - public Integer getDatesAsEpochMillisForUpdate() { + Integer getDatesAsEpochMillisForUpdate() { return datesAsEpochMillis; } - public Boolean getAlignCheckpoints() { + Boolean getAlignCheckpoints() { return alignCheckpoints != null ? (alignCheckpoints > 0) || (alignCheckpoints == DEFAULT_ALIGN_CHECKPOINTS) : null; } - public Integer getAlignCheckpointsForUpdate() { + Integer getAlignCheckpointsForUpdate() { return alignCheckpoints; } - public Boolean getUsePit() { + Boolean getUsePit() { return usePit != null ? (usePit > 0) || (usePit == DEFAULT_USE_PIT) : null; } - public Integer getUsePitForUpdate() { + Integer getUsePitForUpdate() { return usePit; } - public Boolean getDeduceMappings() { + Boolean getDeduceMappings() { return deduceMappings != null ? (deduceMappings > 0) || (deduceMappings == DEFAULT_DEDUCE_MAPPINGS) : null; } - public Integer getDeduceMappingsForUpdate() { + Integer getDeduceMappingsForUpdate() { return deduceMappings; } - public Integer getNumFailureRetries() { + Integer getNumFailureRetries() { return numFailureRetries != null ? (numFailureRetries == DEFAULT_NUM_FAILURE_RETRIES ? null : numFailureRetries) : null; } - public Integer getNumFailureRetriesForUpdate() { + Integer getNumFailureRetriesForUpdate() { return numFailureRetries; } - public Boolean getUnattended() { + Boolean getUnattended() { return unattended != null ? (unattended == DEFAULT_UNATTENDED) ? 
null : (unattended > 0) : null; } - public Integer getUnattendedForUpdate() { + Integer getUnattendedForUpdate() { return unattended; } @@ -495,7 +494,7 @@ public Builder setNumFailureRetries(Integer numFailureRetries) { * An explicit `null` resets to default. * * @param unattended true if this is a unattended transform. - * @return the {@link Builder} with usePit set. + * @return the {@link Builder} with unattended set. */ public Builder setUnattended(Boolean unattended) { this.unattended = unattended == null ? DEFAULT_UNATTENDED : unattended ? 1 : 0; @@ -545,7 +544,6 @@ public Builder update(SettingsConfig update) { if (update.getUnattendedForUpdate() != null) { this.unattended = update.getUnattendedForUpdate().equals(DEFAULT_UNATTENDED) ? null : update.getUnattendedForUpdate(); } - return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index d89eb9b397180..fb782bdae0068 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -234,7 +234,7 @@ public TransformConfig( this.pivotConfig = pivotConfig; this.latestConfig = latestConfig; this.description = description; - this.settings = settings == null ? new SettingsConfig() : settings; + this.settings = settings == null ? SettingsConfig.EMPTY : settings; this.metadata = metadata; this.retentionPolicyConfig = retentionPolicyConfig; if (this.description != null && this.description.length() > MAX_DESCRIPTION_LENGTH) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java new file mode 100644 index 0000000000000..3d4b8ccc64d89 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettings.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.xpack.core.transform.TransformConfigVersion; + +public final class TransformEffectiveSettings { + + private TransformEffectiveSettings() {} + + /** + * Determines if the transform should write dates as epoch millis based on settings and version. + * + * @param settings transform's settings + * @return whether or not the transform is unattended + */ + public static boolean writeDatesAsEpochMillis(SettingsConfig settings, TransformConfigVersion version) { + // defines how dates are written, if not specified in settings + // < 7.11 as epoch millis + // >= 7.11 as string + // note: it depends on the version when the transform has been created, not the version of the code + return settings.getDatesAsEpochMillis() != null + ? settings.getDatesAsEpochMillis() + : version.before(TransformConfigVersion.V_7_11_0); + } + + /** + * Determines if aligning checkpoints is disabled for this transform based on settings. 
+ * + * @param settings transform's settings + * @return whether or not aligning checkpoints is disabled for this transform + */ + public static boolean isAlignCheckpointsDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getAlignCheckpoints()); + } + + /** + * Determines if pit is disabled for this transform based on settings. + * + * @param settings transform's settings + * @return whether or not pit is disabled for this transform + */ + public static boolean isPitDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getUsePit()); + } + + /** + * Determines if mappings deduction is disabled for this transform based on settings. + * + * @param settings transform's settings + * @return whether or not mappings deduction is disabled for this transform + */ + public static boolean isDeduceMappingsDisabled(SettingsConfig settings) { + return Boolean.FALSE.equals(settings.getDeduceMappings()); + } + + /** + * Determines the appropriate number of retries. + *

+ * The number of retries are read from the config or if not read from the context which is based on a cluster wide default. + * If the transform runs in unattended mode, the number of retries is always indefinite. + * + * @param settings transform's settings + * @return the number of retries or -1 if retries are indefinite + */ + public static int getNumFailureRetries(SettingsConfig settings, int defaultNumFailureRetries) { + return isUnattended(settings) ? -1 + : settings.getNumFailureRetries() != null ? settings.getNumFailureRetries() + : defaultNumFailureRetries; + } + + /** + * Determines if the transform is unattended based on settings. + * + * @param settings transform's settings + * @return whether or not the transform is unattended + */ + public static boolean isUnattended(SettingsConfig settings) { + return Boolean.TRUE.equals(settings.getUnattended()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java index 62b9e2e48a907..6bedd60d582dd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java @@ -33,32 +33,30 @@ public class SettingsConfigTests extends AbstractSerializingTransformTestCase instanceReader() { } public void testExplicitNullParsing() throws IOException { - // explicit null assertThat(fromString("{\"max_page_search_size\" : null}").getMaxPageSearchSize(), equalTo(-1)); // not set @@ -119,6 +116,11 @@ public void testExplicitNullParsing() throws IOException { assertThat(fromString("{\"num_failure_retries\" : null}").getNumFailureRetriesForUpdate(), equalTo(-2)); assertNull(fromString("{}").getNumFailureRetries()); assertNull(fromString("{}").getNumFailureRetriesForUpdate()); + + assertNull(fromString("{\"unattended\" : null}").getUnattended()); + assertThat(fromString("{\"unattended\" : null}").getUnattendedForUpdate(), equalTo(-1)); + assertNull(fromString("{}").getUnattended()); + assertNull(fromString("{}").getUnattendedForUpdate()); } public void testUpdateMaxPageSearchSizeUsingBuilder() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java new file mode 100644 index 0000000000000..98726d8dbf272 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformEffectiveSettingsTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.transform.TransformConfigVersion; + +public class TransformEffectiveSettingsTests extends ESTestCase { + + public void testWriteDatesAsEpochMillis() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(null).build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + // Note that the result is not the same as if we just left "setDatesAsEpochMillis" unset in the builder! + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(false).build(); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertFalse(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + + settingsConfig = new SettingsConfig.Builder().setDatesAsEpochMillis(true).build(); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_11_0)); + assertTrue(TransformEffectiveSettings.writeDatesAsEpochMillis(settingsConfig, TransformConfigVersion.V_7_10_1)); + } + + public void testIsAlignCheckpointsDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(null).build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(false).build(); + assertTrue(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setAlignCheckpoints(true).build(); + assertFalse(TransformEffectiveSettings.isAlignCheckpointsDisabled(settingsConfig)); + } + + public void testIsPitDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(null).build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(false).build(); + assertTrue(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUsePit(true).build(); + assertFalse(TransformEffectiveSettings.isPitDisabled(settingsConfig)); + } + + public void testIsDeduceMappingsDisabled() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(null).build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(false).build(); + 
assertTrue(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setDeduceMappings(true).build(); + assertFalse(TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)); + } + + public void testGetNumFailureRetries() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(null).build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(-1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(0).build(); + assertEquals(0, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(1).build(); + assertEquals(1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(10).build(); + assertEquals(10, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setNumFailureRetries(100).build(); + assertEquals(100, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + } + + public void testGetNumFailureRetries_Unattended() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().setUnattended(true).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(null).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(-1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(0).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(1).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(10).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(100).build(); + assertEquals(-1, TransformEffectiveSettings.getNumFailureRetries(settingsConfig, 10)); + } + + public void testIsUnattended() { + SettingsConfig settingsConfig = new SettingsConfig.Builder().build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(null).build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(false).build(); + assertFalse(TransformEffectiveSettings.isUnattended(settingsConfig)); + + settingsConfig = new SettingsConfig.Builder().setUnattended(true).build(); + assertTrue(TransformEffectiveSettings.isUnattended(settingsConfig)); + } +} diff --git 
a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java index c62ff49ae6865..dbe09663abc20 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java @@ -160,7 +160,7 @@ public void assertGetProgress(int userWithMissingBuckets) throws Exception { null ); - Pivot pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Pivot pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); TransformProgress progress = getProgress(pivot, getProgressQuery(pivot, config.getSource().getIndex(), null)); @@ -188,7 +188,7 @@ public void assertGetProgress(int userWithMissingBuckets) throws Exception { Collections.singletonMap("every_50", new HistogramGroupSource("missing_field", null, missingBucket, 50.0)) ); pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); - pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); progress = getProgress( pivot, diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java index 79644fac07579..f14ac9a534f28 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java @@ -52,6 +52,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.transform.TransformExtensionHolder; import org.elasticsearch.xpack.transform.persistence.TransformIndex; import org.elasticsearch.xpack.transform.transforms.Function; @@ -289,7 +290,7 @@ private void getPreview( }, listener::onFailure); ActionListener> deduceMappingsListener = ActionListener.wrap(deducedMappings -> { - if (Boolean.FALSE.equals(settingsConfig.getDeduceMappings())) { + if (TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig)) { mappings.set(emptyMap()); } else { mappings.set(deducedMappings); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index 825d0b8d12119..01359f351f07a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -39,6 +39,7 @@ import 
org.elasticsearch.xpack.core.transform.action.ValidateTransformAction; import org.elasticsearch.xpack.core.transform.transforms.AuthorizationState; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformState; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; @@ -187,7 +188,7 @@ protected void masterOperation( // <3> If the destination index exists, start the task, otherwise deduce our mappings for the destination index and create it ActionListener validationListener = ActionListener.wrap(validationResponse -> { - if (Boolean.TRUE.equals(transformConfigHolder.get().getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(transformConfigHolder.get().getSettings())) { logger.debug( () -> format("[%s] Skip dest index creation as this is an unattended transform", transformConfigHolder.get().getId()) ); @@ -205,7 +206,7 @@ protected void masterOperation( createOrGetIndexListener ); }, e -> { - if (Boolean.TRUE.equals(transformConfigHolder.get().getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(transformConfigHolder.get().getSettings())) { logger.debug( () -> format("[%s] Skip dest index creation as this is an unattended transform", transformConfigHolder.get().getId()) ); @@ -268,7 +269,7 @@ protected void masterOperation( ActionListener getTransformListener = ActionListener.wrap(config -> { transformConfigHolder.set(config); - if (Boolean.TRUE.equals(config.getSettings().getUnattended())) { + if (TransformEffectiveSettings.isUnattended(config.getSettings())) { // We do not fail the _start request of the unattended transform due to permission issues, // we just let it run fetchAuthStateListener.onResponse(null); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java index ec4cc2dcbcbf4..f49d5fc96f3ab 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.pivot.DateHistogramGroupSource; import org.elasticsearch.xpack.core.transform.transforms.pivot.SingleGroupSource; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; @@ -109,7 +110,7 @@ public void createNextCheckpoint(final TransformCheckpoint lastCheckpoint, final * @return function aligning the given timestamp with date histogram interval */ private static Function createAlignTimestampFunction(TransformConfig transformConfig) { - if (Boolean.FALSE.equals(transformConfig.getSettings().getAlignCheckpoints())) { + if (TransformEffectiveSettings.isAlignCheckpointsDisabled(transformConfig.getSettings())) { return identity(); } // In case of 
transforms created before aligning timestamp optimization was introduced we assume the default was "false". diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java index fe3d4ede898bc..e3d9fa3aff671 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.transform.transforms.DestAlias; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import java.time.Clock; @@ -128,7 +129,7 @@ public static void createDestinationIndex( // <2> Set up destination index aliases, regardless whether the destination index was created by the transform or by the user ActionListener createDestinationIndexListener = ActionListener.wrap(createdDestinationIndex -> { if (createdDestinationIndex) { - String message = Boolean.FALSE.equals(config.getSettings().getDeduceMappings()) + String message = TransformEffectiveSettings.isDeduceMappingsDisabled(config.getSettings()) ? "Created destination index [" + destinationIndex + "]." : "Created destination index [" + destinationIndex + "] with deduced mappings."; auditor.info(config.getId(), message); @@ -139,7 +140,7 @@ public static void createDestinationIndex( if (dest.length == 0) { TransformDestIndexSettings generatedDestIndexSettings = createTransformDestIndexSettings( destIndexSettings, - Boolean.FALSE.equals(config.getSettings().getDeduceMappings()) ? emptyMap() : destIndexMappings, + TransformEffectiveSettings.isDeduceMappingsDisabled(config.getSettings()) ? 
emptyMap() : destIndexMappings, config.getId(), Clock.systemUTC() ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 1634f417924c0..c68c73fd71d9e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -50,6 +50,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -131,17 +132,12 @@ class ClientTransformIndexer extends TransformIndexer { // TODO: move into context constructor context.setShouldStopAtCheckpoint(shouldStopAtCheckpoint); - if (transformConfig.getSettings().getUsePit() != null) { - disablePit = transformConfig.getSettings().getUsePit() == false; - } + disablePit = TransformEffectiveSettings.isPitDisabled(transformConfig.getSettings()); } @Override public void applyNewSettings(SettingsConfig newSettings) { - if (newSettings.getUsePit() != null) { - disablePit = newSettings.getUsePit() == false; - } - + disablePit = TransformEffectiveSettings.isPitDisabled(newSettings); super.applyNewSettings(newSettings); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index c7e0eda5ca5e6..337d3c5820c07 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -17,12 +17,11 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.utils.ExceptionRootCauseFinder; -import java.util.Optional; - import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.common.notifications.Level.INFO; import static org.elasticsearch.xpack.core.common.notifications.Level.WARNING; @@ -59,32 +58,28 @@ void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { // more detailed reporting in the handlers and below logger.atDebug().withThrowable(exception).log("[{}] transform encountered an exception", transformId); Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(exception); - boolean unattended = Boolean.TRUE.equals(settingsConfig.getUnattended()); + boolean unattended = 
TransformEffectiveSettings.isUnattended(settingsConfig); + int numFailureRetries = TransformEffectiveSettings.getNumFailureRetries(settingsConfig, context.getNumFailureRetries()); if (unwrappedException instanceof CircuitBreakingException e) { handleCircuitBreakingException(e, unattended); } else if (unwrappedException instanceof ScriptException e) { handleScriptException(e, unattended); } else if (unwrappedException instanceof BulkIndexingException e) { - handleBulkIndexingException(e, unattended, getNumFailureRetries(settingsConfig)); + handleBulkIndexingException(e, unattended, numFailureRetries); } else if (unwrappedException instanceof ClusterBlockException e) { // gh#89802 always retry for a cluster block exception, because a cluster block should be temporary. - retry(e, e.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); + retry(e, e.getDetailedMessage(), unattended, numFailureRetries); } else if (unwrappedException instanceof SearchPhaseExecutionException e) { // The reason of a SearchPhaseExecutionException unfortunately contains a full stack trace. // Instead of displaying that to the user, get the cause's message instead. - retry(e, e.getCause() != null ? e.getCause().getMessage() : null, unattended, getNumFailureRetries(settingsConfig)); + retry(e, e.getCause() != null ? e.getCause().getMessage() : null, unattended, numFailureRetries); } else if (unwrappedException instanceof ElasticsearchException e) { - handleElasticsearchException(e, unattended, getNumFailureRetries(settingsConfig)); + handleElasticsearchException(e, unattended, numFailureRetries); } else if (unwrappedException instanceof IllegalArgumentException e) { handleIllegalArgumentException(e, unattended); } else { - retry( - unwrappedException, - ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), - unattended, - getNumFailureRetries(settingsConfig) - ); + retry(unwrappedException, ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), unattended, numFailureRetries); } } @@ -98,7 +93,7 @@ void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { boolean handleStatePersistenceFailure(Exception e, SettingsConfig settingsConfig) { // we use the same setting for retries, however a separate counter, because the failure // counter for search/index gets reset after a successful bulk index request - int numFailureRetries = getNumFailureRetries(settingsConfig); + int numFailureRetries = TransformEffectiveSettings.getNumFailureRetries(settingsConfig, context.getNumFailureRetries()); int failureCount = context.incrementAndGetStatePersistenceFailureCount(e); @@ -273,19 +268,4 @@ private void fail(Throwable exception, String failureMessage) { // note: logging and audit is done as part of context.markAsFailed context.markAsFailed(exception, failureMessage); } - - /** - * Get the number of retries. - *

- * The number of retries are read from the config or if not read from the context which is based on a cluster wide - * default. If the transform runs in unattended mode, the number of retries is always indefinite. - * - * @param settingsConfig the setting config - * @return the number of retries or -1 if retries are indefinite - */ - private int getNumFailureRetries(SettingsConfig settingsConfig) { - return Boolean.TRUE.equals(settingsConfig.getUnattended()) - ? -1 - : Optional.ofNullable(settingsConfig.getNumFailureRetries()).orElse(context.getNumFailureRetries()); - } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index ff52f5e267655..38bd231e3e76a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -334,7 +335,7 @@ protected void onStart(long now, ActionListener listener) { }, listener::onFailure); var shouldMaybeCreateDestIndexForUnattended = context.getCheckpoint() == 0 - && Boolean.TRUE.equals(transformConfig.getSettings().getUnattended()); + && TransformEffectiveSettings.isUnattended(transformConfig.getSettings()); ActionListener> fieldMappingsListener = ActionListener.wrap(destIndexMappings -> { if (destIndexMappings.isEmpty() == false) { @@ -413,7 +414,7 @@ protected void onStart(long now, ActionListener listener) { hasSourceChanged = true; listener.onFailure(failure); })); - } else if (context.getCheckpoint() == 0 && Boolean.TRUE.equals(transformConfig.getSettings().getUnattended())) { + } else if (context.getCheckpoint() == 0 && TransformEffectiveSettings.isUnattended(transformConfig.getSettings())) { // this transform runs in unattended mode and has never run, to go on validate(changedSourceListener); } else { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java index 0d4dbcb6c2094..8c134b92c02af 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; import 
org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; @@ -132,14 +133,7 @@ protected Stream> extractResults( TransformIndexerStats transformIndexerStats, TransformProgress transformProgress ) { - // defines how dates are written, if not specified in settings - // < 7.11 as epoch millis - // >= 7.11 as string - // note: it depends on the version when the transform has been created, not the version of the code - boolean datesAsEpoch = settings.getDatesAsEpochMillis() != null - ? settings.getDatesAsEpochMillis() - : version.before(TransformConfigVersion.V_7_11_0); - + boolean datesAsEpoch = TransformEffectiveSettings.writeDatesAsEpochMillis(settings, version); return AggregationResultUtils.extractCompositeAggregationResults( agg, config.getGroupConfig(), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java index 48b156ce39fc2..d5e0351a8822e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; import java.math.BigDecimal; @@ -167,7 +168,7 @@ public static void deduceMappings( sourceMappings -> listener.onResponse( resolveMappings( transformId, - Boolean.FALSE.equals(settingsConfig.getDeduceMappings()) == false, + TransformEffectiveSettings.isDeduceMappingsDisabled(settingsConfig), aggregationSourceFieldNames, aggregationTypes, fieldNamesForGrouping, @@ -207,7 +208,7 @@ public static void getDestinationFieldMappings( private static Map resolveMappings( String transformId, - boolean deduceMappings, + boolean deduceMappingsDisabled, Map aggregationSourceFieldNames, Map aggregationTypes, Map fieldNamesForGrouping, @@ -244,7 +245,7 @@ private static Map resolveMappings( targetMapping.put(targetFieldName, destinationMapping); } else { logger.log( - deduceMappings ? Level.WARN : Level.INFO, + deduceMappingsDisabled ? Level.INFO : Level.WARN, "[{}] Failed to deduce mapping for [{}], fall back to dynamic mapping. " + "Create the destination index with complete mappings first to avoid deducing the mappings", transformId, @@ -260,7 +261,7 @@ private static Map resolveMappings( targetMapping.put(targetFieldName, destinationMapping); } else { logger.log( - deduceMappings ? Level.WARN : Level.INFO, + deduceMappingsDisabled ? Level.INFO : Level.WARN, "[{}] Failed to deduce mapping for [{}], fall back to keyword. 
" + "Create the destination index with complete mappings first to avoid deducing the mappings", transformId, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java index 3006717bd843b..288ec8fc7a3d7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformConfigLinterTests.java @@ -42,7 +42,7 @@ public void testGetWarnings_Pivot_WithScriptBasedRuntimeFields() { AggregationConfigTests.randomAggregationConfig(), null ); - Function function = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function function = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); SourceConfig sourceConfig = SourceConfigTests.randomSourceConfig(); assertThat(TransformConfigLinter.getWarnings(function, sourceConfig, null), is(empty())); @@ -117,7 +117,7 @@ public void testGetWarnings_Pivot_CouldNotFindAnyOptimization() { AggregationConfigTests.randomAggregationConfig(), null ); - Function function = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function function = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); SourceConfig sourceConfig = SourceConfigTests.randomSourceConfig(); SyncConfig syncConfig = TimeSyncConfigTests.randomTimeSyncConfig(); assertThat( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 43a8f35cfeafe..017fe3d289b0c 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformEffectiveSettings; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; @@ -309,7 +310,7 @@ public void testDisablePit() throws InterruptedException { } TransformConfig config = configBuilder.build(); - boolean pitEnabled = config.getSettings().getUsePit() == null || config.getSettings().getUsePit(); + boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java index 5943a9007fb7c..1eb86b813f260 100644 
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java @@ -153,7 +153,7 @@ public void testBasic() throws InterruptedException { client, emptyMap(), "my-transform", - new SettingsConfig(), + SettingsConfig.EMPTY, pivotConfig, new SourceConfig(new String[] { "source-index" }), listener @@ -233,7 +233,7 @@ public void testNested() throws InterruptedException { client, emptyMap(), "my-transform", - new SettingsConfig(), + SettingsConfig.EMPTY, pivotConfig, new SourceConfig(new String[] { "source-index" }), listener diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 5d58ac9904482..0a030d26016f7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -125,14 +125,14 @@ protected NamedXContentRegistry xContentRegistry() { public void testValidateExistingIndex() throws Exception { SourceConfig source = new SourceConfig("existing_source_index"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertValidTransform(client, source, pivot); } public void testValidateNonExistingIndex() throws Exception { SourceConfig source = new SourceConfig("non_existing_source_index"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertInvalidTransform(client, source, pivot); } @@ -142,7 +142,7 @@ public void testInitialPageSize() throws Exception { Function pivot = new Pivot( new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), expectedPageSize), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -150,7 +150,7 @@ public void testInitialPageSize() throws Exception { pivot = new Pivot( new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -164,7 +164,7 @@ public void testSearchFailure() throws Exception { // search has failures although they might just be temporary SourceConfig source = new SourceConfig("existing_source_index_with_failing_shards"); - Function pivot = new Pivot(getValidPivotConfig(), new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(getValidPivotConfig(), SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertInvalidTransform(client, source, pivot); } @@ -177,7 +177,7 @@ public void testValidateAllSupportedAggregations() throws Exception { Function pivot = new Pivot( getValidPivotConfig(aggregationConfig), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); 
@@ -191,7 +191,7 @@ public void testValidateAllUnsupportedAggregations() throws Exception { Function pivot = new Pivot( getValidPivotConfig(aggregationConfig), - new SettingsConfig(), + SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet() ); @@ -233,7 +233,7 @@ public void testGetPerformanceCriticalFields() throws IOException { assertThat(groupConfig.validate(null), is(nullValue())); PivotConfig pivotConfig = new PivotConfig(groupConfig, AggregationConfigTests.randomAggregationConfig(), null); - Function pivot = new Pivot(pivotConfig, new SettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet()); + Function pivot = new Pivot(pivotConfig, SettingsConfig.EMPTY, TransformConfigVersion.CURRENT, Collections.emptySet()); assertThat(pivot.getPerformanceCriticalFields(), contains("field-A", "field-B", "field-C")); } From 099a5a9b923bd67b584606c7554978fed6e79fa0 Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Wed, 6 Mar 2024 09:38:42 +0100 Subject: [PATCH 010/248] [Connector API] Fix default ordering in SyncJob list endpoint (#105945) --- docs/changelog/105945.yaml | 5 ++++ .../entsearch/470_connector_sync_job_list.yml | 19 ++++++++------ .../syncjob/ConnectorSyncJobIndexService.java | 4 +-- .../ConnectorSyncJobIndexServiceTests.java | 26 ++++++++++--------- 4 files changed, 32 insertions(+), 22 deletions(-) create mode 100644 docs/changelog/105945.yaml diff --git a/docs/changelog/105945.yaml b/docs/changelog/105945.yaml new file mode 100644 index 0000000000000..ec76faf6ef76f --- /dev/null +++ b/docs/changelog/105945.yaml @@ -0,0 +1,5 @@ +pr: 105945 +summary: "[Connector API] Fix default ordering in `SyncJob` list endpoint" +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml index 8d23850f49840..82d9a18bb51e9 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/470_connector_sync_job_list.yml @@ -50,10 +50,10 @@ setup: - match: { count: 3 } - # Ascending order by creation_date for results - - match: { results.0.id: $sync-job-one-id } + # Descending order by creation_date for results + - match: { results.0.id: $sync-job-three-id } - match: { results.1.id: $sync-job-two-id } - - match: { results.2.id: $sync-job-three-id } + - match: { results.2.id: $sync-job-one-id } --- "List Connector Sync Jobs - with from": @@ -84,9 +84,9 @@ setup: - match: { count: 3 } - # Ascending order by creation_date for results + # Descending order by creation_date for results - match: { results.0.id: $sync-job-two-id } - - match: { results.1.id: $sync-job-three-id } + - match: { results.1.id: $sync-job-one-id } --- "List Connector Sync Jobs - with size": @@ -117,7 +117,8 @@ setup: - match: { count: 3 } - - match: { results.0.id: $sync-job-one-id } + # Descending order by creation_date for results + - match: { results.0.id: $sync-job-three-id } --- "List Connector Sync Jobs - Get pending jobs": @@ -216,9 +217,11 @@ setup: connector_sync_job.list: connector_id: connector-one job_type: full,incremental + + # Descending order by creation_date for results - match: { count: 2 } - - match: { results.0.id: $sync-job-one-id } - - match: { results.1.id: $sync-job-two-id } + - 
match: { results.0.id: $sync-job-two-id } + - match: { results.1.id: $sync-job-one-id } --- "List Connector Sync Jobs - with invalid job type": diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index 3ac598fd58ee8..d1d345840874f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -293,7 +293,7 @@ public void cancelConnectorSyncJob(String connectorSyncJobId, ActionListener Date: Wed, 6 Mar 2024 09:25:52 +0000 Subject: [PATCH 011/248] Make sure we test the listener is called (#105914) --- .../org/elasticsearch/xpack/core/ilm/DeleteStepTests.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java index 5851ebe2fb3c9..7445e82da3ecf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteStepTests.java @@ -20,6 +20,7 @@ import org.mockito.Mockito; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.hamcrest.Matchers.is; @@ -158,14 +159,17 @@ public void testPerformActionCallsFailureListenerIfIndexIsTheDataStreamWriteInde .metadata(Metadata.builder().put(index1, false).put(sourceIndexMetadata, false).put(dataStream).build()) .build(); + AtomicBoolean listenerCalled = new AtomicBoolean(false); createRandomInstance().performDuringNoSnapshot(sourceIndexMetadata, clusterState, new ActionListener<>() { @Override public void onResponse(Void complete) { + listenerCalled.set(true); fail("unexpected listener callback"); } @Override public void onFailure(Exception e) { + listenerCalled.set(true); assertThat( e.getMessage(), is( @@ -180,5 +184,7 @@ public void onFailure(Exception e) { ); } }); + + assertThat(listenerCalled.get(), is(true)); } } From c70956ac167908c111df33ccb845d5601f99cebc Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 6 Mar 2024 11:03:42 +0100 Subject: [PATCH 012/248] [text structure] Find field and message structure endpoints (#105660) * Extract AbstractFindStructureRequest * Extract FindStructureResponse * Extract RestFindStructureRequestParser * FindFieldStructure endpoint * FindMessageStructure endpoint * Improve FindTextStructureResponseTests * REST API spec + YAML REST tests * Lint fixes * Remove POST find_field_structure * Update docs/changelog/105660.yaml * Update changelog * Fix text_structure.find_field_structure.json * Fix find_field_structure yaml rest test * Fix FindTextStructureResponseTests * Fix YAML tests with security * Remove unreachable code * DelimitedTextStructureFinder::createFromMessages * NdJsonTextStructureFinderFactory::createFromMessages * XmlTextStructureFinderFactory::createFromMessages * LogTextStructureFinderFactory::createFromMessages * Lint fixes * Add createFromMessages to TextStructureFinderFactory interface * Wire createFromMessages in the endpoints * 
Uppercase UTF-8 * REST test for semi-structured messages * Restrict query params to applicable endpoints * typo * Polish thread scheduling * Propagate parent task in search request * No header row for find message/field structure * Expose findTextStructure more consistently * Move text structure query params to shared doc * Rename "find structure API" -> "find text structure API" * Find message structure API docs * Find field structure docs * Maybe fix docs error? * bugfix * Fix docs? * Fix find-field-structure test from docs * Improve docs * Add param documents_to_sample to docs * improve docs --- docs/changelog/105660.yaml | 5 + .../apis/find-field-structure.asciidoc | 316 ++++++++++++++ .../apis/find-message-structure.asciidoc | 292 +++++++++++++ .../apis/find-structure-shared.asciidoc | 215 ++++++++++ .../apis/find-structure.asciidoc | 201 +-------- .../text-structure/apis/index.asciidoc | 4 + .../text_structure.find_field_structure.json | 90 ++++ ...text_structure.find_message_structure.json | 80 ++++ .../action/AbstractFindStructureRequest.java | 377 +++++++++++++++++ .../action/FindFieldStructureAction.java | 98 +++++ .../action/FindMessageStructureAction.java | 97 +++++ .../action/FindStructureAction.java | 389 +----------------- .../action/FindStructureResponse.java | 61 +++ .../FindTextStructureActionResponseTests.java | 29 -- .../FindTextStructureResponseTests.java | 33 ++ .../xpack/security/operator/Constants.java | 2 + .../text_structure/find_field_structure.yml | 63 +++ .../text_structure/find_message_structure.yml | 56 +++ .../text-structure-with-security/build.gradle | 2 +- .../qa/text-structure-with-security/roles.yml | 12 + .../textstructure/TextStructurePlugin.java | 15 +- .../rest/RestFindFieldStructureAction.java | 51 +++ .../rest/RestFindMessageStructureAction.java | 55 +++ .../rest/RestFindStructureAction.java | 38 +- .../RestFindStructureArgumentsParser.java | 73 ++++ .../DelimitedTextStructureFinder.java | 54 +-- .../DelimitedTextStructureFinderFactory.java | 40 +- .../LogTextStructureFinder.java | 141 ++++++- .../LogTextStructureFinderFactory.java | 13 + .../NdJsonTextStructureFinderFactory.java | 23 ++ .../TextStructureFinderFactory.java | 9 + .../TextStructureFinderManager.java | 91 +++- .../TextStructureOverrides.java | 3 +- .../XmlTextStructureFinderFactory.java | 64 ++- .../TransportFindFieldStructureAction.java | 94 +++++ .../TransportFindMessageStructureAction.java | 56 +++ .../TransportFindStructureAction.java | 38 +- ...imitedTextStructureFinderFactoryTests.java | 18 + .../DelimitedTextStructureFinderTests.java | 24 ++ .../LogTextStructureFinderTests.java | 16 + ...NdJsonTextStructureFinderFactoryTests.java | 18 + .../XmlTextStructureFinderFactoryTests.java | 18 + 42 files changed, 2640 insertions(+), 734 deletions(-) create mode 100644 docs/changelog/105660.yaml create mode 100644 docs/reference/text-structure/apis/find-field-structure.asciidoc create mode 100644 docs/reference/text-structure/apis/find-message-structure.asciidoc create mode 100644 docs/reference/text-structure/apis/find-structure-shared.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java create mode 100644 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml create mode 100644 x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java create mode 100644 x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java create mode 100644 x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java create mode 100644 x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java create mode 100644 x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java diff --git a/docs/changelog/105660.yaml b/docs/changelog/105660.yaml new file mode 100644 index 0000000000000..1b30a25417906 --- /dev/null +++ b/docs/changelog/105660.yaml @@ -0,0 +1,5 @@ +pr: 105660 +summary: "Text structure endpoints to determine the structure of a list of messages and of an indexed field" +area: Machine Learning +type: feature +issues: [] diff --git a/docs/reference/text-structure/apis/find-field-structure.asciidoc b/docs/reference/text-structure/apis/find-field-structure.asciidoc new file mode 100644 index 0000000000000..6788ddf7f42be --- /dev/null +++ b/docs/reference/text-structure/apis/find-field-structure.asciidoc @@ -0,0 +1,316 @@ +[role="xpack"] +[[find-field-structure]] += Find field structure API + +Finds the structure of a field in an Elasticsearch index. + +[discrete] +[[find-field-structure-request]] +== {api-request-title} + +`GET _text_structure/find_field_structure` + +[discrete] +[[find-field-structure-prereqs]] +== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_text_structure` or +`monitor` cluster privileges to use this API. See +<>. + +[discrete] +[[find-field-structure-desc]] +== {api-description-title} + +This API provides a starting point for extracting further information from log messages +already ingested into {es}. For example, if you have ingested data into a very simple +index that has just `@timestamp` and `message` fields, you can use this API to +see what common structure exists in the `message` field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within +the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write +ingest configurations to index it or similarly formatted text. 
+* Appropriate mappings for an {es} index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text +structure by specifying one or more query parameters. + +Details of the output can be seen in the <>. + +If the structure finder produces unexpected results, +specify the `explain` query parameter and an `explanation` will appear in +the response. It helps determine why the returned structure was +chosen. + +[discrete] +[[find-field-structure-query-parms]] +== {api-query-parms-title} + +`index`:: +(Required, string) The name of the index containing the field. + +`field`:: +(Required, string) The name of the field that's analyzed. + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] + +`documents_to_sample`:: +(Optional, unsigned integer) The number of documents to include in the structural +analysis. The minimum is 2; the default is 1000. + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] + +[discrete] +[[find-field-structure-examples]] +== {api-examples-title} + +[discrete] +[[find-field-structure-example]] +=== Analyzing Elasticsearch log files + +Suppose you have a list of {es} log messages in an index. 
+You can analyze them with the `find_field_structure` endpoint as follows: + +[source,console] +---- +POST _bulk?refresh=true +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized"} +{"index":{"_index":"test-logs"}} +{"message":"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..."} + +GET _text_structure/find_field_structure?index=test-logs&field=message +---- +// TEST + +If the request does not 
encounter errors, you receive the following result: + +[source,console-result] +---- +{ + "num_lines_analyzed" : 22, + "num_messages_analyzed" : 22, + "sample_start" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\n", <3> + "charset" : "UTF-8", + "format" : "semi_structured_text", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", + "ecs_compatibility" : "disabled", + "timestamp_field" : "timestamp", + "joda_timestamp_formats" : [ + "ISO8601" + ], + "java_timestamp_formats" : [ + "ISO8601" + ], + "need_client_timezone" : true, + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date" + }, + "loglevel" : { + "type" : "keyword" + }, + "message" : { + "type" : "text" + } + } + }, + "ingest_pipeline" : { + "description" : "Ingest pipeline created by text structure finder", + "processors" : [ + { + "grok" : { + "field" : "message", + "patterns" : [ + "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*" + ], + "ecs_compatibility" : "disabled" + } + }, + { + "date" : { + "field" : "timestamp", + "timezone" : "{{ event.timezone }}", + "formats" : [ + "ISO8601" + ] + } + }, + { + "remove" : { + "field" : "timestamp" + } + } + ] + }, + "field_stats" : { + "loglevel" : { + "count" : 22, + "cardinality" : 1, + "top_hits" : [ + { + "value" : "INFO", + "count" : 22 + } + ] + }, + "message" : { + "count" : 22, + "cardinality" : 22, + "top_hits" : [ + { + "value" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "count" : 1 + } + ] + }, + "timestamp" : { + "count" : 22, + "cardinality" : 14, + "earliest" : "2024-03-05T10:52:36,256", + "latest" : "2024-03-05T10:52:49,199", + "top_hits" : [ + { + "value" : "2024-03-05T10:52:41,044", + "count" : 6 + }, + { + "value" : "2024-03-05T10:52:41,043", + "count" : 3 + }, + { + "value" : "2024-03-05T10:52:41,059", + "count" : 2 + }, + { + "value" : "2024-03-05T10:52:36,256", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,038", + "count" : 1 + }, + { + 
"value" : "2024-03-05T10:52:41,042", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:43,291", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:46,098", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,227", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,259", + "count" : 1 + } + ] + } + } +} +---- +// TESTRESPONSE[s/"sample_start" : ".*",/"sample_start" : "$body.sample_start",/] +// The substitution is because the text is pre-processed by the test harness, +// so the fields may get reordered in the JSON the endpoint sees + +For a detailed description of the response format, or for additional examples +on ingesting delimited text (such as CSV) or newline-delimited JSON, refer to the +<>. diff --git a/docs/reference/text-structure/apis/find-message-structure.asciidoc b/docs/reference/text-structure/apis/find-message-structure.asciidoc new file mode 100644 index 0000000000000..085f65b852126 --- /dev/null +++ b/docs/reference/text-structure/apis/find-message-structure.asciidoc @@ -0,0 +1,292 @@ +[role="xpack"] +[[find-message-structure]] += Find messages structure API + +Finds the structure of a list of text messages. + +[discrete] +[[find-message-structure-request]] +== {api-request-title} + +`GET _text_structure/find_message_structure` + +`POST _text_structure/find_message_structure` + +[discrete] +[[find-message-structure-prereqs]] +== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_text_structure` or +`monitor` cluster privileges to use this API. See +<>. + +[discrete] +[[find-message-structure-desc]] +== {api-description-title} + +This API provides a starting point for ingesting data into {es} in a format that +is suitable for subsequent use with other {stack} functionality. Use this +API in preference to `find_structure` when your input text has already been +split up into separate messages by some other process. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within +the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write +ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an {es} index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text +structure by specifying one or more query parameters. + +Details of the output can be seen in the <>. + +If the structure finder produces unexpected results, +specify the `explain` query parameter and an `explanation` will appear in +the response. It helps determine why the returned structure was +chosen. 
+ +[discrete] +[[find-message-structure-query-parms]] +== {api-query-parms-title} + +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] + +[discrete] +[[find-message-structure-request-body]] +== {api-request-body-title} + +`messages`:: +(Required, array of strings) +The list of messages you want to analyze. + +[discrete] +[[find-message-structure-examples]] +== {api-examples-title} + +[discrete] +[[find-message-structure-example]] +=== Analyzing Elasticsearch log files + +Suppose you have a list of {es} logs messages. +You can send it to the `find_message_structure` endpoint as follows: + +[source,console] +---- +POST _text_structure/find_message_structure +{ + "messages": [ + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] 
profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." + ] +} +---- +// TEST + +If the request does not encounter errors, you receive the following result: + +[source,console-result] +---- +{ + "num_lines_analyzed" : 22, + "num_messages_analyzed" : 22, + "sample_start" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\n[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]\n", <3> + "charset" : "UTF-8", + "format" : "semi_structured_text", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", + "ecs_compatibility" : "disabled", + "timestamp_field" : "timestamp", + "joda_timestamp_formats" : [ + "ISO8601" + ], + "java_timestamp_formats" : [ + "ISO8601" + ], + "need_client_timezone" : true, + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date" + }, + "loglevel" : { + "type" : "keyword" + }, + "message" : { + "type" : "text" + } + } + }, + "ingest_pipeline" : { + "description" : "Ingest pipeline created by text structure finder", + "processors" : [ + { + "grok" : { + "field" : "message", + "patterns" : [ + "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*" + ], + "ecs_compatibility" : "disabled" + } + }, + { + "date" : { + "field" : "timestamp", + "timezone" : "{{ event.timezone }}", + "formats" : [ + "ISO8601" + ] + } + }, + { + "remove" : { + "field" : "timestamp" + } + } + ] + }, + "field_stats" : { + "loglevel" : { + "count" : 22, + "cardinality" : 1, + "top_hits" : [ + { + "value" : "INFO", + "count" : 22 + } + ] + }, + "message" : { + "count" : 22, + "cardinality" : 22, + "top_hits" : [ + { + "value" : "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "count" : 1 + }, + { + "value" : "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] 
[laptop] loaded module [x-pack-autoscaling]", + "count" : 1 + } + ] + }, + "timestamp" : { + "count" : 22, + "cardinality" : 14, + "earliest" : "2024-03-05T10:52:36,256", + "latest" : "2024-03-05T10:52:49,199", + "top_hits" : [ + { + "value" : "2024-03-05T10:52:41,044", + "count" : 6 + }, + { + "value" : "2024-03-05T10:52:41,043", + "count" : 3 + }, + { + "value" : "2024-03-05T10:52:41,059", + "count" : 2 + }, + { + "value" : "2024-03-05T10:52:36,256", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,038", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:41,042", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:43,291", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:46,098", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,227", + "count" : 1 + }, + { + "value" : "2024-03-05T10:52:47,259", + "count" : 1 + } + ] + } + } +} +---- +// TESTRESPONSE + +For a detailed description of the response format, or for additional examples +on ingesting delimited text (such as CSV) or newline-delimited JSON, refer to the +<>. diff --git a/docs/reference/text-structure/apis/find-structure-shared.asciidoc b/docs/reference/text-structure/apis/find-structure-shared.asciidoc new file mode 100644 index 0000000000000..67a85dd072a9a --- /dev/null +++ b/docs/reference/text-structure/apis/find-structure-shared.asciidoc @@ -0,0 +1,215 @@ +tag::param-charset[] +`charset`:: +(Optional, string) The text's character set. It must be a character set that is +supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, +`windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure +finder chooses an appropriate character set. +end::param-charset[] + +tag::param-column-names[] +`column_names`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +column names in a comma-separated list. If this parameter is not specified, the +structure finder uses the column names from the header row of the text. If the +text does not have a header row, columns are named "column1", "column2", +"column3", etc. +end::param-column-names[] + +tag::param-delimiter[] +`delimiter`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +character used to delimit the values in each row. Only a single character is +supported; the delimiter cannot have multiple characters. By default, the API +considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the +delimited format to be detected. If you specify a delimiter, up to 10% of the +rows can have a different number of columns than the first row. +end::param-delimiter[] + +tag::param-explain[] +`explain`:: +(Optional, Boolean) If `true`, the response includes a +field named `explanation`, which is an array of strings that indicate how the +structure finder produced its result. The default value is `false`. +end::param-explain[] + +tag::param-format[] +`format`:: +(Optional, string) The high level structure of the text. Valid values are +`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API +chooses the format. In this default scenario, all rows must have the same number +of fields for a delimited format to be detected. If the `format` is set to +`delimited` and the `delimiter` is not set, however, the API tolerates up to 5% +of rows that have a different number of columns than the first row. 
+end::param-format[] + +tag::param-grok-pattern[] +`grok_pattern`:: +(Optional, string) If you have set `format` to `semi_structured_text`, you can +specify a Grok pattern that is used to extract fields from every message in the +text. The name of the timestamp field in the Grok pattern must match what is +specified in the `timestamp_field` parameter. If that parameter is not +specified, the name of the timestamp field in the Grok pattern must match +"timestamp". If `grok_pattern` is not specified, the structure finder creates a +Grok pattern. +end::param-grok-pattern[] + +tag::param-ecs-compatibility[] +`ecs_compatibility`:: +(Optional, string) The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of +legacy ones when the structure finder creates a Grok pattern. Valid values +are `disabled` and `v1`. The default value is `disabled`. This setting primarily +has an impact when a whole message Grok pattern such as `%{CATALINALOG}` +matches the input. If the structure finder identifies a common structure but +has no idea of meaning then generic field names such as `path`, `ipaddress`, +`field1` and `field2` are used in the `grok_pattern` output, with the intention +that a user who knows the meanings rename these fields before using it. +end::param-ecs-compatibility[] + +tag::param-has-header-row[] +`has_header_row`:: +(Optional, Boolean) If you have set `format` to `delimited`, you can use this +parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the +similarity of the first row of the text to other rows. +end::param-has-header-row[] + +tag::param-line-merge-size-limit[] +`line_merge_size_limit`:: +(Optional, unsigned integer) The maximum number of characters in a message when +lines are merged to form messages while analyzing semi-structured text. The +default is `10000`. If you have extremely long messages you may need to increase +this, but be aware that this may lead to very long processing times if the way +to group lines into messages is misdetected. +end::param-line-merge-size-limit[] + +tag::param-lines-to-sample[] +`lines_to_sample`:: +(Optional, unsigned integer) The number of lines to include in the structural +analysis, starting from the beginning of the text. The minimum is 2; the default +is `1000`. If the value of this parameter is greater than the number of lines in +the text, the analysis proceeds (as long as there are at least two lines in the +text) for all of the lines. ++ +-- +NOTE: The number of lines and the variation of the lines affects the speed of +the analysis. For example, if you upload text where the first 1000 lines +are all variations on the same message, the analysis will find more commonality +than would be seen with a bigger sample. If possible, however, it is more +efficient to upload sample text with more variety in the first 1000 lines than +to request analysis of 100000 lines to achieve some variety. + +-- +end::param-lines-to-sample[] + +tag::param-quote[] +`quote`:: +(Optional, string) If you have set `format` to `delimited`, you can specify the +character used to quote the values in each row if they contain newlines or the +delimiter character. Only a single character is supported. If this parameter is +not specified, the default value is a double quote (`"`). 
If your delimited text +format does not use quoting, a workaround is to set this argument to a character +that does not appear anywhere in the sample. +end::param-quote[] + +tag::param-should-trim-fields[] +`should_trim_fields`:: +(Optional, Boolean) If you have set `format` to `delimited`, you can specify +whether values between delimiters should have whitespace trimmed from them. If +this parameter is not specified and the delimiter is pipe (`|`), the default +value is `true`. Otherwise, the default value is `false`. +end::param-should-trim-fields[] + +tag::param-timeout[] +`timeout`:: +(Optional, <>) Sets the maximum amount of time that the +structure analysis may take. If the analysis is still running when the timeout +expires then it will be stopped. The default value is 25 seconds. +end::param-timeout[] + +tag::param-timestamp-field[] +`timestamp_field`:: +(Optional, string) The name of the field that contains the primary timestamp of +each record in the text. In particular, if the text were ingested into an index, +this is the field that would be used to populate the `@timestamp` field. ++ +-- +If the `format` is `semi_structured_text`, this field must match the name of the +appropriate extraction in the `grok_pattern`. Therefore, for semi-structured +text, it is best not to specify this parameter unless `grok_pattern` is +also specified. + +For structured text, if you specify this parameter, the field must exist +within the text. + +If this parameter is not specified, the structure finder makes a decision about +which field (if any) is the primary timestamp field. For structured text, +it is not compulsory to have a timestamp in the text. +-- +end::param-timestamp-field[] + +tag::param-timestamp-format[] +`timestamp_format`:: +(Optional, string) The Java time format of the timestamp field in the text. ++ +-- +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are +supported providing they occur after `ss` and separated from the `ss` by a `.`, +`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, +newline and carriage return, together with literal text enclosed in single +quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override +format. + +One valuable use case for this parameter is when the format is semi-structured +text, there are multiple timestamp formats in the text, and you know which +format corresponds to the primary timestamp, but you do not want to specify the +full `grok_pattern`. Another is when the timestamp format is one that the +structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best +format from a built-in set. + +If the special value `null` is specified the structure finder will not look +for a primary timestamp in the text. When the format is semi-structured text +this will result in the structure finder treating the text as single-line +messages. 
+ +The following table provides the appropriate `timeformat` values for some example timestamps: + +|=== +| Timeformat | Presentation + +| yyyy-MM-dd HH:mm:ssZ | 2019-04-20 13:15:22+0000 +| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 +| dd.MM.yy HH:mm:ss.SSS | 20.04.19 13:15:22.285 +|=== + +Refer to +https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[the Java date/time format documentation] +for more information about date and time format syntax. + +-- +end::param-timestamp-format[] diff --git a/docs/reference/text-structure/apis/find-structure.asciidoc b/docs/reference/text-structure/apis/find-structure.asciidoc index a65f87290b0a8..b49b0f3526689 100644 --- a/docs/reference/text-structure/apis/find-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-structure.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[find-structure]] -= Find structure API += Find text structure API Finds the structure of text. The text must contain data that is suitable to be ingested into the @@ -55,190 +55,21 @@ chosen. [[find-structure-query-parms]] == {api-query-parms-title} -`charset`:: -(Optional, string) The text's character set. It must be a character set that is -supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, -`windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure -finder chooses an appropriate character set. - -`column_names`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -column names in a comma-separated list. If this parameter is not specified, the -structure finder uses the column names from the header row of the text. If the -text does not have a header role, columns are named "column1", "column2", -"column3", etc. - -`delimiter`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -character used to delimit the values in each row. Only a single character is -supported; the delimiter cannot have multiple characters. By default, the API -considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). -In this default scenario, all rows must have the same number of fields for the -delimited format to be detected. If you specify a delimiter, up to 10% of the -rows can have a different number of columns than the first row. - -`explain`:: -(Optional, Boolean) If this parameter is set to `true`, the response includes a -field named `explanation`, which is an array of strings that indicate how the -structure finder produced its result. The default value is `false`. - -`format`:: -(Optional, string) The high level structure of the text. Valid values are -`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API -chooses the format. In this default scenario, all rows must have the same number -of fields for a delimited format to be detected. If the `format` is set to -`delimited` and the `delimiter` is not set, however, the API tolerates up to 5% -of rows that have a different number of columns than the first row. - -`grok_pattern`:: -(Optional, string) If you have set `format` to `semi_structured_text`, you can -specify a Grok pattern that is used to extract fields from every message in the -text. The name of the timestamp field in the Grok pattern must match what is -specified in the `timestamp_field` parameter. If that parameter is not -specified, the name of the timestamp field in the Grok pattern must match -"timestamp". If `grok_pattern` is not specified, the structure finder creates a -Grok pattern. 
- -`ecs_compatibility`:: -(Optional, string) The mode of compatibility with ECS compliant Grok patterns. -Use this parameter to specify whether to use ECS Grok patterns instead of -legacy ones when the structure finder creates a Grok pattern. Valid values -are `disabled` and `v1`. The default value is `disabled`. This setting primarily -has an impact when a whole message Grok pattern such as `%{CATALINALOG}` -matches the input. If the structure finder identifies a common structure but -has no idea of meaning then generic field names such as `path`, `ipaddress`, -`field1` and `field2` are used in the `grok_pattern` output, with the intention -that a user who knows the meanings rename these fields before using it. -`has_header_row`:: -(Optional, Boolean) If you have set `format` to `delimited`, you can use this -parameter to indicate whether the column names are in the first row of the text. -If this parameter is not specified, the structure finder guesses based on the -similarity of the first row of the text to other rows. - -`line_merge_size_limit`:: -(Optional, unsigned integer) The maximum number of characters in a message when -lines are merged to form messages while analyzing semi-structured text. The -default is `10000`. If you have extremely long messages you may need to increase -this, but be aware that this may lead to very long processing times if the way -to group lines into messages is misdetected. - -`lines_to_sample`:: -(Optional, unsigned integer) The number of lines to include in the structural -analysis, starting from the beginning of the text. The minimum is 2; the default -is `1000`. If the value of this parameter is greater than the number of lines in -the text, the analysis proceeds (as long as there are at least two lines in the -text) for all of the lines. -+ --- -NOTE: The number of lines and the variation of the lines affects the speed of -the analysis. For example, if you upload text where the first 1000 lines -are all variations on the same message, the analysis will find more commonality -than would be seen with a bigger sample. If possible, however, it is more -efficient to upload sample text with more variety in the first 1000 lines than -to request analysis of 100000 lines to achieve some variety. - --- - -`quote`:: -(Optional, string) If you have set `format` to `delimited`, you can specify the -character used to quote the values in each row if they contain newlines or the -delimiter character. Only a single character is supported. If this parameter is -not specified, the default value is a double quote (`"`). If your delimited text -format does not use quoting, a workaround is to set this argument to a character -that does not appear anywhere in the sample. - -`should_trim_fields`:: -(Optional, Boolean) If you have set `format` to `delimited`, you can specify -whether values between delimiters should have whitespace trimmed from them. If -this parameter is not specified and the delimiter is pipe (`|`), the default -value is `true`. Otherwise, the default value is `false`. - -`timeout`:: -(Optional, <>) Sets the maximum amount of time that the -structure analysis make take. If the analysis is still running when the timeout -expires then it will be aborted. The default value is 25 seconds. - -`timestamp_field`:: -(Optional, string) The name of the field that contains the primary timestamp of -each record in the text. In particular, if the text were ingested into an index, -this is the field that would be used to populate the `@timestamp` field. 
-+ --- -If the `format` is `semi_structured_text`, this field must match the name of the -appropriate extraction in the `grok_pattern`. Therefore, for semi-structured -text, it is best not to specify this parameter unless `grok_pattern` is -also specified. - -For structured text, if you specify this parameter, the field must exist -within the text. - -If this parameter is not specified, the structure finder makes a decision about -which field (if any) is the primary timestamp field. For structured text, -it is not compulsory to have a timestamp in the text. --- - -`timestamp_format`:: -(Optional, string) The Java time format of the timestamp field in the text. -+ --- -Only a subset of Java time format letter groups are supported: - -* `a` -* `d` -* `dd` -* `EEE` -* `EEEE` -* `H` -* `HH` -* `h` -* `M` -* `MM` -* `MMM` -* `MMMM` -* `mm` -* `ss` -* `XX` -* `XXX` -* `yy` -* `yyyy` -* `zzz` - -Additionally `S` letter groups (fractional seconds) of length one to nine are -supported providing they occur after `ss` and separated from the `ss` by a `.`, -`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, -newline and carriage return, together with literal text enclosed in single -quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override -format. - -One valuable use case for this parameter is when the format is semi-structured -text, there are multiple timestamp formats in the text, and you know which -format corresponds to the primary timestamp, but you do not want to specify the -full `grok_pattern`. Another is when the timestamp format is one that the -structure finder does not consider by default. - -If this parameter is not specified, the structure finder chooses the best -format from a built-in set. - -If the special value `null` is specified the structure finder will not look -for a primary timestamp in the text. When the format is semi-structured text -this will result in the structure finder treating the text as single-line -messages. - -The following table provides the appropriate `timeformat` values for some example timestamps: - -|=== -| Timeformat | Presentation - -| yyyy-MM-dd HH:mm:ssZ | 2019-04-20 13:15:22+0000 -| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 -| dd.MM.yy HH:mm:ss.SSS | 20.04.19 13:15:22.285 -|=== - -See -https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[the Java date/time format documentation] -for more information about date and time format syntax. 
- --- +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-charset] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-column-names] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-delimiter] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-explain] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-format] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-grok-pattern] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-ecs-compatibility] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-has-header-row] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-line-merge-size-limit] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-lines-to-sample] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-quote] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-should-trim-fields] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timeout] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-field] +include::{es-repo-dir}/text-structure/apis/find-structure-shared.asciidoc[tag=param-timestamp-format] [discrete] [[find-structure-request-body]] diff --git a/docs/reference/text-structure/apis/index.asciidoc b/docs/reference/text-structure/apis/index.asciidoc index 8628badba7e78..9f4af120690f7 100644 --- a/docs/reference/text-structure/apis/index.asciidoc +++ b/docs/reference/text-structure/apis/index.asciidoc @@ -4,8 +4,12 @@ You can use the following APIs to find text structures: +* <> +* <> * <> * <> +include::find-field-structure.asciidoc[leveloffset=+2] +include::find-message-structure.asciidoc[leveloffset=+2] include::find-structure.asciidoc[leveloffset=+2] include::test-grok-pattern.asciidoc[leveloffset=+2] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json new file mode 100644 index 0000000000000..f82e2ca2d190f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_field_structure.json @@ -0,0 +1,90 @@ +{ + "text_structure.find_field_structure":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html", + "description":"Finds the structure of a text field in an index." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_text_structure/find_field_structure", + "methods":["GET"] + } + ] + }, + "params":{ + "index":{ + "type":"string", + "description":"The index containing the analyzed field", + "required":true + }, + "field":{ + "type":"string", + "description":"The field that should be analyzed", + "required":true + }, + "documents_to_sample":{ + "type":"int", + "description":"How many documents should be included in the analysis", + "default":1000 + }, + "timeout":{ + "type":"time", + "description":"Timeout after which the analysis will be aborted", + "default":"25s" + }, + "format":{ + "type":"enum", + "options":[ + "ndjson", + "xml", + "delimited", + "semi_structured_text" + ], + "description":"Optional parameter to specify the high level file format" + }, + "column_names":{ + "type":"list", + "description":"Optional parameter containing a comma separated list of the column names for a delimited file" + }, + "delimiter":{ + "type":"string", + "description":"Optional parameter to specify the delimiter character for a delimited file - must be a single character" + }, + "quote":{ + "type":"string", + "description":"Optional parameter to specify the quote character for a delimited file - must be a single character" + }, + "should_trim_fields":{ + "type":"boolean", + "description":"Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them" + }, + "grok_pattern":{ + "type":"string", + "description":"Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file" + }, + "ecs_compatibility":{ + "type":"string", + "description":"Optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'" + }, + "timestamp_field":{ + "type":"string", + "description":"Optional parameter to specify the timestamp field in the file" + }, + "timestamp_format":{ + "type":"string", + "description":"Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format" + }, + "explain":{ + "type":"boolean", + "description":"Whether to include a commentary on how the structure was derived", + "default":false + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json new file mode 100644 index 0000000000000..d839e4b048f7d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_message_structure.json @@ -0,0 +1,80 @@ +{ + "text_structure.find_message_structure":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html", + "description":"Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_text_structure/find_message_structure", + "methods":["GET", "POST"] + } + ] + }, + "params":{ + "timeout":{ + "type":"time", + "description":"Timeout after which the analysis will be aborted", + "default":"25s" + }, + "format":{ + "type":"enum", + "options":[ + "ndjson", + "xml", + "delimited", + "semi_structured_text" + ], + "description":"Optional parameter to specify the high level file format" + }, + "column_names":{ + "type":"list", + "description":"Optional parameter containing a comma separated list of the column names for a delimited file" + }, + "delimiter":{ + "type":"string", + "description":"Optional parameter to specify the delimiter character for a delimited file - must be a single character" + }, + "quote":{ + "type":"string", + "description":"Optional parameter to specify the quote character for a delimited file - must be a single character" + }, + "should_trim_fields":{ + "type":"boolean", + "description":"Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them" + }, + "grok_pattern":{ + "type":"string", + "description":"Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file" + }, + "ecs_compatibility":{ + "type":"string", + "description":"Optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'" + }, + "timestamp_field":{ + "type":"string", + "description":"Optional parameter to specify the timestamp field in the file" + }, + "timestamp_format":{ + "type":"string", + "description":"Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format" + }, + "explain":{ + "type":"boolean", + "description":"Whether to include a commentary on how the structure was derived", + "default":false + } + }, + "body":{ + "description":"JSON object with one field [messages], containing an array of messages to be analyzed", + "required":true + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java new file mode 100644 index 0000000000000..e06ffd3b95a05 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/AbstractFindStructureRequest.java @@ -0,0 +1,377 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.grok.GrokBuiltinPatterns; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public abstract class AbstractFindStructureRequest extends ActionRequest { + + public static final int MIN_SAMPLE_LINE_COUNT = 2; + + public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); + public static final ParseField DOCUMENTS_TO_SAMPLE = new ParseField("documents_to_sample"); + public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField CHARSET = TextStructure.CHARSET; + public static final ParseField FORMAT = TextStructure.FORMAT; + public static final ParseField COLUMN_NAMES = TextStructure.COLUMN_NAMES; + public static final ParseField HAS_HEADER_ROW = TextStructure.HAS_HEADER_ROW; + public static final ParseField DELIMITER = TextStructure.DELIMITER; + public static final ParseField QUOTE = TextStructure.QUOTE; + public static final ParseField SHOULD_TRIM_FIELDS = TextStructure.SHOULD_TRIM_FIELDS; + public static final ParseField GROK_PATTERN = TextStructure.GROK_PATTERN; + // This one is plural in FileStructure, but singular in FileStructureOverrides + public static final ParseField TIMESTAMP_FORMAT = new ParseField("timestamp_format"); + public static final ParseField TIMESTAMP_FIELD = TextStructure.TIMESTAMP_FIELD; + + public static final ParseField ECS_COMPATIBILITY = TextStructure.ECS_COMPATIBILITY; + + private static final String ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE = "[%s] may only be specified if [" + + FORMAT.getPreferredName() + + "] is [%s]"; + + private Integer linesToSample; + private Integer lineMergeSizeLimit; + private TimeValue timeout; + private String charset; + private TextStructure.Format format; + private List columnNames; + private Boolean hasHeaderRow; + private Character delimiter; + private Character quote; + private Boolean shouldTrimFields; + private String grokPattern; + private String ecsCompatibility; + private String timestampFormat; + private String timestampField; + + AbstractFindStructureRequest() {} + + AbstractFindStructureRequest(StreamInput in) throws IOException { + super(in); + linesToSample = in.readOptionalVInt(); + lineMergeSizeLimit = in.readOptionalVInt(); + timeout = in.readOptionalTimeValue(); + charset = in.readOptionalString(); + format = in.readBoolean() ? in.readEnum(TextStructure.Format.class) : null; + columnNames = in.readBoolean() ? in.readStringCollectionAsList() : null; + hasHeaderRow = in.readOptionalBoolean(); + delimiter = in.readBoolean() ? (char) in.readVInt() : null; + quote = in.readBoolean() ? 
(char) in.readVInt() : null; + shouldTrimFields = in.readOptionalBoolean(); + grokPattern = in.readOptionalString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { + ecsCompatibility = in.readOptionalString(); + } else { + ecsCompatibility = null; + } + timestampFormat = in.readOptionalString(); + timestampField = in.readOptionalString(); + } + + public Integer getLinesToSample() { + return linesToSample; + } + + public void setLinesToSample(Integer linesToSample) { + this.linesToSample = linesToSample; + } + + public Integer getLineMergeSizeLimit() { + return lineMergeSizeLimit; + } + + public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { + this.lineMergeSizeLimit = lineMergeSizeLimit; + } + + public TimeValue getTimeout() { + return timeout; + } + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + public String getCharset() { + return charset; + } + + public void setCharset(String charset) { + this.charset = (charset == null || charset.isEmpty()) ? null : charset; + } + + public TextStructure.Format getFormat() { + return format; + } + + public void setFormat(TextStructure.Format format) { + this.format = format; + } + + public void setFormat(String format) { + this.format = (format == null || format.isEmpty()) ? null : TextStructure.Format.fromString(format); + } + + public List getColumnNames() { + return columnNames; + } + + public void setColumnNames(List columnNames) { + this.columnNames = (columnNames == null || columnNames.isEmpty()) ? null : columnNames; + } + + public void setColumnNames(String[] columnNames) { + this.columnNames = (columnNames == null || columnNames.length == 0) ? null : Arrays.asList(columnNames); + } + + public Boolean getHasHeaderRow() { + return hasHeaderRow; + } + + public void setHasHeaderRow(Boolean hasHeaderRow) { + this.hasHeaderRow = hasHeaderRow; + } + + public Character getDelimiter() { + return delimiter; + } + + public void setDelimiter(Character delimiter) { + this.delimiter = delimiter; + } + + public void setDelimiter(String delimiter) { + if (delimiter == null || delimiter.isEmpty()) { + this.delimiter = null; + } else if (delimiter.length() == 1) { + this.delimiter = delimiter.charAt(0); + } else { + throw new IllegalArgumentException(DELIMITER.getPreferredName() + " must be a single character"); + } + } + + public Character getQuote() { + return quote; + } + + public void setQuote(Character quote) { + this.quote = quote; + } + + public void setQuote(String quote) { + if (quote == null || quote.isEmpty()) { + this.quote = null; + } else if (quote.length() == 1) { + this.quote = quote.charAt(0); + } else { + throw new IllegalArgumentException(QUOTE.getPreferredName() + " must be a single character"); + } + } + + public Boolean getShouldTrimFields() { + return shouldTrimFields; + } + + public void setShouldTrimFields(Boolean shouldTrimFields) { + this.shouldTrimFields = shouldTrimFields; + } + + public String getGrokPattern() { + return grokPattern; + } + + public void setGrokPattern(String grokPattern) { + this.grokPattern = (grokPattern == null || grokPattern.isEmpty()) ? null : grokPattern; + } + + public String getEcsCompatibility() { + return ecsCompatibility; + } + + public void setEcsCompatibility(String ecsCompatibility) { + this.ecsCompatibility = (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? 
null : ecsCompatibility; + } + + public String getTimestampFormat() { + return timestampFormat; + } + + public void setTimestampFormat(String timestampFormat) { + this.timestampFormat = (timestampFormat == null || timestampFormat.isEmpty()) ? null : timestampFormat; + } + + public String getTimestampField() { + return timestampField; + } + + public void setTimestampField(String timestampField) { + this.timestampField = (timestampField == null || timestampField.isEmpty()) ? null : timestampField; + } + + private static ActionRequestValidationException addIncompatibleArgError( + ParseField arg, + TextStructure.Format format, + ActionRequestValidationException validationException + ) { + return addValidationError( + String.format(Locale.ROOT, ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE, arg.getPreferredName(), format), + validationException + ); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (linesToSample != null && linesToSample < MIN_SAMPLE_LINE_COUNT) { + validationException = addValidationError( + "[" + LINES_TO_SAMPLE.getPreferredName() + "] must be at least [" + MIN_SAMPLE_LINE_COUNT + "] if specified", + validationException + ); + } + if (lineMergeSizeLimit != null && lineMergeSizeLimit <= 0) { + validationException = addValidationError( + "[" + LINE_MERGE_SIZE_LIMIT.getPreferredName() + "] must be positive if specified", + validationException + ); + } + if (format != TextStructure.Format.DELIMITED) { + if (columnNames != null) { + validationException = addIncompatibleArgError(COLUMN_NAMES, TextStructure.Format.DELIMITED, validationException); + } + if (hasHeaderRow != null) { + validationException = addIncompatibleArgError(HAS_HEADER_ROW, TextStructure.Format.DELIMITED, validationException); + } + if (delimiter != null) { + validationException = addIncompatibleArgError(DELIMITER, TextStructure.Format.DELIMITED, validationException); + } + if (quote != null) { + validationException = addIncompatibleArgError(QUOTE, TextStructure.Format.DELIMITED, validationException); + } + if (shouldTrimFields != null) { + validationException = addIncompatibleArgError(SHOULD_TRIM_FIELDS, TextStructure.Format.DELIMITED, validationException); + } + } + if (format != TextStructure.Format.SEMI_STRUCTURED_TEXT) { + if (grokPattern != null) { + validationException = addIncompatibleArgError(GROK_PATTERN, TextStructure.Format.SEMI_STRUCTURED_TEXT, validationException); + } + } + + if (ecsCompatibility != null && GrokBuiltinPatterns.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + validationException = addValidationError( + "[" + + ECS_COMPATIBILITY.getPreferredName() + + "] must be one of [" + + String.join(", ", GrokBuiltinPatterns.ECS_COMPATIBILITY_MODES) + + "] if specified", + validationException + ); + } + + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalVInt(linesToSample); + out.writeOptionalVInt(lineMergeSizeLimit); + out.writeOptionalTimeValue(timeout); + out.writeOptionalString(charset); + if (format == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeEnum(format); + } + if (columnNames == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeStringCollection(columnNames); + } + out.writeOptionalBoolean(hasHeaderRow); + if (delimiter == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(delimiter); + } + 
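+        // Like the delimiter above, the optional quote character is written as a presence flag followed by the char value as a VInt.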
if (quote == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(quote); + } + out.writeOptionalBoolean(shouldTrimFields); + out.writeOptionalString(grokPattern); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { + out.writeOptionalString(ecsCompatibility); + } + out.writeOptionalString(timestampFormat); + out.writeOptionalString(timestampField); + } + + @Override + public int hashCode() { + return Objects.hash( + linesToSample, + lineMergeSizeLimit, + timeout, + charset, + format, + columnNames, + hasHeaderRow, + delimiter, + grokPattern, + ecsCompatibility, + timestampFormat, + timestampField + ); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + AbstractFindStructureRequest that = (AbstractFindStructureRequest) other; + return Objects.equals(this.linesToSample, that.linesToSample) + && Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) + && Objects.equals(this.timeout, that.timeout) + && Objects.equals(this.charset, that.charset) + && Objects.equals(this.format, that.format) + && Objects.equals(this.columnNames, that.columnNames) + && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) + && Objects.equals(this.delimiter, that.delimiter) + && Objects.equals(this.grokPattern, that.grokPattern) + && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) + && Objects.equals(this.timestampFormat, that.timestampFormat) + && Objects.equals(this.timestampField, that.timestampField); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java new file mode 100644 index 0000000000000..2e6f3af312e2b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFieldStructureAction.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ParseField; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class FindFieldStructureAction extends ActionType { + + public static final FindFieldStructureAction INSTANCE = new FindFieldStructureAction(); + public static final String NAME = "cluster:monitor/text_structure/find_field_structure"; + + private FindFieldStructureAction() { + super(NAME); + } + + public static class Request extends AbstractFindStructureRequest { + + public static final ParseField INDEX = new ParseField("index"); + public static final ParseField FIELD = new ParseField("field"); + + private String index; + private String field; + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + index = in.readString(); + field = in.readString(); + } + + public String getIndex() { + return index; + } + + public void setIndex(String index) { + this.index = index; + } + + public String getField() { + return field; + } + + public void setField(String field) { + this.field = field; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (Strings.isNullOrEmpty(index)) { + validationException = addValidationError("index must be specified", validationException); + } + if (Strings.isNullOrEmpty(field)) { + validationException = addValidationError("field must be specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + out.writeString(field); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), field, index); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + Request that = (Request) other; + return super.equals(other) && Objects.equals(this.index, that.index) && Objects.equals(this.field, that.field); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java new file mode 100644 index 0000000000000..49035b36ff42c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindMessageStructureAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class FindMessageStructureAction extends ActionType { + + public static final FindMessageStructureAction INSTANCE = new FindMessageStructureAction(); + public static final String NAME = "cluster:monitor/text_structure/find_message_structure"; + + private FindMessageStructureAction() { + super(NAME); + } + + public static class Request extends AbstractFindStructureRequest { + + public static final ParseField MESSAGES = new ParseField("messages"); + + private List messages; + + private static final ObjectParser PARSER = createParser(); + + private static ObjectParser createParser() { + ObjectParser parser = new ObjectParser<>("text_structure/find_message_structure", false, Request::new); + parser.declareStringArray(Request::setMessages, MESSAGES); + return parser; + } + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + messages = in.readStringCollectionAsList(); + } + + public static Request parseRequest(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public List getMessages() { + return messages; + } + + public void setMessages(List messages) { + this.messages = messages; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (messages == null || messages.isEmpty()) { + validationException = addValidationError("messages must be specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringCollection(messages); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), messages); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + Request that = (Request) other; + return super.equals(other) && Objects.equals(this.messages, that.messages); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java index 98bdff8cbced7..15aa3be46a675 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java @@ -6,290 +6,37 @@ */ package org.elasticsearch.xpack.core.textstructure.action; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.grok.GrokBuiltinPatterns; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class FindStructureAction extends ActionType { +public class FindStructureAction extends ActionType { public static final FindStructureAction INSTANCE = new FindStructureAction(); public static final String NAME = "cluster:monitor/text_structure/findstructure"; - public static final int MIN_SAMPLE_LINE_COUNT = 2; - private FindStructureAction() { super(NAME); } - public static class Response extends ActionResponse implements ToXContentObject, Writeable { - - private final TextStructure textStructure; - - public Response(TextStructure textStructure) { - this.textStructure = textStructure; - } - - Response(StreamInput in) throws IOException { - super(in); - textStructure = new TextStructure(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - textStructure.writeTo(out); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - textStructure.toXContent(builder, params); - return builder; - } - - @Override - public int hashCode() { - return Objects.hash(textStructure); - } - - @Override - public boolean equals(Object other) { - - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - FindStructureAction.Response that = (FindStructureAction.Response) other; - return Objects.equals(textStructure, that.textStructure); - } - } - - public static class Request extends ActionRequest { - - public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); - public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); - public static final ParseField TIMEOUT = new ParseField("timeout"); - public static final ParseField CHARSET = TextStructure.CHARSET; - public static final ParseField FORMAT = TextStructure.FORMAT; - public static final ParseField COLUMN_NAMES = TextStructure.COLUMN_NAMES; - public static final ParseField HAS_HEADER_ROW = TextStructure.HAS_HEADER_ROW; - public static final ParseField DELIMITER = TextStructure.DELIMITER; - public static final ParseField QUOTE = TextStructure.QUOTE; - public static final ParseField SHOULD_TRIM_FIELDS = TextStructure.SHOULD_TRIM_FIELDS; - public static final ParseField GROK_PATTERN = TextStructure.GROK_PATTERN; - // This one is plural in FileStructure, but singular in FileStructureOverrides - public static final ParseField TIMESTAMP_FORMAT = new ParseField("timestamp_format"); - public static final ParseField TIMESTAMP_FIELD = TextStructure.TIMESTAMP_FIELD; + public static class Request extends AbstractFindStructureRequest { - public static final ParseField ECS_COMPATIBILITY = TextStructure.ECS_COMPATIBILITY; - - private static final String ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE = "[%s] may only be specified if [" - + FORMAT.getPreferredName() - + "] is [%s]"; - - 
private Integer linesToSample; - private Integer lineMergeSizeLimit; - private TimeValue timeout; - private String charset; - private TextStructure.Format format; - private List columnNames; - private Boolean hasHeaderRow; - private Character delimiter; - private Character quote; - private Boolean shouldTrimFields; - private String grokPattern; - private String ecsCompatibility; - private String timestampFormat; - private String timestampField; private BytesReference sample; public Request() {} public Request(StreamInput in) throws IOException { super(in); - linesToSample = in.readOptionalVInt(); - lineMergeSizeLimit = in.readOptionalVInt(); - timeout = in.readOptionalTimeValue(); - charset = in.readOptionalString(); - format = in.readBoolean() ? in.readEnum(TextStructure.Format.class) : null; - columnNames = in.readBoolean() ? in.readStringCollectionAsList() : null; - hasHeaderRow = in.readOptionalBoolean(); - delimiter = in.readBoolean() ? (char) in.readVInt() : null; - quote = in.readBoolean() ? (char) in.readVInt() : null; - shouldTrimFields = in.readOptionalBoolean(); - grokPattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { - ecsCompatibility = in.readOptionalString(); - } else { - ecsCompatibility = null; - } - timestampFormat = in.readOptionalString(); - timestampField = in.readOptionalString(); sample = in.readBytesReference(); } - public Integer getLinesToSample() { - return linesToSample; - } - - public void setLinesToSample(Integer linesToSample) { - this.linesToSample = linesToSample; - } - - public Integer getLineMergeSizeLimit() { - return lineMergeSizeLimit; - } - - public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { - this.lineMergeSizeLimit = lineMergeSizeLimit; - } - - public TimeValue getTimeout() { - return timeout; - } - - public void setTimeout(TimeValue timeout) { - this.timeout = timeout; - } - - public String getCharset() { - return charset; - } - - public void setCharset(String charset) { - this.charset = (charset == null || charset.isEmpty()) ? null : charset; - } - - public TextStructure.Format getFormat() { - return format; - } - - public void setFormat(TextStructure.Format format) { - this.format = format; - } - - public void setFormat(String format) { - this.format = (format == null || format.isEmpty()) ? null : TextStructure.Format.fromString(format); - } - - public List getColumnNames() { - return columnNames; - } - - public void setColumnNames(List columnNames) { - this.columnNames = (columnNames == null || columnNames.isEmpty()) ? null : columnNames; - } - - public void setColumnNames(String[] columnNames) { - this.columnNames = (columnNames == null || columnNames.length == 0) ? 
null : Arrays.asList(columnNames); - } - - public Boolean getHasHeaderRow() { - return hasHeaderRow; - } - - public void setHasHeaderRow(Boolean hasHeaderRow) { - this.hasHeaderRow = hasHeaderRow; - } - - public Character getDelimiter() { - return delimiter; - } - - public void setDelimiter(Character delimiter) { - this.delimiter = delimiter; - } - - public void setDelimiter(String delimiter) { - if (delimiter == null || delimiter.isEmpty()) { - this.delimiter = null; - } else if (delimiter.length() == 1) { - this.delimiter = delimiter.charAt(0); - } else { - throw new IllegalArgumentException(DELIMITER.getPreferredName() + " must be a single character"); - } - } - - public Character getQuote() { - return quote; - } - - public void setQuote(Character quote) { - this.quote = quote; - } - - public void setQuote(String quote) { - if (quote == null || quote.isEmpty()) { - this.quote = null; - } else if (quote.length() == 1) { - this.quote = quote.charAt(0); - } else { - throw new IllegalArgumentException(QUOTE.getPreferredName() + " must be a single character"); - } - } - - public Boolean getShouldTrimFields() { - return shouldTrimFields; - } - - public void setShouldTrimFields(Boolean shouldTrimFields) { - this.shouldTrimFields = shouldTrimFields; - } - - public String getGrokPattern() { - return grokPattern; - } - - public void setGrokPattern(String grokPattern) { - this.grokPattern = (grokPattern == null || grokPattern.isEmpty()) ? null : grokPattern; - } - - public String getEcsCompatibility() { - return ecsCompatibility; - } - - public void setEcsCompatibility(String ecsCompatibility) { - this.ecsCompatibility = (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? null : ecsCompatibility; - } - - public String getTimestampFormat() { - return timestampFormat; - } - - public void setTimestampFormat(String timestampFormat) { - this.timestampFormat = (timestampFormat == null || timestampFormat.isEmpty()) ? null : timestampFormat; - } - - public String getTimestampField() { - return timestampField; - } - - public void setTimestampField(String timestampField) { - this.timestampField = (timestampField == null || timestampField.isEmpty()) ? 
null : timestampField; - } - public BytesReference getSample() { return sample; } @@ -298,70 +45,9 @@ public void setSample(BytesReference sample) { this.sample = sample; } - private static ActionRequestValidationException addIncompatibleArgError( - ParseField arg, - TextStructure.Format format, - ActionRequestValidationException validationException - ) { - return addValidationError( - String.format(Locale.ROOT, ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE, arg.getPreferredName(), format), - validationException - ); - } - @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (linesToSample != null && linesToSample < MIN_SAMPLE_LINE_COUNT) { - validationException = addValidationError( - "[" + LINES_TO_SAMPLE.getPreferredName() + "] must be at least [" + MIN_SAMPLE_LINE_COUNT + "] if specified", - validationException - ); - } - if (lineMergeSizeLimit != null && lineMergeSizeLimit <= 0) { - validationException = addValidationError( - "[" + LINE_MERGE_SIZE_LIMIT.getPreferredName() + "] must be positive if specified", - validationException - ); - } - if (format != TextStructure.Format.DELIMITED) { - if (columnNames != null) { - validationException = addIncompatibleArgError(COLUMN_NAMES, TextStructure.Format.DELIMITED, validationException); - } - if (hasHeaderRow != null) { - validationException = addIncompatibleArgError(HAS_HEADER_ROW, TextStructure.Format.DELIMITED, validationException); - } - if (delimiter != null) { - validationException = addIncompatibleArgError(DELIMITER, TextStructure.Format.DELIMITED, validationException); - } - if (quote != null) { - validationException = addIncompatibleArgError(QUOTE, TextStructure.Format.DELIMITED, validationException); - } - if (shouldTrimFields != null) { - validationException = addIncompatibleArgError(SHOULD_TRIM_FIELDS, TextStructure.Format.DELIMITED, validationException); - } - } - if (format != TextStructure.Format.SEMI_STRUCTURED_TEXT) { - if (grokPattern != null) { - validationException = addIncompatibleArgError( - GROK_PATTERN, - TextStructure.Format.SEMI_STRUCTURED_TEXT, - validationException - ); - } - } - - if (ecsCompatibility != null && GrokBuiltinPatterns.isValidEcsCompatibilityMode(ecsCompatibility) == false) { - validationException = addValidationError( - "[" - + ECS_COMPATIBILITY.getPreferredName() - + "] must be one of [" - + String.join(", ", GrokBuiltinPatterns.ECS_COMPATIBILITY_MODES) - + "] if specified", - validationException - ); - } - + ActionRequestValidationException validationException = super.validate(); if (sample == null || sample.length() == 0) { validationException = addValidationError("sample must be specified", validationException); } @@ -371,89 +57,24 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalVInt(linesToSample); - out.writeOptionalVInt(lineMergeSizeLimit); - out.writeOptionalTimeValue(timeout); - out.writeOptionalString(charset); - if (format == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeEnum(format); - } - if (columnNames == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeStringCollection(columnNames); - } - out.writeOptionalBoolean(hasHeaderRow); - if (delimiter == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(delimiter); - } - if (quote == null) { - out.writeBoolean(false); - } else { - 
out.writeBoolean(true); - out.writeVInt(quote); - } - out.writeOptionalBoolean(shouldTrimFields); - out.writeOptionalString(grokPattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { - out.writeOptionalString(ecsCompatibility); - } - out.writeOptionalString(timestampFormat); - out.writeOptionalString(timestampField); out.writeBytesReference(sample); } @Override public int hashCode() { - return Objects.hash( - linesToSample, - lineMergeSizeLimit, - timeout, - charset, - format, - columnNames, - hasHeaderRow, - delimiter, - grokPattern, - ecsCompatibility, - timestampFormat, - timestampField, - sample - ); + return Objects.hash(super.hashCode(), sample); } @Override public boolean equals(Object other) { - if (this == other) { return true; } - if (other == null || getClass() != other.getClass()) { return false; } - Request that = (Request) other; - return Objects.equals(this.linesToSample, that.linesToSample) - && Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) - && Objects.equals(this.timeout, that.timeout) - && Objects.equals(this.charset, that.charset) - && Objects.equals(this.format, that.format) - && Objects.equals(this.columnNames, that.columnNames) - && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) - && Objects.equals(this.delimiter, that.delimiter) - && Objects.equals(this.grokPattern, that.grokPattern) - && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) - && Objects.equals(this.timestampFormat, that.timestampFormat) - && Objects.equals(this.timestampField, that.timestampField) - && Objects.equals(this.sample, that.sample); + return super.equals(other) && Objects.equals(this.sample, that.sample); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java new file mode 100644 index 0000000000000..5848c2cbd0a1d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureResponse.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Objects; + +public class FindStructureResponse extends ActionResponse implements ToXContentObject, Writeable { + + private final TextStructure textStructure; + + public FindStructureResponse(TextStructure textStructure) { + this.textStructure = textStructure; + } + + FindStructureResponse(StreamInput in) throws IOException { + super(in); + textStructure = new TextStructure(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + textStructure.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + textStructure.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(textStructure); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + FindStructureResponse that = (FindStructureResponse) other; + return Objects.equals(textStructure, that.textStructure); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java deleted file mode 100644 index 31dbfc7dccff3..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionResponseTests.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.core.textstructure.action; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructureTests; - -public class FindTextStructureActionResponseTests extends AbstractWireSerializingTestCase { - - @Override - protected FindStructureAction.Response createTestInstance() { - return new FindStructureAction.Response(TextStructureTests.createTestFileStructure()); - } - - @Override - protected FindStructureAction.Response mutateInstance(FindStructureAction.Response instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Writeable.Reader instanceReader() { - return FindStructureAction.Response::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java new file mode 100644 index 0000000000000..887d75e3751c5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureResponseTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.textstructure.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructureTests; + +public class FindTextStructureResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected FindStructureResponse createTestInstance() { + return new FindStructureResponse(TextStructureTests.createTestFileStructure()); + } + + @Override + protected FindStructureResponse mutateInstance(FindStructureResponse response) { + FindStructureResponse newResponse; + do { + newResponse = createTestInstance(); + } while (response.equals(newResponse)); + return newResponse; + } + + @Override + protected Writeable.Reader instanceReader() { + return FindStructureResponse::new; + } +} diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 2d743f562df8e..2250411fa7882 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -348,6 +348,8 @@ public class Constants { "cluster:monitor/task", "cluster:monitor/task/get", "cluster:monitor/tasks/lists", + "cluster:monitor/text_structure/find_field_structure", + "cluster:monitor/text_structure/find_message_structure", "cluster:monitor/text_structure/findstructure", "cluster:monitor/text_structure/test_grok_pattern", "cluster:monitor/transform/get", diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml new file mode 100644 index 0000000000000..c2e9dbea1600a --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_field_structure.yml @@ -0,0 +1,63 @@ +setup: + - do: + indices.create: + index: airlines + body: + mappings: + properties: + message: + type: text + - do: + bulk: + refresh: true + body: + - index: + _index: airlines + - message: "{\"airline\": \"AAL\", \"responsetime\": 132.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481600}" + - index: + _index: airlines + - message: "{\"airline\": \"JZA\", \"responsetime\": 990.4628, \"sourcetype\": \"text-structure-test\", \"time\": 1403481700}" + - index: + _index: airlines + - message: "{\"airline\": \"AAL\", \"responsetime\": 134.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481800}" +--- +"Field structure finder with JSON messages": + - do: + text_structure.find_field_structure: + index: airlines + field: message + documents_to_sample: 3 + timeout: 10s + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { charset: "UTF-8" } + - match: { has_byte_order_marker: null } + - match: { format: ndjson } + - match: { timestamp_field: time } + - match: { joda_timestamp_formats.0: UNIX } + - match: { java_timestamp_formats.0: UNIX } + - match: { need_client_timezone: false } + - match: { mappings.properties.airline.type: keyword } + - match: { mappings.properties.responsetime.type: double } + - match: { mappings.properties.sourcetype.type: keyword } + - match: { mappings.properties.time.type: date } + - match: { mappings.properties.time.format: epoch_second } + - match: { ingest_pipeline.description: "Ingest pipeline created by text structure finder" } + - match: { ingest_pipeline.processors.0.date.field: time } + - match: { ingest_pipeline.processors.0.date.formats.0: UNIX } + - match: { field_stats.airline.count: 3 } + - match: { field_stats.airline.cardinality: 2 } + - match: { field_stats.responsetime.count: 3 } + - match: { field_stats.responsetime.cardinality: 3 } + - match: { field_stats.responsetime.min_value: 132.2046 } + - match: { field_stats.responsetime.max_value: 990.4628 } + # Not asserting on field_stats.responsetime.mean as it's a recurring decimal + # so its representation in the response could cause spurious failures + - match: { field_stats.responsetime.median_value: 134.2046 } + - match: { field_stats.sourcetype.count: 3 } + - match: { field_stats.sourcetype.cardinality: 1 } + - match: { field_stats.time.count: 3 } + - match: { field_stats.time.cardinality: 3 } + - match: { field_stats.time.earliest: "1403481600" } + - match: { field_stats.time.latest: "1403481800" } + - is_false: explanation diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml new file mode 100644 index 0000000000000..b1000510f2972 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/text_structure/find_message_structure.yml @@ -0,0 +1,56 @@ +"Messages structure finder with JSON messages": + - do: + text_structure.find_message_structure: + timeout: 10s + body: + messages: + - "{\"airline\": \"AAL\", \"responsetime\": 132.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481600}" + - "{\"airline\": \"JZA\", \"responsetime\": 990.4628, \"sourcetype\": 
\"text-structure-test\", \"time\": 1403481700}" + - "{\"airline\": \"AAL\", \"responsetime\": 134.2046, \"sourcetype\": \"text-structure-test\", \"time\": 1403481800}" + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { charset: "UTF-8" } + - match: { has_byte_order_marker: null } + - match: { format: ndjson } + - match: { timestamp_field: time } + - match: { joda_timestamp_formats.0: UNIX } + - match: { java_timestamp_formats.0: UNIX } + - match: { need_client_timezone: false } + - match: { mappings.properties.airline.type: keyword } + - match: { mappings.properties.responsetime.type: double } + - match: { mappings.properties.sourcetype.type: keyword } + - match: { mappings.properties.time.type: date } + - match: { mappings.properties.time.format: epoch_second } + - match: { ingest_pipeline.description: "Ingest pipeline created by text structure finder" } + - match: { ingest_pipeline.processors.0.date.field: time } + - match: { ingest_pipeline.processors.0.date.formats.0: UNIX } + - match: { field_stats.airline.count: 3 } + - match: { field_stats.airline.cardinality: 2 } + - match: { field_stats.responsetime.count: 3 } + - match: { field_stats.responsetime.cardinality: 3 } + - match: { field_stats.responsetime.min_value: 132.2046 } + - match: { field_stats.responsetime.max_value: 990.4628 } + # Not asserting on field_stats.responsetime.mean as it's a recurring decimal + # so its representation in the response could cause spurious failures + - match: { field_stats.responsetime.median_value: 134.2046 } + - match: { field_stats.sourcetype.count: 3 } + - match: { field_stats.sourcetype.cardinality: 1 } + - match: { field_stats.time.count: 3 } + - match: { field_stats.time.cardinality: 3 } + - match: { field_stats.time.earliest: "1403481600" } + - match: { field_stats.time.latest: "1403481800" } + - is_false: explanation +--- +"Messages structure finder with log messages": + - do: + text_structure.find_message_structure: + timeout: 10s + body: + messages: + - "2019-05-16 16:56:14 line 1 abcdefghijklmnopqrstuvwxyz" + - "2019-05-16 16:56:14 line 2 abcdefghijklmnopqrstuvwxyz\ncontinuation...\ncontinuation...\n" + - "2019-05-16 16:56:14 line 3 abcdefghijklmnopqrstuvwxyz" + - match: { num_lines_analyzed: 3 } + - match: { num_messages_analyzed: 3 } + - match: { format: semi_structured_text } + - match: { grok_pattern: "%{TIMESTAMP_ISO8601:timestamp} .*? %{INT:field} .*" } diff --git a/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle b/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle index 5fc76885aa7eb..1e592615da1f2 100644 --- a/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle +++ b/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle @@ -9,7 +9,7 @@ dependencies { restResources { restApi { // needed for template installation, etc. 
- include '_common', 'indices', 'text_structure' + include '_common', 'bulk', 'indices', 'text_structure' } restTests { includeXpack 'text_structure' diff --git a/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml b/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml index 7eff54728320a..7095acb3c60a1 100644 --- a/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml +++ b/x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml @@ -6,3 +6,15 @@ minimal: # This is always required because the REST client uses it to find the version of # Elasticsearch it's talking to - cluster:monitor/main + indices: + # Give all users involved in these tests access to the indices where the data to + # be analyzed is stored. + - names: [ 'airlines' ] + privileges: + - create_index + - indices:admin/refresh + - read + - write + - view_index_metadata + - indices:data/write/bulk + - indices:data/write/index diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java index 2a2fe1ea5a55a..07e49989b9f09 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/TextStructurePlugin.java @@ -21,10 +21,16 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.action.TestGrokPatternAction; +import org.elasticsearch.xpack.textstructure.rest.RestFindFieldStructureAction; +import org.elasticsearch.xpack.textstructure.rest.RestFindMessageStructureAction; import org.elasticsearch.xpack.textstructure.rest.RestFindStructureAction; import org.elasticsearch.xpack.textstructure.rest.RestTestGrokPatternAction; +import org.elasticsearch.xpack.textstructure.transport.TransportFindFieldStructureAction; +import org.elasticsearch.xpack.textstructure.transport.TransportFindMessageStructureAction; import org.elasticsearch.xpack.textstructure.transport.TransportFindStructureAction; import org.elasticsearch.xpack.textstructure.transport.TransportTestGrokPatternAction; @@ -53,12 +59,19 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return Arrays.asList(new RestFindStructureAction(), new RestTestGrokPatternAction()); + return Arrays.asList( + new RestFindFieldStructureAction(), + new RestFindMessageStructureAction(), + new RestFindStructureAction(), + new RestTestGrokPatternAction() + ); } @Override public List> getActions() { return Arrays.asList( + new ActionHandler<>(FindFieldStructureAction.INSTANCE, TransportFindFieldStructureAction.class), + new ActionHandler<>(FindMessageStructureAction.INSTANCE, TransportFindMessageStructureAction.class), new ActionHandler<>(FindStructureAction.INSTANCE, TransportFindStructureAction.class), new ActionHandler<>(TestGrokPatternAction.INSTANCE, TransportTestGrokPatternAction.class) ); diff --git 
a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java new file mode 100644 index 0000000000000..0f81a4fc9726b --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindFieldStructureAction.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; + +@ServerlessScope(Scope.INTERNAL) +public class RestFindFieldStructureAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(GET, BASE_PATH + "find_field_structure")); + } + + @Override + public String getName() { + return "text_structure_find_field_structure_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + FindFieldStructureAction.Request request = new FindFieldStructureAction.Request(); + RestFindStructureArgumentsParser.parse(restRequest, request); + request.setIndex(restRequest.param(FindFieldStructureAction.Request.INDEX.getPreferredName())); + request.setField(restRequest.param(FindFieldStructureAction.Request.FIELD.getPreferredName())); + return channel -> client.execute(FindFieldStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Collections.singleton(TextStructure.EXPLAIN); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java new file mode 100644 index 0000000000000..cc607dbdcd646 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindMessageStructureAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; + +@ServerlessScope(Scope.INTERNAL) +public class RestFindMessageStructureAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(GET, BASE_PATH + "find_message_structure"), new Route(POST, BASE_PATH + "find_message_structure")); + } + + @Override + public String getName() { + return "text_structure_find_message_structure_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + FindMessageStructureAction.Request request; + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + request = FindMessageStructureAction.Request.parseRequest(parser); + } + RestFindStructureArgumentsParser.parse(restRequest, request); + return channel -> client.execute(FindMessageStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Collections.singleton(TextStructure.EXPLAIN); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java index 94aee3c2a5f49..65325f2268ed2 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -17,12 +16,10 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; -import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.xpack.textstructure.TextStructurePlugin.BASE_PATH; @@ -30,8 +27,6 @@ @ServerlessScope(Scope.INTERNAL) public class RestFindStructureAction extends BaseRestHandler { - private static final TimeValue DEFAULT_TIMEOUT = new TimeValue(25, 
TimeUnit.SECONDS); - @Override public List routes() { return List.of( @@ -46,38 +41,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - FindStructureAction.Request request = new FindStructureAction.Request(); - request.setLinesToSample( - restRequest.paramAsInt( - FindStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), - TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT - ) - ); - request.setLineMergeSizeLimit( - restRequest.paramAsInt( - FindStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(), - TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT - ) - ); - request.setTimeout( - TimeValue.parseTimeValue( - restRequest.param(FindStructureAction.Request.TIMEOUT.getPreferredName()), - DEFAULT_TIMEOUT, - FindStructureAction.Request.TIMEOUT.getPreferredName() - ) - ); - request.setCharset(restRequest.param(FindStructureAction.Request.CHARSET.getPreferredName())); - request.setFormat(restRequest.param(FindStructureAction.Request.FORMAT.getPreferredName())); - request.setColumnNames(restRequest.paramAsStringArray(FindStructureAction.Request.COLUMN_NAMES.getPreferredName(), null)); - request.setHasHeaderRow(restRequest.paramAsBoolean(FindStructureAction.Request.HAS_HEADER_ROW.getPreferredName(), null)); - request.setDelimiter(restRequest.param(FindStructureAction.Request.DELIMITER.getPreferredName())); - request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName())); - request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null)); - request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName())); - request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName())); - request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName())); - request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName())); + RestFindStructureArgumentsParser.parse(restRequest, request); + if (restRequest.hasContent()) { request.setSample(restRequest.content()); } else { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java new file mode 100644 index 0000000000000..bd6fe553fc447 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureArgumentsParser.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.textstructure.rest; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; + +import java.util.concurrent.TimeUnit; + +public class RestFindStructureArgumentsParser { + + private static final TimeValue DEFAULT_TIMEOUT = new TimeValue(25, TimeUnit.SECONDS); + + static void parse(RestRequest restRequest, AbstractFindStructureRequest request) { + if (request instanceof FindStructureAction.Request) { + request.setLinesToSample( + restRequest.paramAsInt( + FindStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), + TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT + ) + ); + request.setLineMergeSizeLimit( + restRequest.paramAsInt( + FindStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(), + TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT + ) + ); + request.setCharset(restRequest.param(FindStructureAction.Request.CHARSET.getPreferredName())); + request.setHasHeaderRow(restRequest.paramAsBoolean(FindStructureAction.Request.HAS_HEADER_ROW.getPreferredName(), null)); + } else if (request instanceof FindFieldStructureAction.Request) { + request.setLinesToSample( + restRequest.paramAsInt( + FindStructureAction.Request.DOCUMENTS_TO_SAMPLE.getPreferredName(), + TextStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT + ) + ); + } + + request.setTimeout( + TimeValue.parseTimeValue( + restRequest.param(FindStructureAction.Request.TIMEOUT.getPreferredName()), + DEFAULT_TIMEOUT, + FindStructureAction.Request.TIMEOUT.getPreferredName() + ) + ); + request.setFormat(restRequest.param(FindStructureAction.Request.FORMAT.getPreferredName())); + request.setColumnNames(restRequest.paramAsStringArray(FindStructureAction.Request.COLUMN_NAMES.getPreferredName(), null)); + request.setDelimiter(restRequest.param(FindStructureAction.Request.DELIMITER.getPreferredName())); + request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName())); + request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null)); + request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName())); + request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName())); + request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName())); + request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName())); + + if (request instanceof FindMessageStructureAction.Request || request instanceof FindFieldStructureAction.Request) { + if (TextStructure.Format.DELIMITED.equals(request.getFormat())) { + request.setHasHeaderRow(false); + } + } + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java index 6d7faaadae433..7fc6db9cb5c6f 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java @@ -44,7 +44,7 @@ public class DelimitedTextStructureFinder implements TextStructureFinder { private final List sampleMessages; private final TextStructure structure; - static DelimitedTextStructureFinder makeDelimitedTextStructureFinder( + static DelimitedTextStructureFinder createFromSample( List explanation, String sample, String charsetName, @@ -590,6 +590,36 @@ static boolean lineHasUnescapedQuote(String line, CsvPreference csvPreference) { return false; } + static boolean canCreateFromMessages( + List explanation, + List messages, + int minFieldsPerRow, + CsvPreference csvPreference, + String formatName, + double allowedFractionOfBadLines + ) { + for (String message : messages) { + try (CsvListReader csvReader = new CsvListReader(new StringReader(message), csvPreference)) { + if (csvReader.read() == null) { + explanation.add(format("Not %s because message with no lines: [%s]", formatName, message)); + return false; + } + if (csvReader.read() != null) { + explanation.add(format("Not %s because message with multiple lines: [%s]", formatName, message)); + return false; + } + } catch (IOException e) { + explanation.add(format("Not %s because there was a parsing exception: [%s]", formatName, e.getMessage())); + return false; + } + } + + // Every line contains a single valid delimited message, so + // we can safely concatenate and run the logic for a sample. 
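The comment above captures the whole trick: a message is acceptable only if SuperCSV yields exactly one row for it, and only once every message has passed that check is the joined text treated as an ordinary sample. A minimal standalone sketch of the same idea (the class and main method are hypothetical and exist only for illustration; it assumes SuperCSV on the classpath, as the plugin already does) might look like:

import org.supercsv.io.CsvListReader;
import org.supercsv.prefs.CsvPreference;

import java.io.IOException;
import java.io.StringReader;
import java.util.List;

class SingleRowMessageSketch {

    // A message qualifies only if it parses as exactly one CSV row.
    static boolean isSingleCsvRow(String message, CsvPreference preference) throws IOException {
        try (CsvListReader reader = new CsvListReader(new StringReader(message), preference)) {
            return reader.read() != null && reader.read() == null;
        }
    }

    public static void main(String[] args) throws IOException {
        List<String> messages = List.of("AAL,132.2046,text-structure-test", "JZA,990.4628,text-structure-test");
        boolean allSingleRow = true;
        for (String message : messages) {
            allSingleRow = allSingleRow && isSingleCsvRow(message, CsvPreference.STANDARD_PREFERENCE);
        }
        // Only when every message is a single row is it safe to treat the newline join as a sample.
        System.out.println(allSingleRow ? String.join("\n", messages) : "not delimited");
    }
}

The join with '\n' is what then lets the existing sample-oriented logic run unchanged.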
+ String sample = String.join("\n", messages); + return canCreateFromSample(explanation, sample, minFieldsPerRow, csvPreference, formatName, allowedFractionOfBadLines); + } + static boolean canCreateFromSample( List explanation, String sample, @@ -598,7 +628,6 @@ static boolean canCreateFromSample( String formatName, double allowedFractionOfBadLines ) { - // Logstash's CSV parser won't tolerate fields where just part of the // value is quoted, whereas SuperCSV will, hence this extra check String[] sampleLines = sample.split("\n"); @@ -619,7 +648,6 @@ static boolean canCreateFromSample( try (CsvListReader csvReader = new CsvListReader(new StringReader(sample), csvPreference)) { int fieldsInFirstRow = -1; - int fieldsInLastRow = -1; List illFormattedRows = new ArrayList<>(); int numberOfRows = 0; @@ -643,7 +671,6 @@ static boolean canCreateFromSample( ); return false; } - fieldsInLastRow = fieldsInFirstRow; continue; } @@ -676,26 +703,7 @@ static boolean canCreateFromSample( ); return false; } - continue; } - - fieldsInLastRow = fieldsInThisRow; - } - - if (fieldsInLastRow > fieldsInFirstRow) { - explanation.add( - "Not " - + formatName - + " because last row has more fields than first row: [" - + fieldsInFirstRow - + "] and [" - + fieldsInLastRow - + "]" - ); - return false; - } - if (fieldsInLastRow < fieldsInFirstRow) { - --numberOfRows; } } catch (SuperCsvException e) { // Tolerate an incomplete last row diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java index f809665199fea..5f09fdb437fe4 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactory.java @@ -67,6 +67,22 @@ public boolean canCreateFromSample(List explanation, String sample, doub ); } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + String formatName = switch ((char) csvPreference.getDelimiterChar()) { + case ',' -> "CSV"; + case '\t' -> "TSV"; + default -> Character.getName(csvPreference.getDelimiterChar()).toLowerCase(Locale.ROOT) + " delimited values"; + }; + return DelimitedTextStructureFinder.canCreateFromMessages( + explanation, + messages, + minFieldsPerRow, + csvPreference, + formatName, + allowedFractionOfBadLines + ); + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -78,7 +94,7 @@ public TextStructureFinder createFromSample( TimeoutChecker timeoutChecker ) throws IOException { CsvPreference adjustedCsvPreference = new CsvPreference.Builder(csvPreference).maxLinesPerRow(lineMergeSizeLimit).build(); - return DelimitedTextStructureFinder.makeDelimitedTextStructureFinder( + return DelimitedTextStructureFinder.createFromSample( explanation, sample, charsetName, @@ -89,4 +105,26 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException { + // DelimitedTextStructureFinderFactory::canCreateFromMessages already + // checked that every line contains a single valid delimited message, + 
// so we can safely concatenate and run the logic for a sample. + String sample = String.join("\n", messages); + return DelimitedTextStructureFinder.createFromSample( + explanation, + sample, + "UTF-8", + null, + csvPreference, + trimFields, + overrides, + timeoutChecker + ); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java index 4e01d32645008..c9ca6002b6c03 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java @@ -36,7 +36,6 @@ private static LogTextStructureFinder makeSingleLineLogTextStructureFinder( String[] sampleLines, String charsetName, Boolean hasByteOrderMarker, - int lineMergeSizeLimit, TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) { @@ -108,12 +107,9 @@ private static LogTextStructureFinder makeSingleLineLogTextStructureFinder( return new LogTextStructureFinder(sampleMessages, structure); } - private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + private static TimestampFormatFinder getTimestampFormatFinder( List explanation, String[] sampleLines, - String charsetName, - Boolean hasByteOrderMarker, - int lineMergeSizeLimit, TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) { @@ -145,15 +141,20 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + timestampFormatFinder.getJavaTimestampFormats() ); + return timestampFormatFinder; + } + + private static Tuple, Integer> getSampleMessages( + String multiLineRegex, + String[] sampleLines, + int lineMergeSizeLimit, + TimeoutChecker timeoutChecker + ) { List sampleMessages = new ArrayList<>(); - StringBuilder preamble = new StringBuilder(); int linesConsumed = 0; StringBuilder message = null; int linesInMessage = 0; - String multiLineRegex = createMultiLineMessageStartRegex( - timestampFormatFinder.getPrefaces(), - timestampFormatFinder.getSimplePattern().pattern() - ); + Pattern multiLinePattern = Pattern.compile(multiLineRegex); for (String sampleLine : sampleLines) { if (multiLinePattern.matcher(sampleLine).find()) { @@ -195,9 +196,6 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( } } timeoutChecker.check("multi-line message determination"); - if (sampleMessages.size() < 2) { - preamble.append(sampleLine).append('\n'); - } } // Don't add the last message, as it might be partial and mess up subsequent pattern finding @@ -209,8 +207,24 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( ); } - // null to allow GC before Grok pattern search - sampleLines = null; + return new Tuple<>(sampleMessages, linesConsumed); + } + + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + List sampleMessages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + int linesConsumed, + TimestampFormatFinder timestampFormatFinder, + String multiLineRegex, + TimeoutChecker timeoutChecker + ) { + StringBuilder preamble = new StringBuilder(); + for (int i = 0; i < sampleMessages.size() && i < 2; i++) { + preamble.append(sampleMessages.get(i)).append('\n'); + } TextStructure.Builder structureBuilder = new 
TextStructure.Builder(TextStructure.Format.SEMI_STRUCTURED_TEXT).setCharset( charsetName @@ -300,6 +314,80 @@ private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( return new LogTextStructureFinder(sampleMessages, structure); } + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + String[] sampleLines, + String charsetName, + Boolean hasByteOrderMarker, + int lineMergeSizeLimit, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + TimestampFormatFinder timestampFormatFinder = getTimestampFormatFinder(explanation, sampleLines, overrides, timeoutChecker); + + String multiLineRegex = createMultiLineMessageStartRegex( + timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern() + ); + + Tuple, Integer> sampleMessagesAndLinesConsumed = getSampleMessages( + multiLineRegex, + sampleLines, + lineMergeSizeLimit, + timeoutChecker + ); + List sampleMessages = sampleMessagesAndLinesConsumed.v1(); + int linesConsumed = sampleMessagesAndLinesConsumed.v2(); + + // null to allow GC before Grok pattern search + sampleLines = null; + + return makeMultiLineLogTextStructureFinder( + explanation, + sampleMessages, + charsetName, + hasByteOrderMarker, + overrides, + linesConsumed, + timestampFormatFinder, + multiLineRegex, + timeoutChecker + ); + } + + private static LogTextStructureFinder makeMultiLineLogTextStructureFinder( + List explanation, + List messages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + TimestampFormatFinder timestampFormatFinder = getTimestampFormatFinder( + explanation, + messages.toArray(new String[0]), + overrides, + timeoutChecker + ); + + String multiLineRegex = createMultiLineMessageStartRegex( + timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern() + ); + + return makeMultiLineLogTextStructureFinder( + explanation, + messages, + charsetName, + hasByteOrderMarker, + overrides, + messages.size(), + timestampFormatFinder, + multiLineRegex, + timeoutChecker + ); + } + static LogTextStructureFinder makeLogTextStructureFinder( List explanation, String sample, @@ -316,7 +404,6 @@ static LogTextStructureFinder makeLogTextStructureFinder( sampleLines, charsetName, hasByteOrderMarker, - lineMergeSizeLimit, overrides, timeoutChecker ); @@ -333,6 +420,28 @@ static LogTextStructureFinder makeLogTextStructureFinder( } } + static LogTextStructureFinder makeLogTextStructureFinder( + List explanation, + List messages, + String charsetName, + Boolean hasByteOrderMarker, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + if (TextStructureUtils.NULL_TIMESTAMP_FORMAT.equals(overrides.getTimestampFormat())) { + return makeSingleLineLogTextStructureFinder( + explanation, + messages.toArray(new String[0]), + charsetName, + hasByteOrderMarker, + overrides, + timeoutChecker + ); + } else { + return makeMultiLineLogTextStructureFinder(explanation, messages, charsetName, hasByteOrderMarker, overrides, timeoutChecker); + } + } + private LogTextStructureFinder(List sampleMessages, TextStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java index d3978946ce908..24532e9fdaae4 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderFactory.java @@ -40,6 +40,10 @@ public boolean canCreateFromSample(List explanation, String sample, doub return true; } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + return true; + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -60,4 +64,13 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) { + return LogTextStructureFinder.makeLogTextStructureFinder(explanation, messages, "UTF-8", null, overrides, timeoutChecker); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java index 5afba653dde6c..c98010d12e2fb 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactory.java @@ -72,6 +72,16 @@ public boolean canCreateFromSample(List explanation, String sample, doub return true; } + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + for (String message : messages) { + if (message.contains("\n")) { + explanation.add("Not NDJSON because message contains multiple lines: [" + message + "]"); + return false; + } + } + return canCreateFromSample(explanation, String.join("\n", messages), allowedFractionOfBadLines); + } + @Override public TextStructureFinder createFromSample( List explanation, @@ -92,6 +102,19 @@ public TextStructureFinder createFromSample( ); } + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException { + // NdJsonTextStructureFinderFactory::canCreateFromMessages already + // checked that every line contains a single valid JSON message, + // so we can safely concatenate and run the logic for a sample. 
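The NDJSON factory applies the same pattern: any message containing an embedded newline is rejected outright, and only then are the messages joined and handed to the existing sample logic. A rough standalone approximation of that intent (the class name is hypothetical, and it uses Jackson directly instead of the plugin's XContent machinery, purely for illustration) might be:

import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.List;

class NdJsonMessageCheckSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // A message is usable as NDJSON input only if it is a single line that parses as a JSON object.
    static boolean isSingleLineJsonObject(String message) {
        if (message.contains("\n")) {
            return false;
        }
        try {
            return MAPPER.readTree(message).isObject();
        } catch (Exception e) {
            return false;
        }
    }

    public static void main(String[] args) {
        List<String> messages = List.of(
            "{\"airline\": \"AAL\", \"responsetime\": 132.2046}",
            "{\"airline\": \"JZA\", \"responsetime\": 990.4628}"
        );
        boolean ok = messages.stream().allMatch(NdJsonMessageCheckSketch::isSingleLineJsonObject);
        // Joining single-line JSON objects with '\n' yields a well-formed NDJSON sample.
        System.out.println(ok ? String.join("\n", messages) : "not NDJSON");
    }
}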
+ String sample = String.join("\n", messages); + return NdJsonTextStructureFinder.makeNdJsonTextStructureFinder(explanation, sample, "UTF-8", null, overrides, timeoutChecker); + } + private static class ContextPrintingStringReader extends StringReader { private final String str; diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java index 63970dd2c58d9..1e8317400d09d 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderFactory.java @@ -33,6 +33,8 @@ public interface TextStructureFinderFactory { */ boolean canCreateFromSample(List explanation, String sample, double allowedFractionOfBadLines); + boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadMessages); + /** * Create an object representing the structure of some text. * @param explanation List of reasons for making decisions. May contain items when passed and new reasons @@ -56,4 +58,11 @@ TextStructureFinder createFromSample( TextStructureOverrides overrides, TimeoutChecker timeoutChecker ) throws Exception; + + TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception; } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java index c0a100fbb280d..899f6c9108060 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureFinderManager.java @@ -13,7 +13,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.io.BufferedInputStream; @@ -310,7 +310,7 @@ public TextStructureFinder findTextStructure(Integer idealSampleLineCount, Integ * Given a stream of text data, determine its structure. * @param idealSampleLineCount Ideally, how many lines from the stream will be read to determine the structure? * If the stream has fewer lines then an attempt will still be made, providing at - * least {@link FindStructureAction#MIN_SAMPLE_LINE_COUNT} lines can be read. If + * least {@link AbstractFindStructureRequest#MIN_SAMPLE_LINE_COUNT} lines can be read. If * null the value of {@link #DEFAULT_IDEAL_SAMPLE_LINE_COUNT} will be used. * @param lineMergeSizeLimit Maximum number of characters permitted when lines are merged to create messages. * If null the value of {@link #DEFAULT_LINE_MERGE_SIZE_LIMIT} will be used. 
@@ -383,11 +383,11 @@ public TextStructureFinder findTextStructure( sampleReader = charsetMatch.getReader(); } - assert idealSampleLineCount >= FindStructureAction.MIN_SAMPLE_LINE_COUNT; + assert idealSampleLineCount >= AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT; Tuple sampleInfo = sampleText( sampleReader, charsetName, - FindStructureAction.MIN_SAMPLE_LINE_COUNT, + AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT, idealSampleLineCount, timeoutChecker ); @@ -413,6 +413,23 @@ public TextStructureFinder findTextStructure( } } + public TextStructureFinder findTextStructure(List messages, TextStructureOverrides overrides, TimeValue timeout) + throws Exception { + List explanation = new ArrayList<>(); + try (TimeoutChecker timeoutChecker = new TimeoutChecker("structure analysis", timeout, scheduler)) { + return makeBestStructureFinder(explanation, messages, overrides, timeoutChecker); + } catch (Exception e) { + // Add a dummy exception containing the explanation so far - this can be invaluable for troubleshooting as incorrect + // decisions made early on in the structure analysis can result in seemingly crazy decisions or timeouts later on + if (explanation.isEmpty() == false) { + e.addSuppressed( + new ElasticsearchException(explanation.stream().collect(Collectors.joining("]\n[", "Explanation so far:\n[", "]\n"))) + ); + } + throw e; + } + } + CharsetMatch findCharset(List explanation, InputStream inputStream, TimeoutChecker timeoutChecker) throws Exception { // We need an input stream that supports mark and reset, so wrap the argument @@ -551,24 +568,12 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time ); } - TextStructureFinder makeBestStructureFinder( - List explanation, - String sample, - String charsetName, - Boolean hasByteOrderMarker, - int lineMergeSizeLimit, - TextStructureOverrides overrides, - TimeoutChecker timeoutChecker - ) throws Exception { - + List getFactories(TextStructureOverrides overrides) { Character delimiter = overrides.getDelimiter(); Character quote = overrides.getQuote(); Boolean shouldTrimFields = overrides.getShouldTrimFields(); List factories; - double allowedFractionOfBadLines = 0.0; if (delimiter != null) { - allowedFractionOfBadLines = DelimitedTextStructureFinderFactory.DELIMITER_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; - // If a precise delimiter is specified, we only need one structure finder // factory, and we'll tolerate as little as one column in the input factories = Collections.singletonList( @@ -581,8 +586,6 @@ TextStructureFinder makeBestStructureFinder( ); } else if (quote != null || shouldTrimFields != null || TextStructure.Format.DELIMITED.equals(overrides.getFormat())) { - allowedFractionOfBadLines = DelimitedTextStructureFinderFactory.FORMAT_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; - // The delimiter is not specified, but some other aspect of delimited text is, // so clone our default delimited factories altering the overridden values factories = ORDERED_STRUCTURE_FACTORIES.stream() @@ -599,6 +602,34 @@ TextStructureFinder makeBestStructureFinder( } + return factories; + } + + private double getAllowedFractionOfBadLines(TextStructureOverrides overrides) { + Character delimiter = overrides.getDelimiter(); + Character quote = overrides.getQuote(); + Boolean shouldTrimFields = overrides.getShouldTrimFields(); + if (delimiter != null) { + return DelimitedTextStructureFinderFactory.DELIMITER_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; + } else if (quote != null || shouldTrimFields != null || 
TextStructure.Format.DELIMITED.equals(overrides.getFormat())) { + return DelimitedTextStructureFinderFactory.FORMAT_OVERRIDDEN_ALLOWED_FRACTION_OF_BAD_LINES; + } else { + return 0.0; + } + } + + TextStructureFinder makeBestStructureFinder( + List explanation, + String sample, + String charsetName, + Boolean hasByteOrderMarker, + int lineMergeSizeLimit, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception { + List factories = getFactories(overrides); + double allowedFractionOfBadLines = getAllowedFractionOfBadLines(overrides); + for (TextStructureFinderFactory factory : factories) { timeoutChecker.check("high level format detection"); if (factory.canCreateFromSample(explanation, sample, allowedFractionOfBadLines)) { @@ -620,6 +651,28 @@ TextStructureFinder makeBestStructureFinder( ); } + private TextStructureFinder makeBestStructureFinder( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws Exception { + List factories = getFactories(overrides); + double allowedFractionOfBadLines = getAllowedFractionOfBadLines(overrides); + + for (TextStructureFinderFactory factory : factories) { + timeoutChecker.check("high level format detection"); + if (factory.canCreateFromMessages(explanation, messages, allowedFractionOfBadLines)) { + return factory.createFromMessages(explanation, messages, overrides, timeoutChecker); + } + } + + throw new IllegalArgumentException( + "Input did not match " + + ((overrides.getFormat() == null) ? "any known formats" : "the specified format [" + overrides.getFormat() + "]") + ); + } + private Tuple sampleText(Reader reader, String charsetName, int minLines, int maxLines, TimeoutChecker timeoutChecker) throws IOException { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java index 5ba4e464508f1..303cb2a59ea16 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; @@ -37,7 +38,7 @@ public class TextStructureOverrides { private final String ecsCompatibility; - public TextStructureOverrides(FindStructureAction.Request request) { + public TextStructureOverrides(AbstractFindStructureRequest request) { this( request.getCharset(), diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java index 10f65564c3dde..2f56c73616866 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactory.java @@ -46,7 +46,42 @@ public 
boolean canFindFormat(TextStructure.Format format) { */ @Override public boolean canCreateFromSample(List explanation, String sample, double allowedFractionOfBadLines) { + int completeDocCount = parseXml(explanation, sample); + if (completeDocCount == -1) { + return false; + } + if (completeDocCount == 0) { + explanation.add("Not XML because sample didn't contain a complete document"); + return false; + } + explanation.add("Deciding sample is XML"); + return true; + } + + public boolean canCreateFromMessages(List explanation, List messages, double allowedFractionOfBadLines) { + for (String message : messages) { + int completeDocCount = parseXml(explanation, message); + if (completeDocCount == -1) { + return false; + } + if (completeDocCount == 0) { + explanation.add("Not XML because a message didn't contain a complete document"); + return false; + } + if (completeDocCount > 1) { + explanation.add("Not XML because a message contains a multiple documents"); + return false; + } + } + explanation.add("Deciding sample is XML"); + return true; + } + /** + * Tries to parse the sample as XML. + * @return -1 if invalid, otherwise the number of complete docs + */ + private int parseXml(List explanation, String sample) { int completeDocCount = 0; String commonRootElementName = null; String remainder = sample.trim(); @@ -80,14 +115,14 @@ public boolean canCreateFromSample(List explanation, String sample, doub + rootElementName + "]" ); - return false; + return -1; } } break; case XMLStreamReader.END_ELEMENT: if (--nestingLevel < 0) { explanation.add("Not XML because an end element occurs before a start element"); - return false; + return -1; } break; } @@ -111,7 +146,7 @@ public boolean canCreateFromSample(List explanation, String sample, doub + remainder + "]" ); - return false; + return -1; } } endPos += location.getColumnNumber() - 1; @@ -125,17 +160,11 @@ public boolean canCreateFromSample(List explanation, String sample, doub } } catch (IOException | XMLStreamException e) { explanation.add("Not XML because there was a parsing exception: [" + e.getMessage().replaceAll("\\s?\r?\n\\s?", " ") + "]"); - return false; + return -1; } } - if (completeDocCount == 0) { - explanation.add("Not XML because sample didn't contain a complete document"); - return false; - } - - explanation.add("Deciding sample is XML"); - return true; + return completeDocCount; } @Override @@ -157,4 +186,17 @@ public TextStructureFinder createFromSample( timeoutChecker ); } + + public TextStructureFinder createFromMessages( + List explanation, + List messages, + TextStructureOverrides overrides, + TimeoutChecker timeoutChecker + ) throws IOException, ParserConfigurationException, SAXException { + // XmlTextStructureFinderFactory::canCreateFromMessages already + // checked that every message contains a single valid XML document, + // so we can safely concatenate and run the logic for a sample. 
+ String sample = String.join("\n", messages); + return XmlTextStructureFinder.makeXmlTextStructureFinder(explanation, sample, "UTF-8", null, overrides, timeoutChecker); + } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java new file mode 100644 index 0000000000000..43a990f6f565b --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindFieldStructureAction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.textstructure.transport; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.utils.MapHelper; +import org.elasticsearch.xpack.core.textstructure.action.AbstractFindStructureRequest; +import org.elasticsearch.xpack.core.textstructure.action.FindFieldStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class TransportFindFieldStructureAction extends HandledTransportAction { + + private final Client client; + private final TransportService transportService; + private final ThreadPool threadPool; + + @Inject + public TransportFindFieldStructureAction( + TransportService transportService, + ActionFilters actionFilters, + Client client, + ThreadPool threadPool + ) { + super(FindFieldStructureAction.NAME, transportService, actionFilters, FindFieldStructureAction.Request::new, threadPool.generic()); + this.client = client; + this.transportService = transportService; + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, FindFieldStructureAction.Request request, ActionListener listener) { + TaskId taskId = new TaskId(transportService.getLocalNode().getId(), task.getId()); + new ParentTaskAssigningClient(client, taskId).prepareSearch(request.getIndex()) + .setSize(request.getLinesToSample()) + .setFetchSource(true) + .setQuery(QueryBuilders.existsQuery(request.getField())) + .setFetchSource(new String[] { request.getField() }, null) + .execute(ActionListener.wrap(searchResponse -> { + long hitCount = searchResponse.getHits().getHits().length; + if (hitCount < 
AbstractFindStructureRequest.MIN_SAMPLE_LINE_COUNT) { + listener.onFailure( + new IllegalArgumentException("Input contained too few lines [" + hitCount + "] to obtain a meaningful sample") + ); + return; + } + List messages = getMessages(searchResponse, request.getField()); + try { + listener.onResponse(buildTextStructureResponse(messages, request)); + } catch (Exception e) { + listener.onFailure(e); + } + }, listener::onFailure)); + } + + private List getMessages(SearchResponse searchResponse, String field) { + return Arrays.stream(searchResponse.getHits().getHits()) + .map(hit -> MapHelper.dig(field, Objects.requireNonNull(hit.getSourceAsMap())).toString()) + .collect(Collectors.toList()); + } + + private FindStructureResponse buildTextStructureResponse(List messages, FindFieldStructureAction.Request request) + throws Exception { + TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); + TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( + messages, + new TextStructureOverrides(request), + request.getTimeout() + ); + return new FindStructureResponse(textStructureFinder.getStructure()); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java new file mode 100644 index 0000000000000..79c21b3cea306 --- /dev/null +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindMessageStructureAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.textstructure.transport; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.textstructure.action.FindMessageStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; +import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; + +public class TransportFindMessageStructureAction extends HandledTransportAction { + + private final ThreadPool threadPool; + + @Inject + public TransportFindMessageStructureAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool) { + super( + FindMessageStructureAction.NAME, + transportService, + actionFilters, + FindMessageStructureAction.Request::new, + threadPool.generic() + ); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, FindMessageStructureAction.Request request, ActionListener listener) { + try { + listener.onResponse(buildTextStructureResponse(request)); + } catch (Exception e) { + listener.onFailure(e); + } + } + + private FindStructureResponse buildTextStructureResponse(FindMessageStructureAction.Request request) throws Exception { + TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); + TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( + request.getMessages(), + new TextStructureOverrides(request), + request.getTimeout() + ); + return new FindStructureResponse(textStructureFinder.getStructure()); + } +} diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java index 8bf0f1cd4395f..4257a36bc150a 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/transport/TransportFindStructureAction.java @@ -10,53 +10,38 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; +import org.elasticsearch.xpack.core.textstructure.action.FindStructureResponse; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinder; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureFinderManager; import org.elasticsearch.xpack.textstructure.structurefinder.TextStructureOverrides; import java.io.InputStream; -import static org.elasticsearch.threadpool.ThreadPool.Names.GENERIC; - -public class 
TransportFindStructureAction extends HandledTransportAction { +public class TransportFindStructureAction extends HandledTransportAction { private final ThreadPool threadPool; @Inject public TransportFindStructureAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool) { - super( - FindStructureAction.NAME, - transportService, - actionFilters, - FindStructureAction.Request::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(FindStructureAction.NAME, transportService, actionFilters, FindStructureAction.Request::new, threadPool.generic()); this.threadPool = threadPool; } @Override - protected void doExecute(Task task, FindStructureAction.Request request, ActionListener listener) { - - // As determining the text structure might take a while, we run - // in a different thread to avoid blocking the network thread. - threadPool.executor(GENERIC).execute(() -> { - try { - listener.onResponse(buildTextStructureResponse(request)); - } catch (Exception e) { - listener.onFailure(e); - } - }); + protected void doExecute(Task task, FindStructureAction.Request request, ActionListener listener) { + try { + listener.onResponse(buildTextStructureResponse(request)); + } catch (Exception e) { + listener.onFailure(e); + } } - private FindStructureAction.Response buildTextStructureResponse(FindStructureAction.Request request) throws Exception { - + private FindStructureResponse buildTextStructureResponse(FindStructureAction.Request request) throws Exception { TextStructureFinderManager structureFinderManager = new TextStructureFinderManager(threadPool.scheduler()); - try (InputStream sampleStream = request.getSample().streamInput()) { TextStructureFinder textStructureFinder = structureFinderManager.findTextStructure( request.getLinesToSample(), @@ -65,8 +50,7 @@ private FindStructureAction.Response buildTextStructureResponse(FindStructureAct new TextStructureOverrides(request), request.getTimeout() ); - - return new FindStructureAction.Response(textStructureFinder.getStructure()); + return new FindStructureResponse(textStructureFinder.getStructure()); } } } diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java index cd8c451ee0547..e28de72202460 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class DelimitedTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory csvFactory = new DelimitedTextStructureFinderFactory(',', '"', 2, false); @@ -40,6 +43,21 @@ public void testCanCreateCsvFromSampleGivenText() { assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE, 0.0)); } + public void testCanCreateCsvFromMessagesCsv() { + List messages = Arrays.asList(CSV_SAMPLE.split("\n")); + assertTrue(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateCsvFromMessagesCsv_multipleCsvRowsPerMessage() { + List messages = List.of(CSV_SAMPLE, CSV_SAMPLE, 
CSV_SAMPLE); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateCsvFromMessagesCsv_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + // TSV - no need to check NDJSON, XML or CSV because they come earlier in the order we check formats public void testCanCreateTsvFromSampleGivenTsv() { diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java index 478994178c5bc..62e06af809711 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinderTests.java @@ -790,6 +790,30 @@ public void testCreateConfigsGivenDotInFieldName() throws Exception { assertEquals(Collections.singleton("properties"), structure.getMappings().keySet()); } + public void testCreateFromMessages() throws Exception { + List messages = List.of("a,b,c", "d,e,f", "g,h,i"); + assertTrue(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + TextStructureFinder structureFinder = csvFactory.createFromMessages( + explanation, + messages, + TextStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER + ); + TextStructure structure = structureFinder.getStructure(); + assertEquals(TextStructure.Format.DELIMITED, structure.getFormat()); + assertEquals(3, structure.getNumMessagesAnalyzed()); + } + + public void testCreateFromMessages_multipleRowPerMessage() { + List messages = List.of("a,b,c\nd,e,f", "g,h,i"); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCreateFromMessages_emptyMessage() { + List messages = List.of("a,b,c", "", "d,e,f"); + assertFalse(csvFactory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testFindHeaderFromSampleGivenHeaderInSample() throws IOException { String withHeader = """ time,airline,responsetime,sourcetype diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java index 4ee651408af56..484fde023be6b 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinderTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -106,6 +107,21 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertTrue(keys.contains("@timestamp")); } + public void testCreateFromMessages() throws Exception { + List messages = List.of(TEXT_SAMPLE.split("\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + + TextStructureFinder structureFinder = factory.createFromMessages( + explanation, + messages, + 
TextStructureOverrides.EMPTY_OVERRIDES, + NOOP_TIMEOUT_CHECKER + ); + + TextStructure structure = structureFinder.getStructure(); + assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); + } + public void testCreateConfigsGivenElasticsearchLogWithNoTimestamps() throws Exception { assertTrue(factory.canCreateFromSample(explanation, TEXT_WITH_NO_TIMESTAMPS_SAMPLE, 0.0)); diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java index 85baf238630bb..dac202df8e811 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class NdJsonTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory factory = new NdJsonTextStructureFinderFactory(); @@ -15,6 +18,21 @@ public void testCanCreateFromSampleGivenNdJson() { assertTrue(factory.canCreateFromSample(explanation, NDJSON_SAMPLE, 0.0)); } + public void testCanCreateFromMessages() { + List messages = Arrays.asList(NDJSON_SAMPLE.split("\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_multipleJsonLinesPerMessage() { + List messages = List.of(NDJSON_SAMPLE, NDJSON_SAMPLE, NDJSON_SAMPLE); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testCanCreateFromSampleGivenXml() { assertFalse(factory.canCreateFromSample(explanation, XML_SAMPLE, 0.0)); diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java index ea92420a1ea5a..7340c0c3dff00 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderFactoryTests.java @@ -6,6 +6,9 @@ */ package org.elasticsearch.xpack.textstructure.structurefinder; +import java.util.Arrays; +import java.util.List; + public class XmlTextStructureFinderFactoryTests extends TextStructureTestCase { private final TextStructureFinderFactory factory = new XmlTextStructureFinderFactory(); @@ -17,6 +20,21 @@ public void testCanCreateFromSampleGivenXml() { assertTrue(factory.canCreateFromSample(explanation, XML_SAMPLE, 0.0)); } + public void testCanCreateFromMessages() { + List messages = Arrays.asList(XML_SAMPLE.split("\n\n")); + assertTrue(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_multipleXmlDocsPerMessage() { + List messages = 
List.of(XML_SAMPLE, XML_SAMPLE, XML_SAMPLE); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + + public void testCanCreateFromMessages_emptyMessages() { + List messages = List.of("", "", ""); + assertFalse(factory.canCreateFromMessages(explanation, messages, 0.0)); + } + public void testCanCreateFromSampleGivenCsv() { assertFalse(factory.canCreateFromSample(explanation, CSV_SAMPLE, 0.0)); From fa735a9b772842d643b7c8eb4248c550db8262c3 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 6 Mar 2024 10:08:28 +0000 Subject: [PATCH 013/248] [ML] Fix `categorize_text` aggregation nested under empty buckets (#105987) Previously the `categorize_text` aggregation could throw an exception if nested as a sub-aggregation of another aggregation that produced empty buckets at the end of its results. This change avoids this possibility. Fixes #105836 --- docs/changelog/105987.yaml | 6 ++ .../CategorizeTextAggregator.java | 3 +- .../CategorizeTextAggregatorTests.java | 84 +++++++++++++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/105987.yaml diff --git a/docs/changelog/105987.yaml b/docs/changelog/105987.yaml new file mode 100644 index 0000000000000..d09a6907c72bf --- /dev/null +++ b/docs/changelog/105987.yaml @@ -0,0 +1,6 @@ +pr: 105987 +summary: Fix `categorize_text` aggregation nested under empty buckets +area: Machine Learning +type: bug +issues: + - 105836 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java index 520d554379cfc..cedaced0f57ee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java @@ -113,7 +113,8 @@ protected void doClose() { public InternalAggregation[] buildAggregations(long[] ordsToCollect) throws IOException { Bucket[][] topBucketsPerOrd = new Bucket[ordsToCollect.length][]; for (int ordIdx = 0; ordIdx < ordsToCollect.length; ordIdx++) { - final TokenListCategorizer categorizer = categorizers.get(ordsToCollect[ordIdx]); + final long ord = ordsToCollect[ordIdx]; + final TokenListCategorizer categorizer = (ord < categorizers.size()) ? categorizers.get(ord) : null; if (categorizer == null) { topBucketsPerOrd[ordIdx] = new Bucket[0]; continue; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java index cb5b98af29d57..29f298894477a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregatorTests.java @@ -322,6 +322,90 @@ public void testCategorizationWithSubAggsManyDocs() throws Exception { ); } + public void testCategorizationAsSubAggWithExtendedBounds() throws Exception { + // Test with more buckets than we have data for (via extended bounds in the histogram config). + // This will confirm that we don't try to read beyond the end of arrays of categorizers. 
+ int numHistoBuckets = 50; + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("histo").field(NUMERIC_FIELD_NAME) + .interval(1) + .extendedBounds(0, numHistoBuckets - 1) + .subAggregation(new CategorizeTextAggregationBuilder("my_agg", TEXT_FIELD_NAME)); + testCase(CategorizeTextAggregatorTests::writeTestDocs, (InternalHistogram histo) -> { + assertThat(histo.getBuckets(), hasSize(numHistoBuckets)); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(2L)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("0.0")); + InternalCategorizationAggregation categorizationAggregation = histo.getBuckets().get(0).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat( + categorizationAggregation.getBuckets().get(0).getKeyAsString(), + equalTo("Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception") + ); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(84)); + assertThat( + categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), + equalTo(".*?Failed.+?to.+?shutdown.+?error.+?org\\.aaaa\\.bbbb\\.Cccc.+?line.+?caused.+?by.+?foo.+?exception.*?") + ); + assertThat(categorizationAggregation.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(1).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("1.0")); + categorizationAggregation = histo.getBuckets().get(1).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2.0")); + categorizationAggregation = histo.getBuckets().get(2).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("3.0")); + categorizationAggregation = histo.getBuckets().get(3).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + 
assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(4).getDocCount(), equalTo(2L)); + assertThat(histo.getBuckets().get(4).getKeyAsString(), equalTo("4.0")); + categorizationAggregation = histo.getBuckets().get(4).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat( + categorizationAggregation.getBuckets().get(0).getKeyAsString(), + equalTo("Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception") + ); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(84)); + assertThat( + categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), + equalTo(".*?Failed.+?to.+?shutdown.+?error.+?org\\.aaaa\\.bbbb\\.Cccc.+?line.+?caused.+?by.+?foo.+?exception.*?") + ); + assertThat(categorizationAggregation.getBuckets().get(1).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(1).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(1).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + assertThat(histo.getBuckets().get(5).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(5).getKeyAsString(), equalTo("5.0")); + categorizationAggregation = histo.getBuckets().get(5).getAggregations().get("my_agg"); + assertThat(categorizationAggregation.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(categorizationAggregation.getBuckets().get(0).getKeyAsString(), equalTo("Node started")); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().maxMatchingStringLen(), equalTo(15)); + assertThat(categorizationAggregation.getBuckets().get(0).getSerializableCategory().getRegex(), equalTo(".*?Node.+?started.*?")); + for (int bucket = 6; bucket < numHistoBuckets; ++bucket) { + assertThat(histo.getBuckets().get(bucket).getDocCount(), equalTo(0L)); + } + }, + new AggTestConfig( + aggBuilder, + new TextFieldMapper.TextFieldType(TEXT_FIELD_NAME, randomBoolean()), + longField(NUMERIC_FIELD_NAME) + ) + ); + } + private static void writeTestDocs(RandomIndexWriter w) throws IOException { w.addDocument( Arrays.asList( From 882b92ab6092a8ea6b53a64e90cb12e812241483 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Wed, 6 Mar 2024 10:12:08 +0000 Subject: [PATCH 014/248] Add service for computing the optimal number of shards for data streams (#105498) This adds the `DataStreamAutoShardingService` that will compute the optimal number of shards for a data stream and return a recommendation as to when to apply it (a time interval we call cool down which is 0 when the auto sharding recommendation can be applied immediately). 
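For illustration only, here is a minimal sketch (not part of this change) of how a caller might act on the `AutoShardingResult` record introduced by this patch. The enum constants and record accessors below match the new `AutoShardingType` and `AutoShardingResult` classes added further down in this diff, while `applyNewShardCount` and `scheduleRecheck` are hypothetical helpers invented for the example:

```
// Hypothetical consumer of an AutoShardingResult; the helper methods are placeholders.
void onAutoShardingResult(AutoShardingResult result) {
    switch (result.type()) {
        case INCREASE_SHARDS, DECREASE_SHARDS ->
            // for these two types the cooldown is guaranteed to be zero (the record's
            // compact constructor rejects anything else), so the recommendation can be
            // applied on the next rollover of the data stream's write index
            applyNewShardCount(result.targetNumberOfShards());
        case COOLDOWN_PREVENTED_INCREASE, COOLDOWN_PREVENTED_DECREASE ->
            // a change is recommended, but the remaining cooldown must lapse first
            scheduleRecheck(result.coolDownRemaining());
        case NO_CHANGE_REQUIRED, NOT_APPLICABLE -> {
            // keep the current number of shards
        }
    }
}
```
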
This also introduces a `DataStreamAutoShardingEvent` object that will be stored in the data stream metadata to indicate the last auto sharding event that was applied to a data stream and its cluster state representation looks like so: ``` "auto_sharding": { "trigger_index_name": ".ds-logs-nginx-2024.02.12-000002", "target_number_of_shards": 3, "event_timestamp": 1707739707954 } ``` The auto sharding service is not used in this PR, so the auto sharding event will not be stored in the data stream metadata, but the required infrastructure to configure it is in place. --- .../datastreams/DataStreamIT.java | 3 +- .../DataStreamIndexSettingsProviderTests.java | 3 +- .../UpdateTimeSeriesRangeServiceTests.java | 3 +- .../action/GetDataStreamsResponseTests.java | 6 +- .../DataStreamLifecycleServiceTests.java | 3 +- server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 1 + .../datastreams/GetDataStreamAction.java | 17 + .../autosharding/AutoShardingResult.java | 59 ++ .../autosharding/AutoShardingType.java | 21 + .../DataStreamAutoShardingService.java | 415 ++++++++++ .../cluster/metadata/DataStream.java | 106 ++- .../metadata/DataStreamAutoShardingEvent.java | 84 ++ .../MetadataCreateDataStreamService.java | 3 +- .../metadata/MetadataDataStreamsService.java | 6 +- .../snapshots/RestoreService.java | 3 +- .../DataStreamAutoShardingServiceTests.java | 771 ++++++++++++++++++ .../DataStreamAutoShardingEventTests.java | 62 ++ .../cluster/metadata/DataStreamTests.java | 124 ++- .../MetadataDataStreamsServiceTests.java | 3 +- .../metadata/DataStreamTestHelper.java | 11 +- .../ccr/action/TransportPutFollowAction.java | 6 +- ...StreamLifecycleUsageTransportActionIT.java | 3 +- .../LicensedWriteLoadForecaster.java | 27 +- .../LicensedWriteLoadForecasterTests.java | 59 -- 25 files changed, 1681 insertions(+), 119 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java create mode 100644 server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java create mode 100644 server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java create mode 100644 server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 34f1701a595de..6c06511ccfbd1 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -1790,7 +1790,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { original.getIndexMode(), original.getLifecycle(), original.isFailureStore(), - original.getFailureIndices() + original.getFailureIndices(), + null ); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index c65854903f7a9..01ad1bb09b20f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -314,7 +314,8 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi IndexMode.TIME_SERIES, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + null ) ); Metadata metadata = mb.build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index dbb48ea3ddc26..abd5132edde16 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -153,7 +153,8 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { d.getIndexMode(), d.getLifecycle(), d.isFailureStore(), - d.getFailureIndices() + d.getFailureIndices(), + null ) ) .build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 13054379dd666..e200ff7cba2e1 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -89,7 +89,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti IndexMode.STANDARD, new DataStreamLifecycle(), true, - failureStores + failureStores, + null ); String ilmPolicyName = "rollover-30days"; @@ -198,7 +199,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti IndexMode.STANDARD, new DataStreamLifecycle(null, null, false), true, - failureStores + failureStores, + null ); String ilmPolicyName = "rollover-30days"; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 15f526d0a06d6..d0456d669663d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -295,7 +295,8 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { dataStream.getIndexMode(), DataStreamLifecycle.newBuilder().dataRetention(0L).build(), dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + null ) ); clusterState = ClusterState.builder(clusterState).metadata(builder).build(); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 36a940af63c61..9c142d18034c0 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -381,6 +381,7 @@ opens org.elasticsearch.common.logging to org.apache.logging.log4j.core; exports 
org.elasticsearch.action.datastreams.lifecycle; + exports org.elasticsearch.action.datastreams.autosharding; exports org.elasticsearch.action.downsample; exports org.elasticsearch.plugins.internal to diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 055fcb6d9cf7b..d484da5ba506c 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -135,6 +135,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_MODEL_IN_SERVICE_SETTINGS = def(8_595_00_0); public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0); public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); + public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 079c8f8b01ceb..8c469f7dffc4d 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -37,6 +38,7 @@ import java.util.Objects; import static org.elasticsearch.TransportVersions.V_8_11_X; +import static org.elasticsearch.cluster.metadata.DataStream.AUTO_SHARDING_FIELD; public class GetDataStreamAction extends ActionType { @@ -179,6 +181,10 @@ public static class DataStreamInfo implements SimpleDiffable, To public static final ParseField TEMPORAL_RANGES = new ParseField("temporal_ranges"); public static final ParseField TEMPORAL_RANGE_START = new ParseField("start"); public static final ParseField TEMPORAL_RANGE_END = new ParseField("end"); + public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT = new ParseField("time_since_last_auto_shard_event"); + public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS = new ParseField( + "time_since_last_auto_shard_event_millis" + ); private final DataStream dataStream; private final ClusterHealthStatus dataStreamStatus; @@ -348,6 +354,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla if (DataStream.isFailureStoreEnabled()) { builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); } + if (dataStream.getAutoShardingEvent() != null) { + DataStreamAutoShardingEvent autoShardingEvent = dataStream.getAutoShardingEvent(); + builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); + autoShardingEvent.toXContent(builder, params); + builder.humanReadableField( + TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS.getPreferredName(), + TIME_SINCE_LAST_AUTO_SHARD_EVENT.getPreferredName(), + autoShardingEvent.getTimeSinceLastAutoShardingEvent(System::currentTimeMillis) + ); + builder.endObject(); + } if (timeSeries != null) { builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); 
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java new file mode 100644 index 0000000000000..7bbd3291caf3a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingResult.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Arrays; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_DECREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_INCREASE; + +/** + * Represents an auto sharding recommendation. It includes the current and target number of shards together with a remaining cooldown + * period that needs to lapse before the current recommendation should be applied. + *
+ * If auto sharding is not applicable for a data stream (e.g. due to + * {@link DataStreamAutoShardingService#DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING}) the target number of shards will be -1 and cool down + * remaining {@link TimeValue#MAX_VALUE}. + */ +public record AutoShardingResult( + AutoShardingType type, + int currentNumberOfShards, + int targetNumberOfShards, + TimeValue coolDownRemaining, + @Nullable Double writeLoad +) { + + static final String COOLDOWN_PREVENTING_TYPES = Arrays.toString( + new AutoShardingType[] { COOLDOWN_PREVENTED_DECREASE, COOLDOWN_PREVENTED_INCREASE } + ); + + public AutoShardingResult { + if (type.equals(AutoShardingType.INCREASE_SHARDS) || type.equals(AutoShardingType.DECREASE_SHARDS)) { + if (coolDownRemaining.equals(TimeValue.ZERO) == false) { + throw new IllegalArgumentException( + "The increase/decrease shards events must have a cooldown period of zero. Use one of [" + + COOLDOWN_PREVENTING_TYPES + + "] types indead" + ); + } + } + } + + public static final AutoShardingResult NOT_APPLICABLE_RESULT = new AutoShardingResult( + AutoShardingType.NOT_APPLICABLE, + -1, + -1, + TimeValue.MAX_VALUE, + null + ); + +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java new file mode 100644 index 0000000000000..50d3027abbc88 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/AutoShardingType.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams.autosharding; + +/** + * Represents the type of recommendation the auto sharding service provided. + */ +public enum AutoShardingType { + INCREASE_SHARDS, + DECREASE_SHARDS, + COOLDOWN_PREVENTED_INCREASE, + COOLDOWN_PREVENTED_DECREASE, + NO_CHANGE_REQUIRED, + NOT_APPLICABLE +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java new file mode 100644 index 0000000000000..e830f538d222f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java @@ -0,0 +1,415 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadataStats; +import org.elasticsearch.cluster.metadata.IndexWriteLoad; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.Index; + +import java.util.List; +import java.util.Objects; +import java.util.OptionalDouble; +import java.util.function.Function; +import java.util.function.LongSupplier; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingResult.NOT_APPLICABLE_RESULT; + +/** + * Calculates the optimal number of shards the data stream write index should have based on the indexing load. + */ +public class DataStreamAutoShardingService { + + private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class); + public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = "data_streams.auto_sharding.enabled"; + + public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new NodeFeature("data_stream.auto_sharding"); + + public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting( + "data_streams.auto_sharding.excludes", + List.of("*"), + Function.identity(), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum amount of time between two scaling events if the next event will increase the number of shards. + * We've chosen a value of 4.5minutes by default, just lower than the data stream lifecycle poll interval so we can increase shards with + * every DSL run, but we don't want it to be lower/0 as data stream lifecycle might run more often than the poll interval in case of + * a master failover. + */ + public static final Setting DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN = Setting.timeSetting( + "data_streams.auto_sharding.increase_shards.cooldown", + TimeValue.timeValueSeconds(270), + TimeValue.timeValueSeconds(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum amount of time between two scaling events if the next event will reduce the number of shards. + */ + public static final Setting DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN = Setting.timeSetting( + "data_streams.auto_sharding.decrease_shards.cooldown", + TimeValue.timeValueDays(3), + TimeValue.timeValueSeconds(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the minimum number of write threads we expect a node to have in the environments where auto sharding will be enabled. 
+ */ + public static final Setting CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS = Setting.intSetting( + "cluster.auto_sharding.min_write_threads", + 2, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Represents the maximum number of write threads we expect a node to have in the environments where auto sharding will be enabled. + */ + public static final Setting CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS = Setting.intSetting( + "cluster.auto_sharding.max_write_threads", + 32, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private final ClusterService clusterService; + private final boolean isAutoShardingEnabled; + private final FeatureService featureService; + private final LongSupplier nowSupplier; + private volatile TimeValue increaseShardsCooldown; + private volatile TimeValue reduceShardsCooldown; + private volatile int minWriteThreads; + private volatile int maxWriteThreads; + private volatile List dataStreamExcludePatterns; + + public DataStreamAutoShardingService( + Settings settings, + ClusterService clusterService, + FeatureService featureService, + LongSupplier nowSupplier + ) { + this.clusterService = clusterService; + this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false); + this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings); + this.reduceShardsCooldown = DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN.get(settings); + this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings); + this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings); + this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings); + this.featureService = featureService; + this.nowSupplier = nowSupplier; + } + + public void init() { + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN, this::updateIncreaseShardsCooldown); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN, this::updateReduceShardsCooldown); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS, this::updateMinWriteThreads); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS, this::updateMaxWriteThreads); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING, this::updateDataStreamExcludePatterns); + } + + /** + * Computes the optimal number of shards for the provided data stream according to the write index's indexing load (to check if we must + * increase the number of shards, whilst the heuristics for decreasing the number of shards _might_ use the provided write indexing + * load). 
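For orientation, this is roughly how the service can be wired up and consulted. The snippet is an illustrative sketch only: clusterService, featureService, clusterState, dataStream, writeIndexLoad, logger and applyNewShardCount are hypothetical stand-ins and not part of this change.

    Settings settings = Settings.builder()
        .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true)
        .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of())
        .build();
    DataStreamAutoShardingService autoShardingService = new DataStreamAutoShardingService(
        settings,
        clusterService,
        featureService,
        System::currentTimeMillis
    );
    autoShardingService.init(); // registers the dynamic settings update consumers

    AutoShardingResult result = autoShardingService.calculate(clusterState, dataStream, writeIndexLoad);
    switch (result.type()) {
        case INCREASE_SHARDS, DECREASE_SHARDS -> applyNewShardCount(result.targetNumberOfShards());
        case COOLDOWN_PREVENTED_INCREASE, COOLDOWN_PREVENTED_DECREASE -> logger.debug(
            "[{}] remaining before moving to [{}] shards",
            result.coolDownRemaining(),
            result.targetNumberOfShards()
        );
        case NO_CHANGE_REQUIRED, NOT_APPLICABLE -> { /* nothing to do */ }
    }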
+ * The result type will indicate the recommendation of the auto sharding service: + * - not applicable if the data stream is excluded from auto sharding as configured by + * {@link #DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING} or if the auto sharding functionality is disabled according to + * {@link #DATA_STREAMS_AUTO_SHARDING_ENABLED}, or if the cluster doesn't have the feature available + * - increase the number of shards if the optimal number of shards it deems necessary for the provided data stream is GT the current number + * of shards + * - decrease the number of shards if the optimal number of shards it deems necessary for the provided data stream is LT the current + * number of shards + * + * If the recommendation is to INCREASE/DECREASE shards, the reported cooldown period will be TimeValue.ZERO. + * If the auto sharding service thinks the number of shards must be changed but it can't recommend a change due to the cooldown + * period not lapsing, the result will be of type {@link AutoShardingType#COOLDOWN_PREVENTED_INCREASE} or + * {@link AutoShardingType#COOLDOWN_PREVENTED_DECREASE} with the remaining cooldown configured and the number of shards that should + * be configured for the data stream once the remaining cooldown lapses as the target number of shards. + * + * The NOT_APPLICABLE type result will report a cooldown period of TimeValue.MAX_VALUE. + * + * The NO_CHANGE_REQUIRED type will always report a cool down period of TimeValue.ZERO (as + * there'll be no new auto sharding event). + */ + public AutoShardingResult calculate(ClusterState state, DataStream dataStream, @Nullable Double writeIndexLoad) { + Metadata metadata = state.metadata(); + if (isAutoShardingEnabled == false) { + logger.debug("Data stream auto sharding service is not enabled."); + return NOT_APPLICABLE_RESULT; + } + + if (featureService.clusterHasFeature(state, DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) { + logger.debug( + "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster " + + "doesn't have the auto sharding feature", + dataStream.getName() + ); + return NOT_APPLICABLE_RESULT; + } + + if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) { + logger.debug( + "Data stream [{}] is excluded from auto sharding via the [{}] setting", + dataStream.getName(), + DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey() + ); + return NOT_APPLICABLE_RESULT; + } + + if (writeIndexLoad == null) { + logger.debug( + "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] as the write index " + + "load is not available", + dataStream.getName() + ); + return NOT_APPLICABLE_RESULT; + } + return innerCalculate(metadata, dataStream, writeIndexLoad, nowSupplier); + } + + private AutoShardingResult innerCalculate(Metadata metadata, DataStream dataStream, double writeIndexLoad, LongSupplier nowSupplier) { + // increasing the number of shards is calculated solely based on the index load of the write index + IndexMetadata writeIndex = metadata.index(dataStream.getWriteIndex()); + assert writeIndex != null : "the data stream write index must exist in the provided cluster metadata"; + AutoShardingResult increaseShardsResult = getIncreaseShardsResult(dataStream, writeIndexLoad, nowSupplier, writeIndex); + return Objects.requireNonNullElseGet( + increaseShardsResult, + () ->
getDecreaseShardsResult( + metadata, + dataStream, + writeIndexLoad, + nowSupplier, + writeIndex, + getRemainingDecreaseShardsCooldown(metadata, dataStream) + ) + ); + + } + + @Nullable + private AutoShardingResult getIncreaseShardsResult( + DataStream dataStream, + double writeIndexLoad, + LongSupplier nowSupplier, + IndexMetadata writeIndex + ) { + // increasing the number of shards is calculated solely based on the index load of the write index + long optimalShardCount = computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, writeIndexLoad); + if (optimalShardCount > writeIndex.getNumberOfShards()) { + TimeValue timeSinceLastAutoShardingEvent = dataStream.getAutoShardingEvent() != null + ? dataStream.getAutoShardingEvent().getTimeSinceLastAutoShardingEvent(nowSupplier) + : TimeValue.MAX_VALUE; + + TimeValue coolDownRemaining = TimeValue.timeValueMillis( + Math.max(0L, increaseShardsCooldown.millis() - timeSinceLastAutoShardingEvent.millis()) + ); + logger.debug( + "data stream autosharding service recommends increasing the number of shards from [{}] to [{}] after [{}] cooldown for " + + "data stream [{}]", + writeIndex.getNumberOfShards(), + optimalShardCount, + coolDownRemaining, + dataStream.getName() + ); + return new AutoShardingResult( + coolDownRemaining.equals(TimeValue.ZERO) ? AutoShardingType.INCREASE_SHARDS : AutoShardingType.COOLDOWN_PREVENTED_INCREASE, + writeIndex.getNumberOfShards(), + Math.toIntExact(optimalShardCount), + coolDownRemaining, + writeIndexLoad + ); + } + return null; + } + + /** + * Calculates the amount of time remaining before we can consider reducing the number of shards. + * This reference for the remaining time math is either the time since the last auto sharding event (if available) or otherwise the + * oldest index in the data stream. + */ + private TimeValue getRemainingDecreaseShardsCooldown(Metadata metadata, DataStream dataStream) { + Index oldestBackingIndex = dataStream.getIndices().get(0); + IndexMetadata oldestIndexMeta = metadata.getIndexSafe(oldestBackingIndex); + + return dataStream.getAutoShardingEvent() == null + // without a pre-existing auto sharding event we wait until the oldest index has been created longer than the decrease_shards + // cool down period "ago" so we don't immediately reduce the number of shards after a data stream is created + ? 
TimeValue.timeValueMillis( + Math.max(0L, oldestIndexMeta.getCreationDate() + reduceShardsCooldown.millis() - nowSupplier.getAsLong()) + ) + : TimeValue.timeValueMillis( + Math.max( + 0L, + reduceShardsCooldown.millis() - dataStream.getAutoShardingEvent() + .getTimeSinceLastAutoShardingEvent(nowSupplier) + .millis() + ) + ); + } + + private AutoShardingResult getDecreaseShardsResult( + Metadata metadata, + DataStream dataStream, + double writeIndexLoad, + LongSupplier nowSupplier, + IndexMetadata writeIndex, + TimeValue remainingReduceShardsCooldown + ) { + double maxIndexLoadWithinCoolingPeriod = getMaxIndexLoadWithinCoolingPeriod( + metadata, + dataStream, + writeIndexLoad, + reduceShardsCooldown, + nowSupplier + ); + + logger.trace( + "calculating the optimal number of shards for a potential decrease in number of shards for data stream [{}] with the" + + " max indexing load [{}] over the decrease shards cool down period", + dataStream.getName(), + maxIndexLoadWithinCoolingPeriod + ); + long optimalShardCount = computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, maxIndexLoadWithinCoolingPeriod); + if (optimalShardCount < writeIndex.getNumberOfShards()) { + logger.debug( + "data stream autosharding service recommends decreasing the number of shards from [{}] to [{}] after [{}] cooldown for " + + "data stream [{}]", + writeIndex.getNumberOfShards(), + optimalShardCount, + remainingReduceShardsCooldown, + dataStream.getName() + ); + + // we should reduce the number of shards + return new AutoShardingResult( + remainingReduceShardsCooldown.equals(TimeValue.ZERO) + ? AutoShardingType.DECREASE_SHARDS + : AutoShardingType.COOLDOWN_PREVENTED_DECREASE, + writeIndex.getNumberOfShards(), + Math.toIntExact(optimalShardCount), + remainingReduceShardsCooldown, + maxIndexLoadWithinCoolingPeriod + ); + } + + logger.trace( + "data stream autosharding service recommends maintaining the number of shards [{}] for data stream [{}]", + writeIndex.getNumberOfShards(), + dataStream.getName() + ); + return new AutoShardingResult( + AutoShardingType.NO_CHANGE_REQUIRED, + writeIndex.getNumberOfShards(), + writeIndex.getNumberOfShards(), + TimeValue.ZERO, + maxIndexLoadWithinCoolingPeriod + ); + } + + // Visible for testing + static long computeOptimalNumberOfShards(int minNumberWriteThreads, int maxNumberWriteThreads, double indexingLoad) { + return Math.max( + Math.min(roundUp(indexingLoad / (minNumberWriteThreads / 2.0)), 3), + roundUp(indexingLoad / (maxNumberWriteThreads / 2.0)) + ); + } + + private static long roundUp(double value) { + return (long) Math.ceil(value); + } + + // Visible for testing + /** + * Calculates the maximum write index load observed for the provided data stream across all the backing indices that were created + * during the provide {@param coolingPeriod} (note: to cover the entire cooling period, the backing index created before the cooling + * period is also considered). 
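The load fed into the shard-count formula is the current write index load when checking for an increase, and the maximum index load observed over the cooling period (computed by the method documented above) when checking for a decrease. With the default write thread bounds of 2 and 32, the arithmetic of computeOptimalNumberOfShards works out as sketched below; the expected values line up with testComputeOptimalNumberOfShards further down in this patch.

    // optimal = max( min( ceil(load / (minThreads / 2)), 3 ), ceil(load / (maxThreads / 2)) )
    // load 0.5   -> max(min(ceil(0.5 / 1.0), 3),   ceil(0.5 / 16.0))   = max(1, 1) = 1 shard
    // load 49.0  -> max(min(ceil(49.0 / 1.0), 3),  ceil(49.0 / 16.0))  = max(3, 4) = 4 shards
    // load 100.0 -> max(min(ceil(100.0 / 1.0), 3), ceil(100.0 / 16.0)) = max(3, 7) = 7 shards
    long shards = DataStreamAutoShardingService.computeOptimalNumberOfShards(2, 32, 49.0); // 4 (package-private, visible for testing)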
+ */ + static double getMaxIndexLoadWithinCoolingPeriod( + Metadata metadata, + DataStream dataStream, + double writeIndexLoad, + TimeValue coolingPeriod, + LongSupplier nowSupplier + ) { + // for reducing the number of shards we look at more than just the write index + List writeLoadsWithinCoolingPeriod = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadata::getIndexSafe, + coolingPeriod, + nowSupplier + ) + .stream() + .filter(index -> index.equals(dataStream.getWriteIndex()) == false) + .map(metadata::index) + .filter(Objects::nonNull) + .map(IndexMetadata::getStats) + .filter(Objects::nonNull) + .map(IndexMetadataStats::writeLoad) + .filter(Objects::nonNull) + .toList(); + + // assume the current write index load is the highest observed and look back to find the actual maximum + double maxIndexLoadWithinCoolingPeriod = writeIndexLoad; + for (IndexWriteLoad writeLoad : writeLoadsWithinCoolingPeriod) { + double totalIndexLoad = 0; + for (int shardId = 0; shardId < writeLoad.numberOfShards(); shardId++) { + final OptionalDouble writeLoadForShard = writeLoad.getWriteLoadForShard(shardId); + totalIndexLoad += writeLoadForShard.orElse(0); + } + + if (totalIndexLoad > maxIndexLoadWithinCoolingPeriod) { + maxIndexLoadWithinCoolingPeriod = totalIndexLoad; + } + } + return maxIndexLoadWithinCoolingPeriod; + } + + void updateIncreaseShardsCooldown(TimeValue scaleUpCooldown) { + this.increaseShardsCooldown = scaleUpCooldown; + } + + void updateReduceShardsCooldown(TimeValue scaleDownCooldown) { + this.reduceShardsCooldown = scaleDownCooldown; + } + + void updateMinWriteThreads(int minNumberWriteThreads) { + this.minWriteThreads = minNumberWriteThreads; + } + + void updateMaxWriteThreads(int maxNumberWriteThreads) { + this.maxWriteThreads = maxNumberWriteThreads; + } + + private void updateDataStreamExcludePatterns(List newExcludePatterns) { + this.dataStreamExcludePatterns = newExcludePatterns; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 1bcfdba1d16f4..66cef1ea49af0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -70,6 +70,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0; + public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.DATA_STREAM_AUTO_SHARDING_EVENT; public static boolean isFailureStoreEnabled() { return FAILURE_STORE_FEATURE_FLAG.isEnabled(); @@ -113,6 +114,8 @@ public static boolean isFailureStoreEnabled() { private final boolean rolloverOnWrite; private final boolean failureStore; private final List failureIndices; + @Nullable + private final DataStreamAutoShardingEvent autoShardingEvent; public DataStream( String name, @@ -126,7 +129,8 @@ public DataStream( IndexMode indexMode, DataStreamLifecycle lifecycle, boolean failureStore, - List failureIndices + List failureIndices, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this( name, @@ -142,7 +146,8 @@ public DataStream( lifecycle, failureStore, failureIndices, - false + false, + autoShardingEvent ); } @@ -159,7 +164,8 @@ public DataStream( DataStreamLifecycle lifecycle, boolean failureStore, List 
failureIndices, - boolean rolloverOnWrite + boolean rolloverOnWrite, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this( name, @@ -175,7 +181,8 @@ public DataStream( lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -194,7 +201,8 @@ public DataStream( DataStreamLifecycle lifecycle, boolean failureStore, List failureIndices, - boolean rolloverOnWrite + boolean rolloverOnWrite, + @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { this.name = name; this.indices = List.copyOf(indices); @@ -213,6 +221,7 @@ public DataStream( this.failureIndices = failureIndices; assert assertConsistent(this.indices); this.rolloverOnWrite = rolloverOnWrite; + this.autoShardingEvent = autoShardingEvent; } // mainly available for testing @@ -227,7 +236,7 @@ public DataStream( boolean allowCustomRouting, IndexMode indexMode ) { - this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of()); + this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of(), null); } private static boolean assertConsistent(List indices) { @@ -412,6 +421,13 @@ public DataStreamLifecycle getLifecycle() { return lifecycle; } + /** + * Returns the latest auto sharding event that happened for this data stream + */ + public DataStreamAutoShardingEvent getAutoShardingEvent() { + return autoShardingEvent; + } + /** * Performs a rollover on a {@code DataStream} instance and returns a new instance containing * the updated list of backing indices and incremented generation. @@ -456,7 +472,8 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -534,7 +551,8 @@ public DataStream removeBackingIndex(Index index) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -579,7 +597,8 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -639,7 +658,8 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -658,7 +678,8 @@ public DataStream promoteDataStream() { lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -694,7 +715,8 @@ public DataStream snapshot(Collection indicesInSnapshot) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -909,7 +931,10 @@ public DataStream(StreamInput in) throws IOException { in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(), - in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false + in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false, + in.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION) + ? 
in.readOptionalWriteable(DataStreamAutoShardingEvent::new) + : null ); } @@ -953,6 +978,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) { out.writeBoolean(rolloverOnWrite); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION)) { + out.writeOptionalWriteable(autoShardingEvent); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -969,13 +997,14 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write"); + public static final ParseField AUTO_SHARDING_FIELD = new ParseField("auto_sharding"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_stream", args -> { // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled. // Until the feature flag is removed we keep them separately to be mindful of this. - boolean failureStoreEnabled = DataStream.isFailureStoreEnabled() && args[11] != null && (boolean) args[11]; - List failureStoreIndices = DataStream.isFailureStoreEnabled() && args[12] != null ? (List) args[12] : List.of(); + boolean failureStoreEnabled = DataStream.isFailureStoreEnabled() && args[12] != null && (boolean) args[12]; + List failureStoreIndices = DataStream.isFailureStoreEnabled() && args[13] != null ? (List) args[13] : List.of(); return new DataStream( (String) args[0], (List) args[1], @@ -989,7 +1018,8 @@ public void writeTo(StreamOutput out) throws IOException { (DataStreamLifecycle) args[9], failureStoreEnabled, failureStoreIndices, - args[10] != null && (boolean) args[10] + args[10] != null && (boolean) args[10], + (DataStreamAutoShardingEvent) args[11] ); }); @@ -1013,6 +1043,11 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ROLLOVER_ON_WRITE_FIELD); + PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataStreamAutoShardingEvent.fromXContent(p), + AUTO_SHARDING_FIELD + ); // The fields behind the feature flag should always be last. 
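With the auto_sharding parse field declared above and the corresponding toXContent change below, a data stream's XContent representation can now carry an optional auto_sharding object. A rough sketch of its shape, with all values invented for illustration:

    // "auto_sharding" : {
    //   "trigger_index_name" : ".ds-my-data-stream-2024.03.05-000004",
    //   "target_number_of_shards" : 3,
    //   "event_time_millis" : 1709650000000
    // }
    // A human-readable "event_time" field is emitted alongside event_time_millis when human-readable output is requested.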
if (DataStream.isFailureStoreEnabled()) { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); @@ -1067,6 +1102,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla lifecycle.toXContent(builder, params, rolloverConfiguration); } builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), rolloverOnWrite); + if (autoShardingEvent != null) { + builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); + autoShardingEvent.toXContent(builder, params); + builder.endObject(); + } builder.endObject(); return builder; } @@ -1088,7 +1128,8 @@ public boolean equals(Object o) { && Objects.equals(lifecycle, that.lifecycle) && failureStore == that.failureStore && failureIndices.equals(that.failureIndices) - && rolloverOnWrite == that.rolloverOnWrite; + && rolloverOnWrite == that.rolloverOnWrite + && Objects.equals(autoShardingEvent, that.autoShardingEvent); } @Override @@ -1106,7 +1147,8 @@ public int hashCode() { lifecycle, failureStore, failureIndices, - rolloverOnWrite + rolloverOnWrite, + autoShardingEvent ); } @@ -1169,6 +1211,34 @@ public DataStream getParentDataStream() { "strict_date_optional_time_nanos||strict_date_optional_time||epoch_millis" ); + /** + * Returns the indices created within the {@param maxIndexAge} interval. Note that this strives to cover + * the entire {@param maxIndexAge} interval so one backing index created before the specified age will also + * be return. + */ + public static List getIndicesWithinMaxAgeRange( + DataStream dataStream, + Function indexProvider, + TimeValue maxIndexAge, + LongSupplier nowSupplier + ) { + final List dataStreamIndices = dataStream.getIndices(); + final long currentTimeMillis = nowSupplier.getAsLong(); + // Consider at least 1 index (including the write index) for cases where rollovers happen less often than maxIndexAge + int firstIndexWithinAgeRange = Math.max(dataStreamIndices.size() - 2, 0); + for (int i = 0; i < dataStreamIndices.size(); i++) { + Index index = dataStreamIndices.get(i); + final IndexMetadata indexMetadata = indexProvider.apply(index); + final long indexAge = currentTimeMillis - indexMetadata.getCreationDate(); + if (indexAge < maxIndexAge.getMillis()) { + // We need to consider the previous index too in order to cover the entire max-index-age range. + firstIndexWithinAgeRange = i == 0 ? 0 : i - 1; + break; + } + } + return dataStreamIndices.subList(firstIndexWithinAgeRange, dataStreamIndices.size()); + } + private static Instant getTimeStampFromRaw(Object rawTimestamp) { try { if (rawTimestamp instanceof Long lTimestamp) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java new file mode 100644 index 0000000000000..ff143681827ca --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEvent.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
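The DataStreamAutoShardingEvent record introduced in this new file is what the service's cooldown checks hinge on. As a small illustrative sketch (index name and timestamps invented; 270 seconds is the default increase-shards cooldown):

    long nowMillis = System.currentTimeMillis();
    // An event recorded one minute ago, triggered by generation 4 of the data stream, targeting 3 shards.
    DataStreamAutoShardingEvent event = new DataStreamAutoShardingEvent(".ds-my-data-stream-2024.03.05-000004", 3, nowMillis - 60_000L);
    TimeValue sinceLastEvent = event.getTimeSinceLastAutoShardingEvent(() -> nowMillis); // 1 minute
    // 270s - 60s = 210s still have to lapse, so the service would report COOLDOWN_PREVENTED_INCREASE rather than INCREASE_SHARDS.
    TimeValue remaining = TimeValue.timeValueMillis(Math.max(0L, TimeValue.timeValueSeconds(270).millis() - sinceLastEvent.millis()));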
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.function.LongSupplier; + +/** + * Represents the last auto sharding event that occurred for a data stream. + */ +public record DataStreamAutoShardingEvent(String triggerIndexName, int targetNumberOfShards, long timestamp) + implements + SimpleDiffable, + ToXContentFragment { + + public static final ParseField TRIGGER_INDEX_NAME = new ParseField("trigger_index_name"); + public static final ParseField TARGET_NUMBER_OF_SHARDS = new ParseField("target_number_of_shards"); + public static final ParseField EVENT_TIME = new ParseField("event_time"); + public static final ParseField EVENT_TIME_MILLIS = new ParseField("event_time_millis"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "auto_sharding", + false, + (args, unused) -> new DataStreamAutoShardingEvent((String) args[0], (int) args[1], (long) args[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TRIGGER_INDEX_NAME); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), TARGET_NUMBER_OF_SHARDS); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), EVENT_TIME_MILLIS); + } + + public static DataStreamAutoShardingEvent fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamAutoShardingEvent::new, in); + } + + DataStreamAutoShardingEvent(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readVLong()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(TRIGGER_INDEX_NAME.getPreferredName(), triggerIndexName); + builder.field(TARGET_NUMBER_OF_SHARDS.getPreferredName(), targetNumberOfShards); + builder.humanReadableField( + EVENT_TIME_MILLIS.getPreferredName(), + EVENT_TIME.getPreferredName(), + TimeValue.timeValueMillis(timestamp) + ); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(triggerIndexName); + out.writeVInt(targetNumberOfShards); + out.writeVLong(timestamp); + } + + public TimeValue getTimeSinceLastAutoShardingEvent(LongSupplier now) { + return TimeValue.timeValueMillis(Math.max(0L, now.getAsLong() - timestamp)); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index d500a8b8e6876..20b28edef5ca2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -314,7 +314,8 @@ static ClusterState createDataStream( indexMode, lifecycle == null && isDslOnlyMode ?
DataStreamLifecycle.DEFAULT : lifecycle, template.getDataStreamTemplate().hasFailureStore(), - failureIndices + failureIndices, + null ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 6b81aa230f0d9..4006bc8d1a94a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -212,7 +212,8 @@ static ClusterState updateDataLifecycle( dataStream.getIndexMode(), lifecycle, dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + dataStream.getAutoShardingEvent() ) ); } @@ -249,7 +250,8 @@ public static ClusterState setRolloverOnWrite(ClusterState currentState, String dataStream.getLifecycle(), dataStream.isFailureStore(), dataStream.getFailureIndices(), - rolloverOnWrite + rolloverOnWrite, + dataStream.getAutoShardingEvent() ) ); return ClusterState.builder(currentState).metadata(builder.build()).build(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 9ac76e653b640..4b6e3f30fe6fa 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -716,7 +716,8 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad dataStream.getIndexMode(), dataStream.getLifecycle(), dataStream.isFailureStore(), - dataStream.getFailureIndices() + dataStream.getFailureIndices(), + dataStream.getAutoShardingEvent() ); } diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java new file mode 100644 index 0000000000000..674b3e855e912 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -0,0 +1,771 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.datastreams.autosharding; + +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadataStats; +import org.elasticsearch.cluster.metadata.IndexWriteLoad; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingResult.NOT_APPLICABLE_RESULT; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_DECREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_INCREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.DECREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.INCREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.NO_CHANGE_REQUIRED; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.is; + +public class DataStreamAutoShardingServiceTests extends ESTestCase { + + private ClusterService clusterService; + private ThreadPool threadPool; + private DataStreamAutoShardingService service; + private long now; + String dataStreamName; + + @Before + public void setupService() { + threadPool = new TestThreadPool(getTestName()); + Set> builtInClusterSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS); + builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS); + builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN); + builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN); + builtInClusterSettings.add( + Setting.boolSetting( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, builtInClusterSettings); + 
clusterService = createClusterService(threadPool, clusterSettings); + now = System.currentTimeMillis(); + service = new DataStreamAutoShardingService( + Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of()) + .build(), + clusterService, + new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); + } + })), + () -> now + ); + dataStreamName = randomAlphaOfLengthBetween(10, 100); + logger.info("-> data stream name is [{}]", dataStreamName); + } + + @After + public void cleanup() { + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testCalculateValidations() { + Metadata.Builder builder = Metadata.builder(); + DataStream dataStream = createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 3000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + null + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + { + // autosharding disabled + DataStreamAutoShardingService disabledAutoshardingService = new DataStreamAutoShardingService( + Settings.EMPTY, + clusterService, + new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE); + } + })), + System::currentTimeMillis + ); + + AutoShardingResult autoShardingResult = disabledAutoshardingService.calculate(state, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // cluster doesn't have feature + ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder()).build(); + + DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( + Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of()) + .build(), + clusterService, + new FeatureService(List.of()), + () -> now + ); + + AutoShardingResult autoShardingResult = noFeatureService.calculate(stateNoFeature, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // patterns are configured to exclude the current data stream + DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( + Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), + List.of("foo", dataStreamName + "*") + ) + .build(), + clusterService, + new FeatureService(List.of()), + () -> now + ); + + AutoShardingResult autoShardingResult = noFeatureService.calculate(state, dataStream, 2.0); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + + { + // null write load passed + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, null); + assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT)); + } + } + + public void 
testCalculateIncreaseShardingRecommendations() { + // the input is a data stream with 5 backing indices with 1 shard each + // all 4 backing indices have a write load of 2.0 + // we'll recreate it across the test and add an auto sharding event as we iterate + { + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5); + assertThat(autoShardingResult.type(), is(INCREASE_SHARDS)); + // no pre-existing scaling event so the cool down must be zero + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + } + + { + // let's add a pre-existing sharding event so that we'll return some cool down period that's preventing an INCREASE_SHARDS + // event so the result type we're expecting is COOLDOWN_PREVENTED_INCREASE + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(1, 2.0), + autoShardingEvent + ); + + // generation 4 triggered an auto sharding event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 4), 2, now - 1005) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5); + assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_INCREASE)); + // no pre-existing scaling event so the cool down must be zero + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + // it's been 1005 millis since the last auto sharding event and the cool down is 270secoinds (270_000 millis) + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueMillis(268995))); + } + + { + // let's test a subsequent increase in the number of shards after a previos auto sharding event + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 1, + now, + List.of(now - 10_000_000, now - 7_000_000, now - 2_000_000, now - 1_000_000, now - 1000), + getWriteLoad(1, 2.0), + autoShardingEvent + ); + + // generation 3 triggered an increase in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 4), 2, now - 2_000_100) + ); + builder.put(dataStream); + ClusterState state = 
ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 2.5); + assertThat(autoShardingResult.type(), is(INCREASE_SHARDS)); + // no pre-existing scaling event so the cool down must be zero + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + } + + public void testCalculateDecreaseShardingRecommendations() { + // the input is a data stream with 5 backing indices with 3 shards each + { + // testing a decrease shards events prevented by the cool down period not lapsing due to the oldest generation index being + // "too new" (i.e. the cool down period hasn't lapsed since the oldest generation index) + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), + getWriteLoad(3, 0.25), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + // the cooldown period for the decrease shards event hasn't lapsed since the data stream was created + assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueMillis(TimeValue.timeValueDays(3).millis() - 10_000))); + // based on the write load of 0.75 we should be reducing the number of shards to 1 + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + } + + { + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.333), + autoShardingEvent + ); + + DataStream dataStream = dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(DECREASE_SHARDS)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + // no pre-existing auto sharding event however we have old enough backing indices (older than the cooldown period) so we can + // make a decision to reduce the number of shards + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + + { + // 
let's test a decrease in number of shards after a previous decrease event + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), // triggers auto sharding event + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.333), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + 2, + now - TimeValue.timeValueDays(4).getMillis() + ) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(DECREASE_SHARDS)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + + { + // let's test a decrease in number of shards that's prevented by the cool down period due to a previous sharding event + // the expected result type here is COOLDOWN_PREVENTED_DECREASE + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), // triggers auto sharding event + now - TimeValue.timeValueDays(1).getMillis(), + now - 1000 + ), + getWriteLoad(3, 0.25), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = dataStreamSupplier.apply( + new DataStreamAutoShardingEvent( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + 2, + now - TimeValue.timeValueDays(2).getMillis() + ) + ); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0); + assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE)); + assertThat(autoShardingResult.targetNumberOfShards(), is(1)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueDays(1))); + } + + { + // no change required + Metadata.Builder builder = Metadata.builder(); + Function dataStreamSupplier = (autoShardingEvent) -> createDataStream( + builder, + dataStreamName, + 3, + now, + List.of( + now - TimeValue.timeValueDays(21).getMillis(), + now - TimeValue.timeValueDays(15).getMillis(), + now - TimeValue.timeValueDays(4).getMillis(), + now - TimeValue.timeValueDays(2).getMillis(), + now - 1000 + ), + getWriteLoad(3, 1.333), + autoShardingEvent + ); + + // generation 2 triggered a decrease in shards event to 2 shards + DataStream dataStream = 
dataStreamSupplier.apply(null); + builder.put(dataStream); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodeFeatures( + Map.of( + "n1", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), + "n2", + Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()) + ) + ) + .metadata(builder) + .build(); + + AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 4.0); + assertThat(autoShardingResult.type(), is(NO_CHANGE_REQUIRED)); + assertThat(autoShardingResult.targetNumberOfShards(), is(3)); + assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO)); + } + } + + public void testComputeOptimalNumberOfShards() { + int minWriteThreads = 2; + int maxWriteThreads = 32; + { + // the small values will be very common so let's randomise to make sure we never go below 1L + double indexingLoad = randomDoubleBetween(0.0001, 1.0, true); + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(1L)); + } + + { + double indexingLoad = 2.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(2L)); + } + + { + // there's a broad range of popular values (a write index starting to be very busy, using between 3 and all of the 32 write + // threads, so let's randomise this too to make sure we stay at 3 recommended shards) + double indexingLoad = randomDoubleBetween(3.0002, 32.0, true); + logger.info("-> indexingLoad {}", indexingLoad); + + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(3L)); + } + + { + double indexingLoad = 49.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(4L)); + } + + { + double indexingLoad = 70.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(5L)); + } + + { + double indexingLoad = 100.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(7L)); + } + + { + double indexingLoad = 180.0; + logger.info("-> indexingLoad {}", indexingLoad); + assertThat(DataStreamAutoShardingService.computeOptimalNumberOfShards(minWriteThreads, maxWriteThreads, indexingLoad), is(12L)); + } + } + + public void testGetMaxIndexLoadWithinCoolingPeriod() { + final TimeValue coolingPeriod = TimeValue.timeValueDays(3); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesOutsideCoolingPeriod = randomIntBetween(3, 10); + final int numberOfBackingIndicesWithinCoolingPeriod = randomIntBetween(3, 10); + final List backingIndices = new ArrayList<>(); + final String dataStreamName = "logs"; + long now = System.currentTimeMillis(); + + // to cover the entire cooling period we'll also include the backing index right before the index age calculation + // this flag makes that index have a very low or very high write load + boolean lastIndexBeforeCoolingPeriodHasLowWriteLoad = randomBoolean(); + for (int i = 0; i < numberOfBackingIndicesOutsideCoolingPeriod; i++) { + long creationDate = now - 
(coolingPeriod.millis() * 2); + IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + 1, + getWriteLoad(1, 999.0), + creationDate + ); + + if (lastIndexBeforeCoolingPeriodHasLowWriteLoad) { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + 1, + getWriteLoad(1, 1.0), + creationDate + ); + } + backingIndices.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + for (int i = 0; i < numberOfBackingIndicesWithinCoolingPeriod; i++) { + final long createdAt = now - (coolingPeriod.getMillis() / 2); + IndexMetadata indexMetadata; + if (i == numberOfBackingIndicesWithinCoolingPeriod - 1) { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + 3, + getWriteLoad(3, 5.0), // max write index within cooling period + createdAt + ); + } else { + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + 3, + getWriteLoad(3, 3.0), // each backing index has a write load of 9.0 + createdAt + ); + } + backingIndices.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, 3, getWriteLoad(3, 1.0), System.currentTimeMillis()); + backingIndices.add(writeIndexMetadata.getIndex()); + metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + + metadataBuilder.put(dataStream); + + double maxIndexLoadWithinCoolingPeriod = DataStreamAutoShardingService.getMaxIndexLoadWithinCoolingPeriod( + metadataBuilder.build(), + dataStream, + 3.0, + coolingPeriod, + () -> now + ); + // to cover the entire cooldown period, the last index before the cooling period is taken into account + assertThat(maxIndexLoadWithinCoolingPeriod, is(lastIndexBeforeCoolingPeriodHasLowWriteLoad ? 
15.0 : 999.0)); + } + + public void testAutoShardingResultValidation() { + { + // throws exception when constructed using types that shouldn't report cooldowns + expectThrows( + IllegalArgumentException.class, + () -> new AutoShardingResult(INCREASE_SHARDS, 1, 3, TimeValue.timeValueSeconds(3), 3.0) + ); + + expectThrows( + IllegalArgumentException.class, + () -> new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.timeValueSeconds(3), 1.0) + ); + + } + + { + // we can successfully create results with cooldown period for the designated types + AutoShardingResult cooldownPreventedIncrease = new AutoShardingResult( + COOLDOWN_PREVENTED_INCREASE, + 1, + 3, + TimeValue.timeValueSeconds(3), + 3.0 + ); + assertThat(cooldownPreventedIncrease.coolDownRemaining(), is(TimeValue.timeValueSeconds(3))); + + AutoShardingResult cooldownPreventedDecrease = new AutoShardingResult( + COOLDOWN_PREVENTED_DECREASE, + 3, + 1, + TimeValue.timeValueSeconds(7), + 1.0 + ); + assertThat(cooldownPreventedDecrease.coolDownRemaining(), is(TimeValue.timeValueSeconds(7))); + } + } + + private DataStream createDataStream( + Metadata.Builder builder, + String dataStreamName, + int numberOfShards, + Long now, + List indicesCreationDate, + IndexWriteLoad backingIndicesWriteLoad, + @Nullable DataStreamAutoShardingEvent autoShardingEvent + ) { + final List backingIndices = new ArrayList<>(); + int backingIndicesCount = indicesCreationDate.size(); + for (int k = 0; k < indicesCreationDate.size(); k++) { + long createdAt = indicesCreationDate.get(k); + IndexMetadata.Builder indexMetaBuilder; + if (k < backingIndicesCount - 1) { + indexMetaBuilder = IndexMetadata.builder( + createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, k + 1), + numberOfShards, + backingIndicesWriteLoad, + createdAt + ) + ); + // add rollover info only for non-write indices + MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(now - 2000L)); + indexMetaBuilder.putRolloverInfo(new RolloverInfo(dataStreamName, List.of(rolloverCondition), now - 2000L)); + } else { + // write index + indexMetaBuilder = IndexMetadata.builder( + createIndexMetadata(DataStream.getDefaultBackingIndexName(dataStreamName, k + 1), numberOfShards, null, createdAt) + ); + } + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + } + return new DataStream( + dataStreamName, + backingIndices, + backingIndicesCount, + null, + false, + false, + false, + false, + null, + null, + false, + List.of(), + autoShardingEvent + ); + } + + private IndexMetadata createIndexMetadata( + String indexName, + int numberOfShards, + @Nullable IndexWriteLoad indexWriteLoad, + long createdAt + ) { + return IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build() + ) + .stats(indexWriteLoad == null ? 
null : new IndexMetadataStats(indexWriteLoad, 1, 1)) + .creationDate(createdAt) + .build(); + } + + private IndexWriteLoad getWriteLoad(int numberOfShards, double shardWriteLoad) { + IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); + for (int shardId = 0; shardId < numberOfShards; shardId++) { + builder.withShardWriteLoad(shardId, shardWriteLoad, randomLongBetween(1, 10)); + } + return builder.build(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java new file mode 100644 index 0000000000000..925c204fa5b27 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamAutoShardingEventTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SimpleDiffableSerializationTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class DataStreamAutoShardingEventTests extends SimpleDiffableSerializationTestCase { + + @Override + protected DataStreamAutoShardingEvent doParseInstance(XContentParser parser) throws IOException { + return DataStreamAutoShardingEvent.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamAutoShardingEvent::new; + } + + @Override + protected DataStreamAutoShardingEvent createTestInstance() { + return DataStreamAutoShardingEventTests.randomInstance(); + } + + @Override + protected DataStreamAutoShardingEvent mutateInstance(DataStreamAutoShardingEvent instance) { + String triggerIndex = instance.triggerIndexName(); + long timestamp = instance.timestamp(); + int targetNumberOfShards = instance.targetNumberOfShards(); + switch (randomInt(2)) { + case 0 -> triggerIndex = randomValueOtherThan(triggerIndex, () -> randomAlphaOfLengthBetween(10, 50)); + case 1 -> timestamp = randomValueOtherThan(timestamp, ESTestCase::randomNonNegativeLong); + case 2 -> targetNumberOfShards = randomValueOtherThan(targetNumberOfShards, ESTestCase::randomNonNegativeInt); + } + return new DataStreamAutoShardingEvent(triggerIndex, targetNumberOfShards, timestamp); + } + + static DataStreamAutoShardingEvent randomInstance() { + return new DataStreamAutoShardingEvent(randomAlphaOfLengthBetween(10, 40), randomNonNegativeInt(), randomNonNegativeLong()); + } + + @Override + protected DataStreamAutoShardingEvent makeTestChanges(DataStreamAutoShardingEvent testInstance) { + return mutateInstance(testInstance); + } + + @Override + protected Writeable.Reader> diffReader() { + return DataStreamAutoShardingEvent::readDiffFrom; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 9f7d6b49b0844..7e8e9805b54e7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -39,6 +39,7 @@ import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -93,7 +94,8 @@ protected DataStream mutateInstance(DataStream instance) { var lifecycle = instance.getLifecycle(); var failureStore = instance.isFailureStore(); var failureIndices = instance.getFailureIndices(); - switch (between(0, 10)) { + var autoShardingEvent = instance.getAutoShardingEvent(); + switch (between(0, 11)) { case 0 -> name = randomAlphaOfLength(10); case 1 -> indices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); @@ -130,6 +132,15 @@ protected DataStream mutateInstance(DataStream instance) { failureStore = true; } } + case 11 -> { + autoShardingEvent = randomBoolean() && autoShardingEvent != null + ? null + : new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ); + } } return new DataStream( @@ -144,7 +155,8 @@ protected DataStream mutateInstance(DataStream instance) { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + autoShardingEvent ); } @@ -201,7 +213,8 @@ public void testRolloverUpgradeToTsdbDataStream() { indexMode, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + ds.getAutoShardingEvent() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -228,7 +241,8 @@ public void testRolloverDowngradeToRegularDataStream() { IndexMode.TIME_SERIES, ds.getLifecycle(), ds.isFailureStore(), - ds.getFailureIndices() + ds.getFailureIndices(), + ds.getAutoShardingEvent() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -590,7 +604,8 @@ public void testSnapshot() { preSnapshotDataStream.getIndexMode(), preSnapshotDataStream.getLifecycle(), preSnapshotDataStream.isFailureStore(), - preSnapshotDataStream.getFailureIndices() + preSnapshotDataStream.getFailureIndices(), + preSnapshotDataStream.getAutoShardingEvent() ); var reconciledDataStream = postSnapshotDataStream.snapshot( @@ -634,7 +649,8 @@ public void testSnapshotWithAllBackingIndicesRemoved() { preSnapshotDataStream.getIndexMode(), preSnapshotDataStream.getLifecycle(), preSnapshotDataStream.isFailureStore(), - preSnapshotDataStream.getFailureIndices() + preSnapshotDataStream.getFailureIndices(), + preSnapshotDataStream.getAutoShardingEvent() ); assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); @@ -1654,7 +1670,8 @@ public void testXContentSerializationWithRollover() throws IOException { lifecycle, failureStore, failureIndices, - false + false, + null ); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { @@ -1671,6 +1688,99 @@ public void testXContentSerializationWithRollover() throws IOException { } } + public void testGetIndicesWithinMaxAgeRange() { + final TimeValue maxIndexAge = TimeValue.timeValueDays(7); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesOlderThanMinAge = randomIntBetween(0, 10); + final int numberOfBackingIndicesWithinMinAnge = randomIntBetween(0, 10); + final int numberOfShards = 1; + final List backingIndices = new ArrayList<>(); + final String 
dataStreamName = "logs-es"; + final List backingIndicesOlderThanMinAge = new ArrayList<>(); + for (int i = 0; i < numberOfBackingIndicesOlderThanMinAge; i++) { + long creationDate = System.currentTimeMillis() - maxIndexAge.millis() * 2; + final IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + randomIndexWriteLoad(numberOfShards), + creationDate + ); + backingIndices.add(indexMetadata.getIndex()); + backingIndicesOlderThanMinAge.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final List backingIndicesWithinMinAge = new ArrayList<>(); + for (int i = 0; i < numberOfBackingIndicesWithinMinAnge; i++) { + final long createdAt = System.currentTimeMillis() - (maxIndexAge.getMillis() / 2); + final IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + randomIndexWriteLoad(numberOfShards), + createdAt + ); + backingIndices.add(indexMetadata.getIndex()); + backingIndicesWithinMinAge.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, null, System.currentTimeMillis()); + backingIndices.add(writeIndexMetadata.getIndex()); + metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES + ); + + metadataBuilder.put(dataStream); + + final List indicesWithinMaxAgeRange = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadataBuilder::getSafe, + maxIndexAge, + System::currentTimeMillis + ); + + final List expectedIndicesWithinMaxAgeRange = new ArrayList<>(); + if (numberOfBackingIndicesOlderThanMinAge > 0) { + expectedIndicesWithinMaxAgeRange.add(backingIndicesOlderThanMinAge.get(backingIndicesOlderThanMinAge.size() - 1)); + } + expectedIndicesWithinMaxAgeRange.addAll(backingIndicesWithinMinAge); + expectedIndicesWithinMaxAgeRange.add(writeIndexMetadata.getIndex()); + + assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); + } + + private IndexWriteLoad randomIndexWriteLoad(int numberOfShards) { + IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); + for (int shardId = 0; shardId < numberOfShards; shardId++) { + builder.withShardWriteLoad(shardId, randomDoubleBetween(0, 64, true), randomLongBetween(1, 10)); + } + return builder.build(); + } + + private IndexMetadata createIndexMetadata(String indexName, IndexWriteLoad indexWriteLoad, long createdAt) { + return IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build() + ) + .stats(indexWriteLoad == null ? 
null : new IndexMetadataStats(indexWriteLoad, 1, 1)) + .creationDate(createdAt) + .build(); + } + private record DataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis, Long originationTimeInMillis) { public static DataStreamMetadata dataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis) { return new DataStreamMetadata(creationTimeInMillis, rolloverTimeInMillis, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index ba3b1a7387110..71306d7fe0aef 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -356,7 +356,8 @@ public void testRemoveBrokenBackingIndexReference() { original.getIndexMode(), original.getLifecycle(), original.isFailureStore(), - original.getFailureIndices() + original.getFailureIndices(), + original.getAutoShardingEvent() ); var brokenState = ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(broken).build()).build(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 3a47e0885f2d2..8fc02bb8e808c 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -74,6 +74,7 @@ import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.elasticsearch.test.ESTestCase.randomMap; import static org.elasticsearch.test.ESTestCase.randomMillisUpToYear9999; import static org.mockito.ArgumentMatchers.any; @@ -136,7 +137,8 @@ public static DataStream newInstance( null, lifecycle, failureStores.size() > 0, - failureStores + failureStores, + null ); } @@ -307,7 +309,14 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, failureStore, failureIndices, + randomBoolean(), randomBoolean() + ? 
new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ) + : null ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index b06ff73e29960..c3dd30bd2f242 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -334,7 +334,8 @@ static DataStream updateLocalDataStream( remoteDataStream.getIndexMode(), remoteDataStream.getLifecycle(), remoteDataStream.isFailureStore(), - remoteDataStream.getFailureIndices() + remoteDataStream.getFailureIndices(), + remoteDataStream.getAutoShardingEvent() ); } else { if (localDataStream.isReplicated() == false) { @@ -387,7 +388,8 @@ static DataStream updateLocalDataStream( localDataStream.getIndexMode(), localDataStream.getLifecycle(), localDataStream.isFailureStore(), - localDataStream.getFailureIndices() + localDataStream.getFailureIndices(), + localDataStream.getAutoShardingEvent() ); } } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java index c102470628a00..bc97623c76970 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java @@ -134,7 +134,8 @@ public void testAction() throws Exception { IndexMode.STANDARD, lifecycle, false, - List.of() + List.of(), + null ); dataStreamMap.put(dataStream.getName(), dataStream); } diff --git a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java index c1126df228cfe..d4a85ce859b2b 100644 --- a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java +++ b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java @@ -76,7 +76,13 @@ public Metadata.Builder withWriteLoadForecastForWriteIndex(String dataStreamName clearPreviousForecast(dataStream, metadata); - final List indicesWriteLoadWithinMaxAgeRange = getIndicesWithinMaxAgeRange(dataStream, metadata).stream() + final List indicesWriteLoadWithinMaxAgeRange = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadata::getSafe, + maxIndexAge, + threadPool::absoluteTimeInMillis + ) + .stream() .filter(index -> index.equals(dataStream.getWriteIndex()) == false) .map(metadata::getSafe) .map(IndexMetadata::getStats) @@ -134,25 +140,6 @@ static OptionalDouble forecastIndexWriteLoad(List indicesWriteLo return totalShardUptime == 0 ? 
OptionalDouble.empty() : OptionalDouble.of(totalWeightedWriteLoad / totalShardUptime); } - // Visible for testing - List getIndicesWithinMaxAgeRange(DataStream dataStream, Metadata.Builder metadata) { - final List dataStreamIndices = dataStream.getIndices(); - final long currentTimeMillis = threadPool.absoluteTimeInMillis(); - // Consider at least 1 index (including the write index) for cases where rollovers happen less often than maxIndexAge - int firstIndexWithinAgeRange = Math.max(dataStreamIndices.size() - 2, 0); - for (int i = 0; i < dataStreamIndices.size(); i++) { - Index index = dataStreamIndices.get(i); - final IndexMetadata indexMetadata = metadata.getSafe(index); - final long indexAge = currentTimeMillis - indexMetadata.getCreationDate(); - if (indexAge < maxIndexAge.getMillis()) { - // We need to consider the previous index too in order to cover the entire max-index-age range. - firstIndexWithinAgeRange = i == 0 ? 0 : i - 1; - break; - } - } - return dataStreamIndices.subList(firstIndexWithinAgeRange, dataStreamIndices.size()); - } - @Override @SuppressForbidden(reason = "This is the only place where IndexMetadata#getForecastedWriteLoad is allowed to be used") public OptionalDouble getForecastedWriteLoad(IndexMetadata indexMetadata) { diff --git a/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java b/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java index 38e754c802983..c7efb27509ef7 100644 --- a/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java +++ b/x-pack/plugin/write-load-forecaster/src/test/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecasterTests.java @@ -287,65 +287,6 @@ public void testWriteLoadForecast() { } } - public void testGetIndicesWithinMaxAgeRange() { - final TimeValue maxIndexAge = TimeValue.timeValueDays(7); - final LicensedWriteLoadForecaster writeLoadForecaster = new LicensedWriteLoadForecaster(() -> true, threadPool, maxIndexAge); - - final Metadata.Builder metadataBuilder = Metadata.builder(); - final int numberOfBackingIndicesOlderThanMinAge = randomIntBetween(0, 10); - final int numberOfBackingIndicesWithinMinAnge = randomIntBetween(0, 10); - final int numberOfShards = 1; - final List backingIndices = new ArrayList<>(); - final String dataStreamName = "logs-es"; - final List backingIndicesOlderThanMinAge = new ArrayList<>(); - for (int i = 0; i < numberOfBackingIndicesOlderThanMinAge; i++) { - long creationDate = System.currentTimeMillis() - maxIndexAge.millis() * 2; - final IndexMetadata indexMetadata = createIndexMetadata( - DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), - numberOfShards, - randomIndexWriteLoad(numberOfShards), - creationDate - ); - backingIndices.add(indexMetadata.getIndex()); - backingIndicesOlderThanMinAge.add(indexMetadata.getIndex()); - metadataBuilder.put(indexMetadata, false); - } - - final List backingIndicesWithinMinAge = new ArrayList<>(); - for (int i = 0; i < numberOfBackingIndicesWithinMinAnge; i++) { - final long createdAt = System.currentTimeMillis() - (maxIndexAge.getMillis() / 2); - final IndexMetadata indexMetadata = createIndexMetadata( - DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), - numberOfShards, - randomIndexWriteLoad(numberOfShards), - createdAt - ); - 
backingIndices.add(indexMetadata.getIndex()); - backingIndicesWithinMinAge.add(indexMetadata.getIndex()); - metadataBuilder.put(indexMetadata, false); - } - - final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); - final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, numberOfShards, null, System.currentTimeMillis()); - backingIndices.add(writeIndexMetadata.getIndex()); - metadataBuilder.put(writeIndexMetadata, false); - - final DataStream dataStream = createDataStream(dataStreamName, backingIndices); - - metadataBuilder.put(dataStream); - - final List indicesWithinMaxAgeRange = writeLoadForecaster.getIndicesWithinMaxAgeRange(dataStream, metadataBuilder); - - final List expectedIndicesWithinMaxAgeRange = new ArrayList<>(); - if (numberOfBackingIndicesOlderThanMinAge > 0) { - expectedIndicesWithinMaxAgeRange.add(backingIndicesOlderThanMinAge.get(backingIndicesOlderThanMinAge.size() - 1)); - } - expectedIndicesWithinMaxAgeRange.addAll(backingIndicesWithinMinAge); - expectedIndicesWithinMaxAgeRange.add(writeIndexMetadata.getIndex()); - - assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); - } - private IndexWriteLoad randomIndexWriteLoad(int numberOfShards) { IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); for (int shardId = 0; shardId < numberOfShards; shardId++) { From 8d93a934f6924b3359d08a7cf81db6f732a57ce6 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Wed, 6 Mar 2024 11:19:54 +0100 Subject: [PATCH 015/248] Add two new OGC functions ST_X and ST_Y (#105768) * Add two new OGC functions ST_X and ST_Y Recently Nik did work that involved extracting the X and Y coordinates from geo_point data using `to_string(field)` followed by a DISSECT command to re-parse the string to get the X and Y coordinates. This is much more efficiently achieved using existing known OGC functions `ST_X` and `ST_Y`. * Update docs/changelog/105768.yaml * Fixed invalid changelog yaml * Fixed mixed cluster tests * Fixed tests and added docs * Removed false impression that these functions were different for geo/cartesian With the use of WKB as the core type in the compute engine, many spatial functions are actually the same between these two types, so we should not give the impression they are different. * Code review comments and reduced object creation. 
* Revert temporary StringUtils hack, and fix bug in x/y extraction from WKB * Revert object creation reduction * Fixed mistakes in documentation --- docs/changelog/105768.yaml | 5 + .../esql/esql-functions-operators.asciidoc | 4 + .../esql/functions/signature/st_x.svg | 1 + .../esql/functions/signature/st_y.svg | 1 + .../esql/functions/spatial-functions.asciidoc | 16 +++ docs/reference/esql/functions/st_x.asciidoc | 33 +++++ docs/reference/esql/functions/st_y.asciidoc | 33 +++++ .../esql/functions/types/st_x.asciidoc | 6 + .../esql/functions/types/st_y.asciidoc | 6 + .../src/main/resources/show.csv-spec | 10 +- .../src/main/resources/spatial.csv-spec | 44 ++++++ .../scalar/spatial/StXFromWKBEvaluator.java | 127 ++++++++++++++++++ .../scalar/spatial/StYFromWKBEvaluator.java | 127 ++++++++++++++++++ .../function/EsqlFunctionRegistry.java | 4 + .../function/scalar/spatial/StX.java | 73 ++++++++++ .../function/scalar/spatial/StY.java | 73 ++++++++++ .../xpack/esql/io/stream/PlanNamedTypes.java | 6 + .../function/AbstractFunctionTestCase.java | 3 +- .../function/scalar/spatial/StXTests.java | 50 +++++++ .../function/scalar/spatial/StYTests.java | 50 +++++++ .../xpack/ql/util/SpatialCoordinateTypes.java | 16 ++- 21 files changed, 683 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/105768.yaml create mode 100644 docs/reference/esql/functions/signature/st_x.svg create mode 100644 docs/reference/esql/functions/signature/st_y.svg create mode 100644 docs/reference/esql/functions/spatial-functions.asciidoc create mode 100644 docs/reference/esql/functions/st_x.asciidoc create mode 100644 docs/reference/esql/functions/st_y.asciidoc create mode 100644 docs/reference/esql/functions/types/st_x.asciidoc create mode 100644 docs/reference/esql/functions/types/st_y.asciidoc create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java diff --git a/docs/changelog/105768.yaml b/docs/changelog/105768.yaml new file mode 100644 index 0000000000000..49d7f1f15c453 --- /dev/null +++ b/docs/changelog/105768.yaml @@ -0,0 +1,5 @@ +pr: 105768 +summary: Add two new OGC functions ST_X and ST_Y +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/reference/esql/esql-functions-operators.asciidoc b/docs/reference/esql/esql-functions-operators.asciidoc index 375bb4ee9dd00..a1ad512fbe512 100644 --- a/docs/reference/esql/esql-functions-operators.asciidoc +++ b/docs/reference/esql/esql-functions-operators.asciidoc @@ -21,6 +21,9 @@ include::functions/string-functions.asciidoc[tag=string_list] <>:: include::functions/date-time-functions.asciidoc[tag=date_list] +<>:: +include::functions/spatial-functions.asciidoc[tag=spatial_list] + <>:: include::functions/type-conversion-functions.asciidoc[tag=type_list] @@ -37,6 +40,7 @@ 
include::functions/aggregation-functions.asciidoc[] include::functions/math-functions.asciidoc[] include::functions/string-functions.asciidoc[] include::functions/date-time-functions.asciidoc[] +include::functions/spatial-functions.asciidoc[] include::functions/type-conversion-functions.asciidoc[] include::functions/conditional-functions-and-expressions.asciidoc[] include::functions/mv-functions.asciidoc[] diff --git a/docs/reference/esql/functions/signature/st_x.svg b/docs/reference/esql/functions/signature/st_x.svg new file mode 100644 index 0000000000000..d6fac8a96505a --- /dev/null +++ b/docs/reference/esql/functions/signature/st_x.svg @@ -0,0 +1 @@ +ST_X(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_y.svg b/docs/reference/esql/functions/signature/st_y.svg new file mode 100644 index 0000000000000..c6dc23724d59c --- /dev/null +++ b/docs/reference/esql/functions/signature/st_y.svg @@ -0,0 +1 @@ +ST_Y(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc new file mode 100644 index 0000000000000..d99fe36191a31 --- /dev/null +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -0,0 +1,16 @@ +[[esql-spatial-functions]] +==== {esql} spatial functions + +++++ +Spatial functions +++++ + +{esql} supports these spatial functions: + +// tag::spatial_list[] +* <> +* <> +// end::spatial_list[] + +include::st_x.asciidoc[] +include::st_y.asciidoc[] diff --git a/docs/reference/esql/functions/st_x.asciidoc b/docs/reference/esql/functions/st_x.asciidoc new file mode 100644 index 0000000000000..0f40a66417f9f --- /dev/null +++ b/docs/reference/esql/functions/st_x.asciidoc @@ -0,0 +1,33 @@ +[discrete] +[[esql-st_x]] +=== `ST_X` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_x.svg[Embedded,opts=inline] + +*Parameters* + +`point`:: +Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. + +*Description* + +Extracts the `x` coordinate from the supplied point. +If the points is of type `geo_point` this is equivalent to extracting the `longitude` value. + +*Supported types* + +include::types/st_x.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_x_y] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result] +|=== diff --git a/docs/reference/esql/functions/st_y.asciidoc b/docs/reference/esql/functions/st_y.asciidoc new file mode 100644 index 0000000000000..e876852228d83 --- /dev/null +++ b/docs/reference/esql/functions/st_y.asciidoc @@ -0,0 +1,33 @@ +[discrete] +[[esql-st_y]] +=== `ST_Y` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_y.svg[Embedded,opts=inline] + +*Parameters* + +`point`:: +Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. + +*Description* + +Extracts the `y` coordinate from the supplied point. +If the points is of type `geo_point` this is equivalent to extracting the `latitude` value. 
+ +*Supported types* + +include::types/st_y.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_x_y] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_x_y-result] +|=== diff --git a/docs/reference/esql/functions/types/st_x.asciidoc b/docs/reference/esql/functions/types/st_x.asciidoc new file mode 100644 index 0000000000000..94ed4b296f1d4 --- /dev/null +++ b/docs/reference/esql/functions/types/st_x.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +geo_point | double +|=== diff --git a/docs/reference/esql/functions/types/st_y.asciidoc b/docs/reference/esql/functions/types/st_y.asciidoc new file mode 100644 index 0000000000000..94ed4b296f1d4 --- /dev/null +++ b/docs/reference/esql/functions/types/st_y.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +geo_point | double +|=== diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 6887a1bbe9069..3f2d87c6d7a08 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -68,6 +68,8 @@ sinh |"double sinh(n:double|integer|long|unsigned_long)"|n split |"keyword split(str:keyword|text, delim:keyword|text)" |[str, delim] |["keyword|text", "keyword|text"] |["", ""] |keyword | "Split a single valued string into multiple strings." | [false, false] | false | false sqrt |"double sqrt(n:double|integer|long|unsigned_long)" |n |"double|integer|long|unsigned_long" | "" |double | "Returns the square root of a number." | false | false | false st_centroid |"geo_point|cartesian_point st_centroid(field:geo_point|cartesian_point)" |field |"geo_point|cartesian_point" | "" |"geo_point|cartesian_point" | "The centroid of a spatial field." | false | false | true +st_x |"double st_x(point:geo_point|cartesian_point)" |point |"geo_point|cartesian_point" | "" |double | "Extracts the x-coordinate from a point geometry." | false | false | false +st_y |"double st_y(point:geo_point|cartesian_point)" |point |"geo_point|cartesian_point" | "" |double | "Extracts the y-coordinate from a point geometry." | false | false | false starts_with |"boolean starts_with(str:keyword|text, prefix:keyword|text)" |[str, prefix] |["keyword|text", "keyword|text"] |["", ""] |boolean | "Returns a boolean that indicates whether a keyword string starts with another string" | [false, false] | false | false substring |"keyword substring(str:keyword|text, start:integer, ?length:integer)" |[str, start, length] |["keyword|text", "integer", "integer"] |["", "", ""] |keyword | "Returns a substring of a string, specified by a start position and an optional length" | [false, false, true]| false | false sum |"long sum(field:double|integer|long)" |field |"double|integer|long" | "" |long | "The sum of a numeric field." 
| false | false | true @@ -103,7 +105,7 @@ trim |"keyword|text trim(str:keyword|text)" ; -showFunctionsSynopsis#[skip:-8.12.99] +showFunctionsSynopsis#[skip:-8.13.99] show functions | keep synopsis; synopsis:keyword @@ -165,6 +167,8 @@ double pi() "keyword split(str:keyword|text, delim:keyword|text)" "double sqrt(n:double|integer|long|unsigned_long)" "geo_point|cartesian_point st_centroid(field:geo_point|cartesian_point)" +"double st_x(point:geo_point|cartesian_point)" +"double st_y(point:geo_point|cartesian_point)" "boolean starts_with(str:keyword|text, prefix:keyword|text)" "keyword substring(str:keyword|text, start:integer, ?length:integer)" "long sum(field:double|integer|long)" @@ -216,9 +220,9 @@ sinh | "double sinh(n:double|integer|long|unsigned_long)" // see https://github.com/elastic/elasticsearch/issues/102120 -countFunctions#[skip:-8.12.99] +countFunctions#[skip:-8.13.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -90 | 90 | 90 +92 | 92 | 92 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 5c4aae740910b..1eb4d82b5fcc2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -69,6 +69,30 @@ c:geo_point POINT(39.58327988510707 20.619513023697994) ; +centroidFromString4#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", "POINT(75.80929149873555 22.72774917539209)", "POINT(-0.030548143003023033 24.37553649504829)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| STATS c = ST_CENTROID(pt) +| EVAL x = ST_X(c), y = ST_Y(c); + +c:geo_point | x:double | y:double +POINT(39.58327988510707 20.619513023697994) | 39.58327988510707 | 20.619513023697994 +; + +stXFromString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +// tag::st_x_y[] +ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)") +| EVAL x = ST_X(point), y = ST_Y(point) +// end::st_x_y[] +; + +// tag::st_x_y-result[] +point:geo_point | x:double | y:double +POINT(42.97109629958868 14.7552534006536) | 42.97109629958868 | 14.7552534006536 +// end::st_x_y-result[] +; + simpleLoad#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] FROM airports | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; @@ -87,6 +111,17 @@ WIIT | Bandar Lampung | POINT(105.2667 -5.45) | Indonesia ZAH | Zāhedān | POINT(60.8628 29.4964) | Iran | POINT(60.900708564915 29.4752941956573) | Zahedan Int'l | 9 | mid ; +stXFromAirportsSupportsNull#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +FROM airports +| EVAL x = FLOOR(ABS(ST_X(city_location))/200), y = FLOOR(ABS(ST_Y(city_location))/100) +| STATS c = count(*) BY x, y +; + +c:long | x:double | y:double +872 | 0.0 | 0.0 +19 | null | null +; + centroidFromAirports#[skip:-8.12.99, reason:st_centroid added in 8.13] // tag::st_centroid-airports[] FROM airports @@ -399,6 +434,15 @@ c:cartesian_point POINT(3949.163965353159 1078.2645465797348) ; +stXFromCartesianString#[skip:-8.13.99, reason:st_x and st_y added in 8.14] +ROW point = TO_CARTESIANPOINT("POINT(4297.10986328125 -1475.530029296875)") +| EVAL x = ST_X(point), y = ST_Y(point) +; + +point:cartesian_point | x:double | y:double +POINT(4297.10986328125 -1475.530029296875) | 4297.10986328125 | -1475.530029296875 +; + simpleCartesianLoad#[skip:-8.12.99, reason:spatial type 
cartesian_point improved precision in 8.13] FROM airports_web | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java new file mode 100644 index 0000000000000..937eedc1d8fe0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StX}. + * This class is generated. Do not edit it. + */ +public final class StXFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StX.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean 
positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StX.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXFromWKBEvaluator get(DriverContext context) { + return new StXFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java new file mode 100644 index 0000000000000..33405f6db5998 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StY}. + * This class is generated. Do not edit it. 
+ */ +public final class StYFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StY.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StY.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYFromWKBEvaluator get(DriverContext context) { + return new StYFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 395a9ed16dc67..ede3633c1b3e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -75,6 +75,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -174,6 +176,8 @@ private FunctionDefinition[][] functions() { def(Now.class, Now::new, "now") }, // spatial new FunctionDefinition[] { def(SpatialCentroid.class, SpatialCentroid::new, "st_centroid") }, + new FunctionDefinition[] { def(StX.class, StX::new, "st_x") }, + new FunctionDefinition[] { def(StY.class, StY::new, "st_y") }, // conditional new FunctionDefinition[] { def(Case.class, Case::new, "case") }, // null diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java new file mode 100644 index 0000000000000..f86be9290fed1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StX.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatialPoint; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +/** + * Extracts the x-coordinate from a point geometry. + * For cartesian geometries, the x-coordinate is the first coordinate. + * For geographic geometries, the x-coordinate is the longitude. + * The function `st_x` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_X. 
+ */ +public class StX extends UnaryScalarFunction { + @FunctionInfo(returnType = "double", description = "Extracts the x-coordinate from a point geometry.") + public StX(Source source, @Param(name = "point", type = { "geo_point", "cartesian_point" }) Expression field) { + super(source, field); + } + + @Override + protected Expression.TypeResolution resolveType() { + return isSpatialPoint(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new StXFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StX(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StX::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef in) { + return UNSPECIFIED.wkbAsPoint(in).getX(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java new file mode 100644 index 0000000000000..759c23c73374a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StY.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatialPoint; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +/** + * Extracts the y-coordinate from a point geometry. + * For cartesian geometries, the y-coordinate is the second coordinate. + * For geographic geometries, the y-coordinate is the latitude. + * The function `st_y` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_Y. 
+ */ +public class StY extends UnaryScalarFunction { + @FunctionInfo(returnType = "double", description = "Extracts the y-coordinate from a point geometry.") + public StY(Source source, @Param(name = "point", type = { "geo_point", "cartesian_point" }) Expression field) { + super(source, field); + } + + @Override + protected TypeResolution resolveType() { + return isSpatialPoint(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new StYFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StY(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StY::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef in) { + return UNSPECIFIED.wkbAsPoint(in).getY(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 95892ac42e587..3ca5f2f5868ba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -99,6 +99,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -340,6 +342,8 @@ public static List namedTypeEntries() { of(ESQL_UNARY_SCLR_CLS, Sin.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Sinh.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Sqrt.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, StX.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, StY.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Tan.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, Tanh.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToBoolean.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), @@ -1248,6 +1252,8 @@ static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) thro entry(name(Sin.class), Sin::new), entry(name(Sinh.class), Sinh::new), entry(name(Sqrt.class), Sqrt::new), + entry(name(StX.class), StX::new), + entry(name(StY.class), StY::new), entry(name(Tan.class), Tan::new), entry(name(Tanh.class), Tanh::new), entry(name(ToBoolean.class), 
ToBoolean::new), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 612861b2889a4..4d44d3111c094 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -967,7 +967,8 @@ protected static String typeErrorMessage(boolean includeOrdinal, List testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedEvaluator = "StXFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + private static double valueOf(BytesRef wkb) { + return UNSPECIFIED.wkbAsPoint(wkb).getX(); + } + + @Override + protected Expression build(Source source, List args) { + return new StX(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java new file mode 100644 index 0000000000000..9416b7ba8cad4 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_y") +public class StYTests extends AbstractFunctionTestCase { + public StYTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedEvaluator = "StYFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + private static double valueOf(BytesRef wkb) { + return UNSPECIFIED.wkbAsPoint(wkb).getY(); + } + + @Override + protected Expression build(Source source, List args) { + return new StY(source, args.get(0)); + } +} diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java index 6508a67f7e785..32bd76cf84e19 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java @@ -56,6 +56,15 @@ public long pointAsLong(double x, double y) { final long yi = XYEncodingUtils.encode((float) y); return (yi & 0xFFFFFFFFL) | xi << 32; } + }, + UNSPECIFIED { + public Point longAsPoint(long encoded) { + throw new UnsupportedOperationException("Cannot convert long to point without specifying coordinate type"); + } + + public long pointAsLong(double x, double y) { + throw new UnsupportedOperationException("Cannot convert point to long without specifying coordinate type"); + } }; public abstract Point longAsPoint(long encoded); @@ -63,9 +72,14 @@ public long pointAsLong(double x, double y) { public abstract long pointAsLong(double x, double y); public long wkbAsLong(BytesRef wkb) { + Point point = wkbAsPoint(wkb); + return pointAsLong(point.getX(), point.getY()); + } + + public Point wkbAsPoint(BytesRef wkb) { Geometry geometry = WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); if (geometry instanceof Point point) { - return pointAsLong(point.getX(), point.getY()); + return point; } else { throw new IllegalArgumentException("Unsupported geometry: " + geometry.type()); } From eeecdbf87b08ec03682dabe7def7272055119252 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 6 Mar 2024 11:28:23 +0100 Subject: [PATCH 016/248] Additional trace logging for desired balance computer 
(#105910) --- .../allocation/decider/DiskThresholdDeciderIT.java | 3 ++- .../allocator/DesiredBalanceComputer.java | 14 ++++++++++++-- .../snapshots/SnapshotShardSizeInfo.java | 5 +++++ 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 7b9f89b60ed94..56eacb1bc41b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -162,7 +162,8 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti } @TestIssueLogging( - value = "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler:DEBUG," + value = "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceComputer:TRACE," + + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler:DEBUG," + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator:TRACE", issueUrl = "https://github.com/elastic/elasticsearch/issues/105331" ) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index effd5ec110c44..3a26bbcc7b280 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -77,7 +77,18 @@ public DesiredBalance compute( Predicate isFresh ) { - logger.debug("Recomputing desired balance for [{}]", desiredBalanceInput.index()); + if (logger.isTraceEnabled()) { + logger.trace( + "Recomputing desired balance for [{}]: {}, {}, {}, {}", + desiredBalanceInput.index(), + previousDesiredBalance, + desiredBalanceInput.routingAllocation().routingNodes().toString(), + desiredBalanceInput.routingAllocation().clusterInfo().toString(), + desiredBalanceInput.routingAllocation().snapshotShardSizeInfo().toString() + ); + } else { + logger.debug("Recomputing desired balance for [{}]", desiredBalanceInput.index()); + } final var routingAllocation = desiredBalanceInput.routingAllocation().mutableCloneForSimulation(); final var routingNodes = routingAllocation.routingNodes(); @@ -283,7 +294,6 @@ public DesiredBalance compute( hasChanges = true; clusterInfoSimulator.simulateShardStarted(shardRouting); routingNodes.startShard(logger, shardRouting, changes, 0L); - logger.trace("starting shard {}", shardRouting); } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java index 29ae2d1c5da4b..3bd5431c7be63 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardSizeInfo.java @@ -46,4 +46,9 @@ public long getShardSize(ShardRouting shardRouting, long fallback) { } return shardSize; } + + @Override + public String toString() { + return "SnapshotShardSizeInfo{snapshotShardSizes=" + snapshotShardSizes + '}'; + } } From 6fbf9892aa6964151c7491ba8e6b5a00336bba84 Mon Sep 17 00:00:00 
2001 From: Mary Gouseti Date: Wed, 6 Mar 2024 14:04:18 +0200 Subject: [PATCH 017/248] Add the parameter `failure_store` to multi-target syntax APIs (#105386) In this PR we introduce a new query parameter behind the failure store feature flag. The query param `failure_store` allows the APIs that support the multi-target syntax to select the failure store indices as well. If an API should not support the failure store, the `allowFailureIndices` flag should be `false`. --- .../anomaly-detection/apis/put-job.asciidoc | 1 + .../datastreams/FailureStoreQueryParamIT.java | 216 +++++++++ .../elasticsearch/ElasticsearchException.java | 7 + .../org/elasticsearch/TransportVersions.java | 1 + .../admin/indices/get/GetIndexRequest.java | 12 +- .../mapping/put/PutMappingRequest.java | 19 +- .../action/support/IndicesOptions.java | 455 +++++++++++++++--- .../master/info/ClusterInfoRequest.java | 5 + .../cluster/metadata/DataStream.java | 27 ++ .../metadata/IndexNameExpressionResolver.java | 136 +++++- .../cluster/metadata/Metadata.java | 8 + .../FailureIndexNotSupportedException.java | 37 ++ .../ExceptionSerializationTests.java | 2 + .../indices/get/GetIndexRequestTests.java | 15 + .../action/support/IndicesOptionsTests.java | 52 +- .../cluster/metadata/DataStreamTests.java | 158 ++++++ .../IndexNameExpressionResolverTests.java | 198 +++++++- .../metadata/DataStreamTestHelper.java | 4 + .../datafeed/DatafeedNodeSelectorTests.java | 13 +- .../xpack/ql/index/IndexResolver.java | 16 +- 20 files changed, 1261 insertions(+), 121 deletions(-) create mode 100644 modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java create mode 100644 server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 97120ff1873ae..1ab5de76a94b0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -537,3 +537,4 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] +// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"false"/] diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java new file mode 100644 index 0000000000000..a6b235e8d566f --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. + * We do not want to do that until the feature flag is removed. For this reason, we temporarily, test the affected APIs here. + * Please convert this to a yaml test when the feature flag is removed. + */ +public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { + + private static final String DATA_STREAM_NAME = "failure-data-stream"; + private String backingIndex; + private String failureStoreIndex; + + @SuppressWarnings("unchecked") + @Before + public void setup() throws IOException { + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["failure-data-stream"], + "template": { + "settings": { + "number_of_replicas": 0 + } + }, + "data_stream": { + "failure_store": true + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); + ensureGreen(DATA_STREAM_NAME); + + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); + List backingIndices = getBackingIndices(dataStream); + assertThat(backingIndices.size(), is(1)); + List failureStore = getFailureStore(dataStream); + assertThat(failureStore.size(), is(1)); + backingIndex = backingIndices.get(0); + failureStoreIndex = failureStore.get(0); + } + + public void testGetIndexApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=false")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + @SuppressWarnings("unchecked") + public void testGetIndexStatsApi() throws IOException { + { + final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats")); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response statsResponse = 
client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=true") + ); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response statsResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") + ); + Map indices = (Map) entityAsMap(statsResponse).get("indices"); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + public void testGetIndexSettingsApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=true") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + public void testGetIndexMappingApi() throws IOException { + { + final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(backingIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=true") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(2)); + assertThat(indices.containsKey(backingIndex), is(true)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + { + final Response indicesResponse = client().performRequest( + new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") + ); + Map indices = entityAsMap(indicesResponse); + assertThat(indices.size(), is(1)); + assertThat(indices.containsKey(failureStoreIndex), is(true)); + } + } + + @SuppressWarnings("unchecked") + public void testPutIndexMappingApi() throws IOException { + { + final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); + mappingRequest.setJsonEntity(""" + { + "properties": { + "email": { + "type": "keyword" + } + } + } + """); + assertAcknowledged(client().performRequest(mappingRequest)); + } + { + final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=true"); + mappingRequest.setJsonEntity(""" + { + "properties": { + "email": { + "type": "keyword" + } + } + } + """); + ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest)); + Map response = entityAsMap(responseException.getResponse()); + assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); + } + } + + private List getBackingIndices(Map 
response) { + return getIndices(response, "indices"); + } + + private List getFailureStore(Map response) { + return getIndices(response, "failure_indices"); + + } + + @SuppressWarnings("unchecked") + private List getIndices(Map response, String fieldName) { + List> indices = (List>) response.get(fieldName); + return indices.stream().map(index -> index.get("index_name")).toList(); + } +} diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 33566203bb99a..83e5375546b63 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.AutoscalingMissedIndicesUpdateException; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.recovery.RecoveryCommitTooNewException; import org.elasticsearch.ingest.GraphStructureException; import org.elasticsearch.rest.ApiNotAvailableException; @@ -1910,6 +1911,12 @@ private enum ElasticsearchExceptionHandle { GraphStructureException::new, 177, TransportVersions.INGEST_GRAPH_STRUCTURE_EXCEPTION + ), + FAILURE_INDEX_NOT_SUPPORTED_EXCEPTION( + FailureIndexNotSupportedException.class, + FailureIndexNotSupportedException::new, + 178, + TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index d484da5ba506c..ec3971a48a649 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -136,6 +136,7 @@ static TransportVersion def(int id) { public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0); public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0); + public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 1c2598d70998a..a550350c20f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -9,7 +9,9 @@ package org.elasticsearch.action.admin.indices.get; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -92,7 +94,15 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - + super( + DataStream.isFailureStoreEnabled() + ? 
IndicesOptions.builder(IndicesOptions.strictExpandOpen()) + .failureStoreOptions( + IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true) + ) + .build() + : IndicesOptions.strictExpandOpen() + ); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 45532d8024f87..a2787e1a55fd7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -66,7 +66,24 @@ public class PutMappingRequest extends AcknowledgedRequest im private String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); + private IndicesOptions indicesOptions = IndicesOptions.builder() + .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + IndicesOptions.WildcardOptions.builder() + .matchOpen(true) + .matchClosed(true) + .includeHidden(false) + .allowEmptyExpressions(false) + .resolveAliases(true) + ) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder() + .allowClosedIndices(true) + .allowAliasToMultipleIndices(true) + .ignoreThrottled(false) + .allowFailureIndices(false) + ) + .build(); private String source; private String origin = ""; diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 6e94ea11c652d..3b03b1cf0a4f6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -8,8 +8,11 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; @@ -40,12 +43,27 @@ * target does not exist. * @param wildcardOptions, applies only to wildcard expressions and defines how the wildcards will be expanded and if it will * be acceptable to have expressions that results to no indices. - * @param generalOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of - * aliases or indices are allowed, or they will throw an error. + * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of + * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action + * does not support certain options. + * @param failureStoreOptions, applies to all indices already matched and controls the type of indices that will be returned. Currently, + * there are two types, data stream failure indices (only certain data streams have them) and data stream + * backing indices or stand-alone indices. 
*/ -public record IndicesOptions(ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, GeneralOptions generalOptions) - implements - ToXContentFragment { +public record IndicesOptions( + ConcreteTargetOptions concreteTargetOptions, + WildcardOptions wildcardOptions, + GatekeeperOptions gatekeeperOptions, + FailureStoreOptions failureStoreOptions +) implements ToXContentFragment { + + public IndicesOptions( + ConcreteTargetOptions concreteTargetOptions, + WildcardOptions wildcardOptions, + GatekeeperOptions gatekeeperOptions + ) { + this(concreteTargetOptions, wildcardOptions, gatekeeperOptions, FailureStoreOptions.DEFAULT); + } public static IndicesOptions.Builder builder() { return new Builder(); @@ -286,20 +304,28 @@ public static Builder builder(WildcardOptions wildcardOptions) { } /** - * These options apply on all indices that have been selected by the other Options. It can either filter the response or - * define what type of indices or aliases are not allowed which will result in an error response. + * The "gatekeeper" options apply to all indices that have been selected by the other Options. It contains two types of flags: + * - The "allow*" flags, whose purpose is to enable actions to define certain conditions that need to apply on the concrete indices + * they accept. For example, single-index actions will set allowAliasToMultipleIndices to false, while search will not accept a + * closed index etc. These options are not configurable by the end-user. + * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. - * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. + * @param allowFailureIndices, allow failure indices in the response, true by default + * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and is the only flag + * that merely filters and never throws an error.
*/ - public record GeneralOptions(boolean allowAliasToMultipleIndices, boolean allowClosedIndices, @Deprecated boolean ignoreThrottled) - implements - ToXContentFragment { + public record GatekeeperOptions( + boolean allowAliasToMultipleIndices, + boolean allowClosedIndices, + boolean allowFailureIndices, + @Deprecated boolean ignoreThrottled + ) implements ToXContentFragment { public static final String IGNORE_THROTTLED = "ignore_throttled"; - public static final GeneralOptions DEFAULT = new GeneralOptions(true, true, false); + public static final GatekeeperOptions DEFAULT = new GatekeeperOptions(true, true, true, false); - public static GeneralOptions parseParameter(Object ignoreThrottled, GeneralOptions defaultOptions) { + public static GatekeeperOptions parseParameter(Object ignoreThrottled, GatekeeperOptions defaultOptions) { if (ignoreThrottled == null && defaultOptions != null) { return defaultOptions; } @@ -316,15 +342,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Builder { private boolean allowAliasToMultipleIndices; private boolean allowClosedIndices; + private boolean allowFailureIndices; private boolean ignoreThrottled; public Builder() { this(DEFAULT); } - Builder(GeneralOptions options) { + Builder(GatekeeperOptions options) { allowAliasToMultipleIndices = options.allowAliasToMultipleIndices; allowClosedIndices = options.allowClosedIndices; + allowFailureIndices = options.allowFailureIndices; ignoreThrottled = options.ignoreThrottled; } @@ -346,6 +374,15 @@ public Builder allowClosedIndices(boolean allowClosedIndices) { return this; } + /** + * Failure indices are accepted when true, otherwise the resolution will throw an error. + * Defaults to true. + */ + public Builder allowFailureIndices(boolean allowFailureIndices) { + this.allowFailureIndices = allowFailureIndices; + return this; + } + /** * Throttled indices will not be included in the result. Defaults to false. */ @@ -354,8 +391,8 @@ public Builder ignoreThrottled(boolean ignoreThrottled) { return this; } - public GeneralOptions build() { - return new GeneralOptions(allowAliasToMultipleIndices, allowClosedIndices, ignoreThrottled); + public GatekeeperOptions build() { + return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled); } } @@ -363,8 +400,102 @@ public static Builder builder() { return new Builder(); } - public static Builder builder(GeneralOptions generalOptions) { - return new Builder(generalOptions); + public static Builder builder(GatekeeperOptions gatekeeperOptions) { + return new Builder(gatekeeperOptions); + } + } + + /** + * Applies to all indices already matched and controls the type of indices that will be returned. There are two types, data stream + * failure indices (only certain data streams have them) and data stream backing indices or stand-alone indices. + * @param includeRegularIndices, when true regular or data stream backing indices will be retrieved. + * @param includeFailureIndices, when true data stream failure indices will be included. 
+ */ + public record FailureStoreOptions(boolean includeRegularIndices, boolean includeFailureIndices) + implements + Writeable, + ToXContentFragment { + + public static final String FAILURE_STORE = "failure_store"; + public static final String INCLUDE_ALL = "true"; + public static final String INCLUDE_ONLY_REGULAR_INDICES = "false"; + public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; + + public static final FailureStoreOptions DEFAULT = new FailureStoreOptions(true, false); + + public static FailureStoreOptions read(StreamInput in) throws IOException { + return new FailureStoreOptions(in.readBoolean(), in.readBoolean()); + } + + public static FailureStoreOptions parseParameters(Object failureStoreValue, FailureStoreOptions defaultOptions) { + if (failureStoreValue == null) { + return defaultOptions; + } + FailureStoreOptions.Builder builder = defaultOptions == null + ? new FailureStoreOptions.Builder() + : new FailureStoreOptions.Builder(defaultOptions); + return switch (failureStoreValue.toString()) { + case INCLUDE_ALL -> builder.includeRegularIndices(true).includeFailureIndices(true).build(); + case INCLUDE_ONLY_REGULAR_INDICES -> builder.includeRegularIndices(true).includeFailureIndices(false).build(); + case INCLUDE_ONLY_FAILURE_INDICES -> builder.includeRegularIndices(false).includeFailureIndices(true).build(); + default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE + " value [" + failureStoreValue + "]"); + }; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(FAILURE_STORE, displayValue()); + } + + public String displayValue() { + if (includeRegularIndices && includeFailureIndices) { + return INCLUDE_ALL; + } else if (includeRegularIndices) { + return INCLUDE_ONLY_REGULAR_INDICES; + } + return INCLUDE_ONLY_FAILURE_INDICES; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(includeRegularIndices); + out.writeBoolean(includeFailureIndices); + } + + public static class Builder { + private boolean includeRegularIndices; + private boolean includeFailureIndices; + + public Builder() { + this(DEFAULT); + } + + Builder(FailureStoreOptions options) { + includeRegularIndices = options.includeRegularIndices; + includeFailureIndices = options.includeFailureIndices; + } + + public Builder includeRegularIndices(boolean includeRegularIndices) { + this.includeRegularIndices = includeRegularIndices; + return this; + } + + public Builder includeFailureIndices(boolean includeFailureIndices) { + this.includeFailureIndices = includeFailureIndices; + return this; + } + + public FailureStoreOptions build() { + return new FailureStoreOptions(includeRegularIndices, includeFailureIndices); + } + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(FailureStoreOptions failureStoreOptions) { + return new Builder(failureStoreOptions); } } @@ -400,9 +531,10 @@ private enum Option { EXCLUDE_ALIASES, ALLOW_EMPTY_WILDCARD_EXPRESSIONS, ERROR_WHEN_ALIASES_TO_MULTIPLE_INDICES, - ERROR_WHEN_CLOSED_INDICES, - IGNORE_THROTTLED + IGNORE_THROTTLED, + + ALLOW_FAILURE_INDICES // Added in 8.14 } private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class); @@ -415,7 +547,8 @@ private enum Option { public static final IndicesOptions DEFAULT = new IndicesOptions( ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, - 
GeneralOptions.DEFAULT + GatekeeperOptions.DEFAULT, + FailureStoreOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -428,7 +561,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -440,7 +580,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -452,7 +599,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -464,14 +618,28 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) .wildcardOptions( WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() 
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -483,14 +651,28 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) .wildcardOptions( WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(true).allowClosedIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -502,7 +684,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -514,7 +703,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -526,7 +722,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(false).allowAliasToMultipleIndices(true)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .ignoreThrottled(true) + .allowClosedIndices(false) + .allowFailureIndices(true) + .allowAliasToMultipleIndices(true) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() 
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -538,7 +741,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(false).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(false) + .allowClosedIndices(false) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -550,7 +760,14 @@ private enum Option { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions(GeneralOptions.builder().allowClosedIndices(false).allowAliasToMultipleIndices(true).ignoreThrottled(false)) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowClosedIndices(false) + .allowAliasToMultipleIndices(true) + .allowFailureIndices(true) + .ignoreThrottled(false) + ) + .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) .build(); /** @@ -604,14 +821,21 @@ public boolean expandWildcardsHidden() { * @return Whether execution on closed indices is allowed. */ public boolean forbidClosedIndices() { - return generalOptions.allowClosedIndices() == false; + return gatekeeperOptions.allowClosedIndices() == false; + } + + /** + * @return Whether execution on failure indices is allowed. + */ + public boolean allowFailureIndices() { + return gatekeeperOptions.allowFailureIndices(); } /** * @return whether aliases pointing to multiple indices are allowed */ public boolean allowAliasesToMultipleIndices() { - return generalOptions().allowAliasToMultipleIndices(); + return gatekeeperOptions().allowAliasToMultipleIndices(); } /** @@ -625,7 +849,21 @@ public boolean ignoreAliases() { * @return whether indices that are marked as throttled should be ignored */ public boolean ignoreThrottled() { - return generalOptions().ignoreThrottled(); + return gatekeeperOptions().ignoreThrottled(); + } + + /** + * @return whether regular indices (stand-alone or backing indices) will be included in the response + */ + public boolean includeRegularIndices() { + return failureStoreOptions().includeRegularIndices(); + } + + /** + * @return whether failure indices (only supported by certain data streams) will be included in the response + */ + public boolean includeFailureIndices() { + return failureStoreOptions().includeFailureIndices(); } public void writeIndicesOptions(StreamOutput out) throws IOException { @@ -648,6 +886,11 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { if (ignoreUnavailable()) { backwardsCompatibleOptions.add(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + if (allowFailureIndices()) { + backwardsCompatibleOptions.add(Option.ALLOW_FAILURE_INDICES); + } + } out.writeEnumSet(backwardsCompatibleOptions); EnumSet states = EnumSet.noneOf(WildcardStates.class); @@ -661,6 +904,9 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + failureStoreOptions.writeTo(out); + } }
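    // A minimal round-trip sketch of the versioned wire handling above, assuming the BytesStreamOutput helper
    // commonly used in unit tests (the variable names here are illustrative only):
    //   BytesStreamOutput out = new BytesStreamOutput();
    //   IndicesOptions.LENIENT_EXPAND_OPEN.writeIndicesOptions(out);
    //   IndicesOptions roundTripped = IndicesOptions.readIndicesOptions(out.bytes().streamInput());
    // On transport versions before ADD_FAILURE_STORE_INDICES_OPTIONS, the reader is expected to fall back to
    // FailureStoreOptions.DEFAULT and allowFailureIndices == true rather than reading the new fields.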
public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException { @@ -670,24 +916,34 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti options.contains(Option.ALLOW_EMPTY_WILDCARD_EXPRESSIONS), options.contains(Option.EXCLUDE_ALIASES) ); - GeneralOptions generalOptions = GeneralOptions.builder() + boolean allowFailureIndices = true; + if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + allowFailureIndices = options.contains(Option.ALLOW_FAILURE_INDICES); + } + GatekeeperOptions gatekeeperOptions = GatekeeperOptions.builder() .allowClosedIndices(options.contains(Option.ERROR_WHEN_CLOSED_INDICES) == false) .allowAliasToMultipleIndices(options.contains(Option.ERROR_WHEN_ALIASES_TO_MULTIPLE_INDICES) == false) + .allowFailureIndices(allowFailureIndices) .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); + FailureStoreOptions failureStoreOptions = in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS) + ? FailureStoreOptions.read(in) + : FailureStoreOptions.DEFAULT; return new IndicesOptions( options.contains(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS) ? ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcardOptions, - generalOptions + gatekeeperOptions, + failureStoreOptions ); } public static class Builder { private ConcreteTargetOptions concreteTargetOptions; private WildcardOptions wildcardOptions; - private GeneralOptions generalOptions; + private GatekeeperOptions gatekeeperOptions; + private FailureStoreOptions failureStoreOptions; Builder() { this(DEFAULT); @@ -696,7 +952,8 @@ public static class Builder { Builder(IndicesOptions indicesOptions) { concreteTargetOptions = indicesOptions.concreteTargetOptions; wildcardOptions = indicesOptions.wildcardOptions; - generalOptions = indicesOptions.generalOptions; + gatekeeperOptions = indicesOptions.gatekeeperOptions; + failureStoreOptions = indicesOptions.failureStoreOptions; } public Builder concreteTargetOptions(ConcreteTargetOptions concreteTargetOptions) { @@ -714,18 +971,28 @@ public Builder wildcardOptions(WildcardOptions.Builder wildcardOptions) { return this; } - public Builder generalOptions(GeneralOptions generalOptions) { - this.generalOptions = generalOptions; + public Builder gatekeeperOptions(GatekeeperOptions gatekeeperOptions) { + this.gatekeeperOptions = gatekeeperOptions; + return this; + } + + public Builder gatekeeperOptions(GatekeeperOptions.Builder generalOptions) { + this.gatekeeperOptions = generalOptions.build(); return this; } - public Builder generalOptions(GeneralOptions.Builder generalOptions) { - this.generalOptions = generalOptions.build(); + public Builder failureStoreOptions(FailureStoreOptions failureStoreOptions) { + this.failureStoreOptions = failureStoreOptions; + return this; + } + + public Builder failureStoreOptions(FailureStoreOptions.Builder failureStoreOptions) { + this.failureStoreOptions = failureStoreOptions.build(); return this; } public IndicesOptions build() { - return new IndicesOptions(concreteTargetOptions, wildcardOptions, generalOptions); + return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); } } @@ -819,7 +1086,7 @@ public static IndicesOptions fromOptions( .resolveAliases(ignoreAliases == false) .allowEmptyExpressions(allowNoIndices) .build(); - final GeneralOptions generalOptions = GeneralOptions.builder() + final GatekeeperOptions 
gatekeeperOptions = GatekeeperOptions.builder() .allowAliasToMultipleIndices(allowAliasesToMultipleIndices) .allowClosedIndices(forbidClosedIndices == false) .ignoreThrottled(ignoreThrottled) @@ -827,12 +1094,13 @@ public static IndicesOptions fromOptions( return new IndicesOptions( ignoreUnavailable ? ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcards, - generalOptions + gatekeeperOptions, + FailureStoreOptions.DEFAULT ); } public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) { - if (request.hasParam(GeneralOptions.IGNORE_THROTTLED)) { + if (request.hasParam(GatekeeperOptions.IGNORE_THROTTLED)) { DEPRECATION_LOGGER.warn(DeprecationCategory.API, "ignore_throttled_param", IGNORE_THROTTLED_DEPRECATION_MESSAGE); } @@ -840,19 +1108,36 @@ public static IndicesOptions fromRequest(RestRequest request, IndicesOptions def request.param(WildcardOptions.EXPAND_WILDCARDS), request.param(ConcreteTargetOptions.IGNORE_UNAVAILABLE), request.param(WildcardOptions.ALLOW_NO_INDICES), - request.param(GeneralOptions.IGNORE_THROTTLED), + request.param(GatekeeperOptions.IGNORE_THROTTLED), + DataStream.isFailureStoreEnabled() + ? request.param(FailureStoreOptions.FAILURE_STORE) + : FailureStoreOptions.INCLUDE_ONLY_REGULAR_INDICES, defaultSettings ); } public static IndicesOptions fromMap(Map map, IndicesOptions defaultSettings) { + if (DataStream.isFailureStoreEnabled()) { + return fromParameters( + map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"), + map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE) + ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE) + : map.get("ignoreUnavailable"), + map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"), + map.containsKey(GatekeeperOptions.IGNORE_THROTTLED) + ? map.get(GatekeeperOptions.IGNORE_THROTTLED) + : map.get("ignoreThrottled"), + map.containsKey(FailureStoreOptions.FAILURE_STORE) ? map.get(FailureStoreOptions.FAILURE_STORE) : map.get("failureStore"), + defaultSettings + ); + } return fromParameters( map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"), map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE) ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE) : map.get("ignoreUnavailable"), map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"), - map.containsKey(GeneralOptions.IGNORE_THROTTLED) ? map.get(GeneralOptions.IGNORE_THROTTLED) : map.get("ignoreThrottled"), + map.containsKey(GatekeeperOptions.IGNORE_THROTTLED) ? 
map.get(GatekeeperOptions.IGNORE_THROTTLED) : map.get("ignoreThrottled"), defaultSettings ); } @@ -866,10 +1151,22 @@ public static boolean isIndicesOptions(String name) { || "expandWildcards".equals(name) || ConcreteTargetOptions.IGNORE_UNAVAILABLE.equals(name) || "ignoreUnavailable".equals(name) - || GeneralOptions.IGNORE_THROTTLED.equals(name) + || GatekeeperOptions.IGNORE_THROTTLED.equals(name) || "ignoreThrottled".equals(name) || WildcardOptions.ALLOW_NO_INDICES.equals(name) - || "allowNoIndices".equals(name); + || "allowNoIndices".equals(name) + || (DataStream.isFailureStoreEnabled() && FailureStoreOptions.FAILURE_STORE.equals(name)) + || (DataStream.isFailureStoreEnabled() && "failureStore".equals(name)); + } + + public static IndicesOptions fromParameters( + Object wildcardsString, + Object ignoreUnavailableString, + Object allowNoIndicesString, + Object ignoreThrottled, + IndicesOptions defaultSettings + ) { + return fromParameters(wildcardsString, ignoreUnavailableString, allowNoIndicesString, ignoreThrottled, null, defaultSettings); } public static IndicesOptions fromParameters( @@ -877,20 +1174,29 @@ public static IndicesOptions fromParameters( Object ignoreUnavailableString, Object allowNoIndicesString, Object ignoreThrottled, + Object failureStoreString, IndicesOptions defaultSettings ) { - if (wildcardsString == null && ignoreUnavailableString == null && allowNoIndicesString == null && ignoreThrottled == null) { + if (wildcardsString == null + && ignoreUnavailableString == null + && allowNoIndicesString == null + && ignoreThrottled == null + && failureStoreString == null) { return defaultSettings; } WildcardOptions wildcards = WildcardOptions.parseParameters(wildcardsString, allowNoIndicesString, defaultSettings.wildcardOptions); - GeneralOptions generalOptions = GeneralOptions.parseParameter(ignoreThrottled, defaultSettings.generalOptions); + GatekeeperOptions gatekeeperOptions = GatekeeperOptions.parseParameter(ignoreThrottled, defaultSettings.gatekeeperOptions); + FailureStoreOptions failureStoreOptions = DataStream.isFailureStoreEnabled() + ? 
FailureStoreOptions.parseParameters(failureStoreString, defaultSettings.failureStoreOptions) + : FailureStoreOptions.DEFAULT; // note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.fromParameter(ignoreUnavailableString, defaultSettings.concreteTargetOptions)) .wildcardOptions(wildcards) - .generalOptions(generalOptions) + .gatekeeperOptions(gatekeeperOptions) + .failureStoreOptions(failureStoreOptions) .build(); } @@ -898,14 +1204,18 @@ public static IndicesOptions fromParameters( public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { concreteTargetOptions.toXContent(builder, params); wildcardOptions.toXContent(builder, params); - generalOptions.toXContent(builder, params); + gatekeeperOptions.toXContent(builder, params); + if (DataStream.isFailureStoreEnabled()) { + failureStoreOptions.toXContent(builder, params); + } return builder; } private static final ParseField EXPAND_WILDCARDS_FIELD = new ParseField(WildcardOptions.EXPAND_WILDCARDS); private static final ParseField IGNORE_UNAVAILABLE_FIELD = new ParseField(ConcreteTargetOptions.IGNORE_UNAVAILABLE); - private static final ParseField IGNORE_THROTTLED_FIELD = new ParseField(GeneralOptions.IGNORE_THROTTLED).withAllDeprecated(); + private static final ParseField IGNORE_THROTTLED_FIELD = new ParseField(GatekeeperOptions.IGNORE_THROTTLED).withAllDeprecated(); private static final ParseField ALLOW_NO_INDICES_FIELD = new ParseField(WildcardOptions.ALLOW_NO_INDICES); + private static final ParseField FAILURE_STORE_FIELD = new ParseField(FailureStoreOptions.FAILURE_STORE); public static IndicesOptions fromXContent(XContentParser parser) throws IOException { return fromXContent(parser, null); @@ -914,8 +1224,9 @@ public static IndicesOptions fromXContent(XContentParser parser) throws IOExcept public static IndicesOptions fromXContent(XContentParser parser, @Nullable IndicesOptions defaults) throws IOException { boolean parsedWildcardStates = false; WildcardOptions.Builder wildcards = defaults == null ? null : WildcardOptions.builder(defaults.wildcardOptions()); - GeneralOptions.Builder generalOptions = GeneralOptions.builder() - .ignoreThrottled(defaults != null && defaults.generalOptions().ignoreThrottled()); + GatekeeperOptions.Builder generalOptions = GatekeeperOptions.builder() + .ignoreThrottled(defaults != null && defaults.gatekeeperOptions().ignoreThrottled()); + FailureStoreOptions failureStoreOptions = defaults == null ? FailureStoreOptions.DEFAULT : defaults.failureStoreOptions(); Boolean allowNoIndices = defaults == null ? null : defaults.allowNoIndices(); Boolean ignoreUnavailable = defaults == null ? null : defaults.ignoreUnavailable(); Token token = parser.currentToken() == Token.START_OBJECT ? parser.currentToken() : parser.nextToken(); @@ -965,13 +1276,16 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic allowNoIndices = parser.booleanValue(); } else if (IGNORE_THROTTLED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { generalOptions.ignoreThrottled(parser.booleanValue()); - } else { - throw new ElasticsearchParseException( - "could not read indices options. 
unexpected index option [" + currentFieldName + "]" - ); - } + } else if (DataStream.isFailureStoreEnabled() + && FAILURE_STORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failureStoreOptions = FailureStoreOptions.parseParameters(parser.text(), failureStoreOptions); + } else { + throw new ElasticsearchParseException( + "could not read indices options. Unexpected index option [" + currentFieldName + "]" + ); + } } else { - throw new ElasticsearchParseException("could not read indices options. unexpected object field [" + currentFieldName + "]"); + throw new ElasticsearchParseException("could not read indices options. Unexpected object field [" + currentFieldName + "]"); } } @@ -994,7 +1308,8 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic return IndicesOptions.builder() .concreteTargetOptions(new ConcreteTargetOptions(ignoreUnavailable)) .wildcardOptions(wildcards) - .generalOptions(generalOptions) + .gatekeeperOptions(generalOptions) + .failureStoreOptions(failureStoreOptions) .build(); } @@ -1108,6 +1423,14 @@ public String toString() { + ignoreAliases() + ", ignore_throttled=" + ignoreThrottled() + + (DataStream.isFailureStoreEnabled() + ? ", include_regular_indices=" + + includeRegularIndices() + + ", include_failure_indices=" + + includeFailureIndices() + + ", allow_failure_indices=" + + allowFailureIndices() + : "") + ']'; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index 22f0da70137af..00384852d1472 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -28,6 +28,11 @@ public abstract class ClusterInfoRequest failureIndices; + private volatile Set failureStoreLookup; @Nullable private final DataStreamAutoShardingEvent autoShardingEvent; @@ -282,6 +283,32 @@ public Index getWriteIndex() { return indices.get(indices.size() - 1); } + /** + * @return the write failure index if the failure store is enabled and there is already at least one failure, null otherwise + */ + @Nullable + public Index getFailureStoreWriteIndex() { + return isFailureStore() == false || failureIndices.isEmpty() ? null : failureIndices.get(failureIndices.size() - 1); + } + + /** + * Returns true if the index name provided belongs to a failure store index. + * This method builds a local Set with all the failure store index names and then checks if it contains the name. + * This will perform better if there are multiple indices of this data stream checked. 
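+ * <p>A minimal usage sketch, assuming a data stream that has a failure store; {@code candidateIndices}
+ * is illustrative and not an existing field:
+ * <pre>{@code
+ * DataStream dataStream = ...; // obtained from the cluster metadata indices lookup
+ * for (Index index : candidateIndices) {
+ *     if (dataStream.isFailureStoreIndex(index.getName())) {
+ *         // treat this index as a failure-store index; the name lookup set is built lazily
+ *         // on the first call and reused for subsequent checks
+ *     }
+ * }
+ * }</pre>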
+ */ + public boolean isFailureStoreIndex(String indexName) { + if (failureStoreLookup == null) { + // There is a chance this will be calculated twice, but it's a relatively cheap action, + // so it's not worth synchronising + if (failureIndices == null || failureIndices.isEmpty()) { + failureStoreLookup = Set.of(); + } else { + failureStoreLookup = failureIndices.stream().map(Index::getName).collect(Collectors.toSet()); + } + } + return failureStoreLookup.contains(indexName); + } + public boolean rolloverOnWrite() { return rolloverOnWrite; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 4c3318d8d2f6a..e8e8ca767cc34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndices; @@ -354,16 +355,19 @@ Index[] concreteIndices(Context context, String... indexExpressions) { + " indices without one being designated as a write index" ); } - if (addIndex(writeIndex, null, context)) { - concreteIndicesResult.add(writeIndex); + if (indexAbstraction.isDataStreamRelated()) { + DataStream dataStream = indicesLookup.get(indexAbstraction.getWriteIndex().getName()).getParentDataStream(); + resolveWriteIndexForDataStreams(context, dataStream, concreteIndicesResult); + } else { + if (addIndex(writeIndex, null, context)) { + concreteIndicesResult.add(writeIndex); + } } } else if (indexAbstraction.getType() == Type.DATA_STREAM && context.isResolveToWriteIndex()) { - Index writeIndex = indexAbstraction.getWriteIndex(); - if (addIndex(writeIndex, null, context)) { - concreteIndicesResult.add(writeIndex); - } + resolveWriteIndexForDataStreams(context, (DataStream) indexAbstraction, concreteIndicesResult); } else { - if (indexAbstraction.getIndices().size() > 1 && context.getOptions().allowAliasesToMultipleIndices() == false) { + if (resolvesToMoreThanOneIndex(indexAbstraction, context) + && context.getOptions().allowAliasesToMultipleIndices() == false) { String[] indexNames = new String[indexAbstraction.getIndices().size()]; int i = 0; for (Index indexName : indexAbstraction.getIndices()) { @@ -379,11 +383,27 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { ); } - for (Index index : indexAbstraction.getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { - concreteIndicesResult.add(index); + if (indexAbstraction.getType() == Type.DATA_STREAM) { + resolveIndicesForDataStream(context, (DataStream) indexAbstraction, concreteIndicesResult); + } else if (indexAbstraction.getType() == Type.ALIAS + && indexAbstraction.isDataStreamRelated() + && DataStream.isFailureStoreEnabled() + && context.getOptions().includeFailureIndices()) { + // Collect the data streams involved + Set aliasDataStreams = new HashSet<>(); + for (Index index : indexAbstraction.getIndices()) { + aliasDataStreams.add(indicesLookup.get(index.getName()).getParentDataStream()); + } + for (DataStream dataStream : aliasDataStreams) { + resolveIndicesForDataStream(context, dataStream, concreteIndicesResult); + } + } else { + for (Index index : indexAbstraction.getIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } } - } } } @@ -394,6 +414,67 @@ Index[] concreteIndices(Context context, String... indexExpressions) { return concreteIndicesResult.toArray(Index.EMPTY_ARRAY); } + private static void resolveIndicesForDataStream(Context context, DataStream dataStream, Set concreteIndicesResult) { + if (shouldIncludeRegularIndices(context.getOptions())) { + for (Index index : dataStream.getIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } + } + if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + // We short-circuit here, if failure indices are not allowed and they can be skipped + if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { + for (Index index : dataStream.getFailureIndices()) { + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); + } + } + } + } + } + + private static void resolveWriteIndexForDataStreams(Context context, DataStream dataStream, Set concreteIndicesResult) { + if (shouldIncludeRegularIndices(context.getOptions())) { + Index writeIndex = dataStream.getWriteIndex(); + if (addIndex(writeIndex, null, context)) { + concreteIndicesResult.add(writeIndex); + } + } + if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + Index failureStoreWriteIndex = dataStream.getFailureStoreWriteIndex(); + if (failureStoreWriteIndex != null && addIndex(failureStoreWriteIndex, null, context)) { + if (context.options.allowFailureIndices() == false) { + throw new FailureIndexNotSupportedException(failureStoreWriteIndex); + } + concreteIndicesResult.add(failureStoreWriteIndex); + } + } + } + + private static boolean shouldIncludeRegularIndices(IndicesOptions indicesOptions) { + return DataStream.isFailureStoreEnabled() == false || indicesOptions.includeRegularIndices(); + } + + private static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions, DataStream dataStream) { + return DataStream.isFailureStoreEnabled() && indicesOptions.includeFailureIndices() && dataStream.isFailureStore(); + } + + private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstraction, Context context) { + if (indexAbstraction.getType() == Type.DATA_STREAM) { + DataStream dataStream = (DataStream) indexAbstraction; + int count = 0; + if (shouldIncludeRegularIndices(context.getOptions())) { + count += dataStream.getIndices().size(); + } + if 
(shouldIncludeFailureIndices(context.getOptions(), dataStream)) { + count += dataStream.getFailureIndices().size(); + } + return count > 1; + } + return indexAbstraction.getIndices().size() > 1; + } + private void checkSystemIndexAccess(Context context, Set concreteIndices) { final Predicate systemIndexAccessPredicate = context.getSystemIndexAccessPredicate(); if (systemIndexAccessPredicate == Predicates.always()) { @@ -485,6 +566,21 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions // Exclude this one as it's a net-new system index, and we explicitly don't want those. return false; } + if (DataStream.isFailureStoreEnabled()) { + IndexAbstraction indexAbstraction = context.getState().metadata().getIndicesLookup().get(index.getName()); + if (context.options.allowFailureIndices() == false) { + DataStream parentDataStream = indexAbstraction.getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStore()) { + if (parentDataStream.isFailureStoreIndex(index.getName())) { + if (options.ignoreUnavailable()) { + return false; + } else { + throw new FailureIndexNotSupportedException(index); + } + } + } + } + } final IndexMetadata imd = context.state.metadata().index(index); if (imd.getState() == IndexMetadata.State.CLOSE) { if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { @@ -1309,7 +1405,7 @@ private static Map filterIndicesLookupForSuffixWildcar /** * Return the {@code Stream} of open and/or closed index names for the given {@param resources}. - * Datastreams and aliases are interpreted to refer to multiple indices, + * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. */ private static Stream expandToOpenClosed(Context context, Stream resources) { @@ -1320,7 +1416,18 @@ private static Stream expandToOpenClosed(Context context, Stream indicesStateStream = indexAbstraction.getIndices().stream().map(context.state.metadata()::index); + Stream indicesStateStream = Stream.of(); + if (shouldIncludeRegularIndices(context.getOptions())) { + indicesStateStream = indexAbstraction.getIndices().stream().map(context.state.metadata()::index); + } + if (indexAbstraction.getType() == Type.DATA_STREAM + && shouldIncludeFailureIndices(context.getOptions(), (DataStream) indexAbstraction)) { + DataStream dataStream = (DataStream) indexAbstraction; + indicesStateStream = Stream.concat( + indicesStateStream, + dataStream.getFailureIndices().stream().map(context.state.metadata()::index) + ); + } if (excludeState != null) { indicesStateStream = indicesStateStream.filter(indexMeta -> indexMeta.getState() != excludeState); } @@ -1362,6 +1469,9 @@ private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndice } private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions options, Metadata metadata) { + if (shouldIncludeRegularIndices(options) == false) { + return Strings.EMPTY_ARRAY; + } if (options.expandWildcardsOpen() && options.expandWildcardsClosed() && options.expandWildcardsHidden()) { return metadata.getConcreteAllIndices(); } else if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 4d76ead90e12a..b450251ff7e3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2597,6 +2597,9 @@ private static void collectIndices( private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) { assert parent == null || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())) + || (DataStream.isFailureStoreEnabled() + && parent.isFailureStore() + && parent.getFailureIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))) : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex(); return true; } @@ -2618,6 +2621,11 @@ private static void collectDataStreams( for (Index i : dataStream.getIndices()) { indexToDataStreamLookup.put(i.getName(), dataStream); } + if (DataStream.isFailureStoreEnabled() && dataStream.isFailureStore()) { + for (Index i : dataStream.getFailureIndices()) { + indexToDataStreamLookup.put(i.getName(), dataStream); + } + } } } diff --git a/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java b/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java new file mode 100644 index 0000000000000..90fdd364b7035 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/FailureIndexNotSupportedException.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.Index; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; + +/** + * Exception indicating that one or more requested indices are failure indices. 
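+ * <p>A minimal sketch of the intended calling pattern, mirroring the resolver logic earlier in this
+ * change set (names are those used there, not a public API contract):
+ * <pre>{@code
+ * if (options.allowFailureIndices() == false && parentDataStream.isFailureStoreIndex(index.getName())) {
+ *     if (options.ignoreUnavailable()) {
+ *         return false; // silently skip the failure index
+ *     } else {
+ *         throw new FailureIndexNotSupportedException(index);
+ *     }
+ * }
+ * }</pre>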
+ */ +public final class FailureIndexNotSupportedException extends ElasticsearchException { + + public FailureIndexNotSupportedException(Index index) { + super("failure index not supported"); + setIndex(index); + } + + public FailureIndexNotSupportedException(StreamInput in) throws IOException { + super(in); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + +} diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1265a4e7f96db..b8091b50b5dd8 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.AutoscalingMissedIndicesUpdateException; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.PeerRecoveryNotFound; @@ -827,6 +828,7 @@ public void testIds() { ids.put(175, AutoscalingMissedIndicesUpdateException.class); ids.put(176, SearchTimeoutException.class); ids.put(177, GraphStructureException.class); + ids.put(178, FailureIndexNotSupportedException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index 21c5d0bee47e9..d77be7c45e416 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.get; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequestTests; @@ -71,4 +72,18 @@ public void testInvalidFeatures() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> GetIndexRequest.Feature.fromRequest(request)); assertThat(e.getMessage(), containsString(Strings.format("Invalid features specified [%s]", String.join(",", invalidFeatures)))); } + + public void testIndicesOptions() { + GetIndexRequest getIndexRequest = new GetIndexRequest(); + assertThat( + getIndexRequest.indicesOptions().concreteTargetOptions(), + equalTo(IndicesOptions.strictExpandOpen().concreteTargetOptions()) + ); + assertThat(getIndexRequest.indicesOptions().wildcardOptions(), equalTo(IndicesOptions.strictExpandOpen().wildcardOptions())); + assertThat(getIndexRequest.indicesOptions().gatekeeperOptions(), equalTo(IndicesOptions.strictExpandOpen().gatekeeperOptions())); + assertThat( + getIndexRequest.indicesOptions().failureStoreOptions(), + equalTo(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true).build()) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index deec53de59326..297ebbae6c85a 100644 --- 
a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -9,7 +9,8 @@ package org.elasticsearch.action.support; import org.elasticsearch.action.support.IndicesOptions.ConcreteTargetOptions; -import org.elasticsearch.action.support.IndicesOptions.GeneralOptions; +import org.elasticsearch.action.support.IndicesOptions.FailureStoreOptions; +import org.elasticsearch.action.support.IndicesOptions.GatekeeperOptions; import org.elasticsearch.action.support.IndicesOptions.WildcardOptions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -40,17 +41,25 @@ public class IndicesOptionsTests extends ESTestCase { public void testSerialization() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { - IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); + IndicesOptions indicesOptions = IndicesOptions.builder() + .wildcardOptions( + WildcardOptions.builder() + .matchOpen(randomBoolean()) + .matchClosed(randomBoolean()) + .includeHidden(randomBoolean()) + .allowEmptyExpressions(randomBoolean()) + .resolveAliases(randomBoolean()) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .ignoreThrottled(randomBoolean()) + .allowAliasToMultipleIndices(randomBoolean()) + .allowClosedIndices(randomBoolean()) + ) + .failureStoreOptions( + FailureStoreOptions.builder().includeRegularIndices(randomBoolean()).includeFailureIndices(randomBoolean()) + ) + .build(); BytesStreamOutput output = new BytesStreamOutput(); indicesOptions.writeIndicesOptions(output); @@ -58,16 +67,7 @@ public void testSerialization() throws Exception { StreamInput streamInput = output.bytes().streamInput(); IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput); - assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable())); - assertThat(indicesOptions2.allowNoIndices(), equalTo(indicesOptions.allowNoIndices())); - assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen())); - assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed())); - assertThat(indicesOptions2.expandWildcardsHidden(), equalTo(indicesOptions.expandWildcardsHidden())); - - assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices())); - assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); - - assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); + assertThat(indicesOptions2, equalTo(indicesOptions)); } } @@ -343,9 +343,10 @@ public void testToXContent() throws IOException { randomBoolean(), randomBoolean() ); - GeneralOptions generalOptions = new GeneralOptions(randomBoolean(), randomBoolean(), randomBoolean()); + GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + FailureStoreOptions failureStoreOptions = new IndicesOptions.FailureStoreOptions(randomBoolean(), randomBoolean()); - IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, generalOptions); + IndicesOptions indicesOptions = new 
IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); XContentType type = randomFrom(XContentType.values()); BytesReference xContentBytes = toXContentBytes(indicesOptions, type); @@ -359,7 +360,8 @@ public void testToXContent() throws IOException { assertThat(((List) map.get("expand_wildcards")).contains("hidden"), equalTo(wildcardOptions.includeHidden())); assertThat(map.get("ignore_unavailable"), equalTo(concreteTargetOptions.allowUnavailableTargets())); assertThat(map.get("allow_no_indices"), equalTo(wildcardOptions.allowEmptyExpressions())); - assertThat(map.get("ignore_throttled"), equalTo(generalOptions.ignoreThrottled())); + assertThat(map.get("ignore_throttled"), equalTo(gatekeeperOptions.ignoreThrottled())); + assertThat(map.get("failure_store"), equalTo(failureStoreOptions.displayValue())); } public void testFromXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 7e8e9805b54e7..8b6a0fcb55c5b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1781,6 +1781,164 @@ private IndexMetadata createIndexMetadata(String indexName, IndexWriteLoad index .build(); } + public void testWriteFailureIndex() { + boolean hidden = randomBoolean(); + boolean system = hidden && randomBoolean(); + DataStream noFailureStoreDataStream = new DataStream( + randomAlphaOfLength(10), + randomIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + false, + null, + randomBoolean(), + null + ); + assertThat(noFailureStoreDataStream.getFailureStoreWriteIndex(), nullValue()); + + DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( + randomAlphaOfLength(10), + randomIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + List.of(), + randomBoolean(), + null + ); + assertThat(failureStoreDataStreamWithEmptyFailureIndices.getFailureStoreWriteIndex(), nullValue()); + + List failureIndices = randomIndexInstances(); + String dataStreamName = randomAlphaOfLength(10); + Index writeFailureIndex = new Index( + getDefaultBackingIndexName(dataStreamName, randomNonNegativeInt()), + UUIDs.randomBase64UUID(LuceneTestCase.random()) + ); + failureIndices.add(writeFailureIndex); + DataStream failureStoreDataStream = new DataStream( + dataStreamName, + randomIndexInstances(), + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? 
IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + failureIndices, + randomBoolean(), + null + ); + assertThat(failureStoreDataStream.getFailureStoreWriteIndex(), is(writeFailureIndex)); + } + + public void testIsFailureIndex() { + boolean hidden = randomBoolean(); + boolean system = hidden && randomBoolean(); + List backingIndices = randomIndexInstances(); + DataStream noFailureStoreDataStream = new DataStream( + randomAlphaOfLength(10), + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + false, + null, + randomBoolean(), + null + ); + assertThat( + noFailureStoreDataStream.isFailureStoreIndex(backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName()), + is(false) + ); + + backingIndices = randomIndexInstances(); + DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( + randomAlphaOfLength(10), + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + List.of(), + randomBoolean(), + null + ); + assertThat( + failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex( + backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName() + ), + is(false) + ); + + backingIndices = randomIndexInstances(); + List failureIndices = randomIndexInstances(); + String dataStreamName = randomAlphaOfLength(10); + Index writeFailureIndex = new Index( + getDefaultBackingIndexName(dataStreamName, randomNonNegativeInt()), + UUIDs.randomBase64UUID(LuceneTestCase.random()) + ); + failureIndices.add(writeFailureIndex); + DataStream failureStoreDataStream = new DataStream( + dataStreamName, + backingIndices, + randomNonNegativeInt(), + null, + hidden, + randomBoolean(), + system, + System::currentTimeMillis, + randomBoolean(), + randomBoolean() ? 
IndexMode.STANDARD : IndexMode.TIME_SERIES, + DataStreamLifecycleTests.randomLifecycle(), + true, + failureIndices, + randomBoolean(), + null + ); + assertThat(failureStoreDataStream.isFailureStoreIndex(writeFailureIndex.getName()), is(true)); + assertThat( + failureStoreDataStream.isFailureStoreIndex(failureIndices.get(randomIntBetween(0, failureIndices.size() - 1)).getName()), + is(true) + ); + assertThat( + failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex( + backingIndices.get(randomIntBetween(0, backingIndices.size() - 1)).getName() + ), + is(false) + ); + assertThat(failureStoreDataStreamWithEmptyFailureIndices.isFailureStoreIndex(randomAlphaOfLength(10)), is(false)); + } + private record DataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis, Long originationTimeInMillis) { public static DataStreamMetadata dataStreamMetadata(Long creationTimeInMillis, Long rolloverTimeInMillis) { return new DataStreamMetadata(creationTimeInMillis, rolloverTimeInMillis, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index c043734d15cdf..a1eeceba8a390 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.FailureIndexNotSupportedException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndexDescriptor; @@ -53,6 +54,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createFailureStore; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; import static org.elasticsearch.common.util.set.Sets.newHashSet; @@ -2294,7 +2296,7 @@ public void testIgnoreThrottled() { new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.DEFAULT, - IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).build() + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).build() ), "ind*", "test-index" @@ -2697,6 +2699,200 @@ public void testDataStreams() { } } + public void testDataStreamsWithFailureStore() { + final String dataStreamName = "my-data-stream"; + IndexMetadata index1 = createBackingIndex(dataStreamName, 1, epochMillis).build(); + IndexMetadata index2 = createBackingIndex(dataStreamName, 2, epochMillis).build(); + IndexMetadata failureIndex1 = createFailureStore(dataStreamName, 1, epochMillis).build(); + IndexMetadata failureIndex2 = createFailureStore(dataStreamName, 2, epochMillis).build(); + IndexMetadata otherIndex = indexBuilder("my-other-index", Settings.EMPTY).state(State.OPEN).build(); + + Metadata.Builder mdBuilder = Metadata.builder() + .put(index1, false) + .put(index2, false) + .put(failureIndex1, false) + .put(failureIndex2, false) + .put(otherIndex, false) + .put( + 
newInstance( + dataStreamName, + List.of(index1.getIndex(), index2.getIndex()), + List.of(failureIndex1.getIndex(), failureIndex2.getIndex()) + ) + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + + // Test default with an exact data stream name + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store with an exact data stream name + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(4)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + assertThat(result[2].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis))); + assertThat(result[3].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store while we do not allow failure indices and ignore unavailable + // We expect that they will be skipped + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) + .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis))); + } + + // Test include failure store while we do not allow failure indices + // We expect an error + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) + .build(); + FailureIndexNotSupportedException failureIndexNotSupportedException = expectThrows( + FailureIndexNotSupportedException.class, + () -> indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream") + ); + assertThat( + failureIndexNotSupportedException.getIndex().getName(), + equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis)) + ); + } + + // Test only failure store with an exact data stream name + { + IndicesOptions indicesOptions = 
IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); + assertThat(result.length, equalTo(2)); + assertThat(result[0].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis))); + assertThat(result[1].getName(), equalTo(DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis))); + } + + // Test default without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(3)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test include failure store without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(5)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test only failure store without any expressions + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); + assertThat(result.length, equalTo(2)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis) + ) + ); + } + + // Test default with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(3)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test include failure store with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + 
.failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(5)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultBackingIndexName(dataStreamName, 2, epochMillis), + DataStream.getDefaultBackingIndexName(dataStreamName, 1, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis), + otherIndex.getIndex().getName() + ) + ); + } + + // Test only failure store with wildcard expression + { + IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) + .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .build(); + Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); + assertThat(result.length, equalTo(2)); + List indexNames = Arrays.stream(result).map(Index::getName).toList(); + assertThat( + indexNames, + containsInAnyOrder( + DataStream.getDefaultFailureStoreName(dataStreamName, 2, epochMillis), + DataStream.getDefaultFailureStoreName(dataStreamName, 1, epochMillis) + ) + ); + } + } + public void testDataStreamAliases() { String dataStream1 = "my-data-stream-1"; IndexMetadata index1 = createBackingIndex(dataStream1, 1, epochMillis).build(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 8fc02bb8e808c..1cc5006fe0018 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -91,6 +91,10 @@ public static DataStream newInstance(String name, List indices) { return newInstance(name, indices, indices.size(), null); } + public static DataStream newInstance(String name, List indices, List failureIndices) { + return newInstance(name, indices, indices.size(), null, false, null, failureIndices); + } + public static DataStream newInstance(String name, List indices, long generation, Map metadata) { return newInstance(name, indices, generation, metadata, false); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 69d8663478b36..5ddba7519eef2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -338,8 +338,8 @@ public void testIndexDoesntExist() { "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " - + "with exception [no such index [not_foo]]" + + "forbid_closed_indices=true, ignore_aliases=false, 
ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]" ) ); @@ -361,8 +361,9 @@ public void testIndexDoesntExist() { + "[cannot start datafeed [datafeed_id] because it failed resolving " + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " - + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true" - + "]] with exception [no such index [not_foo]]]" + + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, " + + "include_regular_indices=true, include_failure_indices=false, allow_failure_indices=true]] " + + "with exception [no such index [not_foo]]]" ) ); } @@ -527,8 +528,8 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " - + "with exception [no such index [not_foo]]]" + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]]" ) ); } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index 9009baf188cac..318f3888ac9b3 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -124,8 +124,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(false) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); private static final IndicesOptions FROZEN_INDICES_OPTIONS = IndicesOptions.builder() @@ -138,8 +138,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(false) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); @@ -153,8 +153,8 @@ public String toString() { .allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); public static final IndicesOptions FIELD_CAPS_FROZEN_INDICES_OPTIONS = IndicesOptions.builder() @@ -167,8 +167,8 @@ public String toString() { 
.allowEmptyExpressions(true) .resolveAliases(true) ) - .generalOptions( - IndicesOptions.GeneralOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(false).allowClosedIndices(true).allowAliasToMultipleIndices(true) ) .build(); From d9d291b8a3b215358cfc365e2d9067fd6f00dd54 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 6 Mar 2024 23:34:18 +1100 Subject: [PATCH 018/248] Minor improvement for SparseFileTracker toString (#106004) Include both length and complete in toString for easier introspection. Also add a comment to the choice of using a string constant (`file`) for the description. --- .../org/elasticsearch/blobcache/common/SparseFileTracker.java | 2 +- .../elasticsearch/blobcache/shared/SharedBlobCacheService.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java index 5eb146102cd76..e9be9577063cf 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java @@ -525,7 +525,7 @@ private boolean invariant() { @Override public String toString() { - return "SparseFileTracker[" + description + ']'; + return "SparseFileTracker{description=" + description + ", length=" + length + ", complete=" + complete + '}'; } /** diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 1f6f075a2b2af..d4c7c04c5b26e 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -715,6 +715,7 @@ class CacheFileRegion extends EvictableRefCounted { CacheFileRegion(RegionKey regionKey, int regionSize) { this.regionKey = regionKey; assert regionSize > 0; + // NOTE we use a constant string for description to avoid consume extra heap space tracker = new SparseFileTracker("file", regionSize); } From 38168407ef6200ab3f1df55553a004f7a254ec56 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 6 Mar 2024 07:45:13 -0500 Subject: [PATCH 019/248] Docs typo fix (#105835) (#106002) Co-authored-by: MikhailBerezhanov <35196259+MikhailBerezhanov@users.noreply.github.com> --- docs/reference/data-streams/lifecycle/index.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index ef5558817885e..bf861df7c80d4 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -36,8 +36,8 @@ each data stream and performs the following steps: automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. As the segments are organised into tiers of exponential sizes, merging the long tail of small segments is only a -fraction of the cost of force mergeing to a single segment. 
The small segments would usually -hold the most recent data so tail mergeing will focus the merging resources on the higher-value +fraction of the cost of force merging to a single segment. The small segments would usually +hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most likely to keep being queried. 4. If <> is configured it will execute all the configured downsampling rounds. From 61a50339a67e807687554074373125a2be99b25d Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Wed, 6 Mar 2024 14:05:47 +0100 Subject: [PATCH 020/248] For cartesian values we are even more lenient with extremely large values (#106014) --- .../lucene/spatial/CentroidCalculatorTests.java | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index d15ea1ac2e469..7a5cb5de49bdc 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -423,13 +423,12 @@ private CentroidMatcher(double x, double y, double weight, double weightFactor) } private Matcher matchDouble(double value) { - if (value > 1e20 || value < 1e20) { - // Very large values have floating point errors, so instead of an absolute value, we use a relative one - return closeTo(value, Math.abs(value / 1e10)); - } else { - // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. - return closeTo(value, DELTA); - } + // Very large values have floating point errors, so instead of an absolute value, we use a relative one + // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. + double delta = (value > 1e28 || value < -1e28) ? Math.abs(value / 1e6) + : (value > 1e20 || value < -1e20) ? Math.abs(value / 1e10) + : DELTA; + return closeTo(value, delta); } @Override From c21b23c2641fbc65648d19719cd61ffe9556dd11 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 6 Mar 2024 14:29:56 +0100 Subject: [PATCH 021/248] Reduce overhead of (source-less) SearchHit (#105659) Reduce overhead of `SearchHit` a little. No need for any real ref-counting if there's neither source nor nested hits. Same goes for `SearchHits` which don't have to be ref-counted if their contents aren't. Also, don't create pointless unmodifiable maps wrapping the empty singleton for highlight fields and use the singleton for the empty search sort values. --- .../org/elasticsearch/search/SearchHit.java | 19 +++++++++++++------ .../org/elasticsearch/search/SearchHits.java | 11 +++++++++-- .../search/SearchSortValues.java | 18 +++++++++++++----- .../search/SearchSortValuesTests.java | 2 +- 4 files changed, 36 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index fe11aa8af39f4..60ced289929a0 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -204,7 +204,7 @@ public SearchHit( this.innerHits = innerHits; this.documentFields = documentFields; this.metaFields = metaFields; - this.refCounted = refCounted == null ? LeakTracker.wrap(new SimpleRefCounted()) : ALWAYS_REFERENCED; + this.refCounted = refCounted == null ? 
LeakTracker.wrap(new SimpleRefCounted()) : refCounted; } public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException { @@ -233,8 +233,10 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept } final Map documentFields = in.readMap(DocumentField::new); final Map metaFields = in.readMap(DocumentField::new); - final Map highlightFields = in.readMapValues(HighlightField::new, HighlightField::name); - final SearchSortValues sortValues = new SearchSortValues(in); + Map highlightFields = in.readMapValues(HighlightField::new, HighlightField::name); + highlightFields = highlightFields.isEmpty() ? null : unmodifiableMap(highlightFields); + + final SearchSortValues sortValues = SearchSortValues.readFrom(in); final Map matchedQueries; if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { @@ -257,12 +259,17 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept index = shardTarget.getIndex(); clusterAlias = shardTarget.getClusterAlias(); } + + boolean isPooled = pooled && source != null; final Map innerHits; int size = in.readVInt(); if (size > 0) { innerHits = Maps.newMapWithExpectedSize(size); for (int i = 0; i < size; i++) { - innerHits.put(in.readString(), SearchHits.readFrom(in, pooled)); + var key = in.readString(); + var nestedHits = SearchHits.readFrom(in, pooled); + innerHits.put(key, nestedHits); + isPooled = isPooled || nestedHits.isPooled(); } } else { innerHits = null; @@ -277,7 +284,7 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept seqNo, primaryTerm, source, - unmodifiableMap(highlightFields), + highlightFields, sortValues, matchedQueries, explanation, @@ -288,7 +295,7 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept innerHits, documentFields, metaFields, - pooled ? null : ALWAYS_REFERENCED + isPooled ? 
null : ALWAYS_REFERENCED ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index ce8ccf4b7f0e6..d559fc60fa72d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -132,24 +132,31 @@ public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOExcep final float maxScore = in.readFloat(); int size = in.readVInt(); final SearchHit[] hits; + boolean isPooled = false; if (size == 0) { hits = EMPTY; } else { hits = new SearchHit[size]; for (int i = 0; i < hits.length; i++) { - hits[i] = SearchHit.readFrom(in, pooled); + var hit = SearchHit.readFrom(in, pooled); + hits[i] = hit; + isPooled = isPooled || hit.isPooled(); } } var sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); var collapseField = in.readOptionalString(); var collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); - if (pooled) { + if (isPooled) { return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); } else { return unpooled(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); } } + public boolean isPooled() { + return refCounted != ALWAYS_REFERENCED; + } + @Override public void writeTo(StreamOutput out) throws IOException { assert hasReferences(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchSortValues.java b/server/src/main/java/org/elasticsearch/search/SearchSortValues.java index b82e6632ca9ec..38bc705bdf5ae 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchSortValues.java +++ b/server/src/main/java/org/elasticsearch/search/SearchSortValues.java @@ -32,8 +32,7 @@ public class SearchSortValues implements ToXContentFragment, Writeable { private final Object[] rawSortValues; SearchSortValues(Object[] sortValues) { - this.formattedSortValues = Objects.requireNonNull(sortValues, "sort values must not be empty"); - this.rawSortValues = EMPTY_ARRAY; + this(Objects.requireNonNull(sortValues, "sort values must not be empty"), EMPTY_ARRAY); } public SearchSortValues(Object[] rawSortValues, DocValueFormat[] sortValueFormats) { @@ -52,9 +51,18 @@ public SearchSortValues(Object[] rawSortValues, DocValueFormat[] sortValueFormat } } - SearchSortValues(StreamInput in) throws IOException { - this.formattedSortValues = in.readArray(Lucene::readSortValue, Object[]::new); - this.rawSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + public static SearchSortValues readFrom(StreamInput in) throws IOException { + Object[] formattedSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + Object[] rawSortValues = in.readArray(Lucene::readSortValue, Object[]::new); + if (formattedSortValues.length == 0 && rawSortValues.length == 0) { + return EMPTY; + } + return new SearchSortValues(formattedSortValues, rawSortValues); + } + + private SearchSortValues(Object[] formattedSortValues, Object[] rawSortValues) { + this.formattedSortValues = formattedSortValues; + this.rawSortValues = rawSortValues; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java index ac9ae1da0fddd..e21ae8af04b77 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchSortValuesTests.java @@ -90,7 +90,7 @@ protected 
SearchSortValues createTestInstance() { @Override protected Writeable.Reader instanceReader() { - return SearchSortValues::new; + return SearchSortValues::readFrom; } @Override From f468024c25ed7f373e209322b2f2b9ced25a100e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 6 Mar 2024 14:30:24 +0100 Subject: [PATCH 022/248] Fix duplication around transport code in IndexBasedTransformConfigManager (#105907) Drying up the logic in this class further and making use of `delegateAndWrap` in an effort to narrow down possible sources for the remaining very rare leaks we observe around `SearchResponse`. --- .../IndexBasedTransformConfigManager.java | 475 ++++++++---------- 1 file changed, 209 insertions(+), 266 deletions(-) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index 1d44ed5a1f8ef..40eb2e2ad294a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -14,6 +14,9 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; @@ -60,6 +63,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.transform.TransformField; @@ -76,10 +80,10 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.BiConsumer; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * Place of all interactions with the internal transforms index. 
For configuration and mappings see @link{TransformInternalIndex} @@ -135,9 +139,7 @@ public void putTransformCheckpoint(TransformCheckpoint checkpoint, ActionListene .id(TransformCheckpoint.documentId(checkpoint.getTransformId(), checkpoint.getCheckpoint())) .source(source); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> { - listener.onResponse(true); - }, listener::onFailure)); + executeAsyncWithOrigin(TransportIndexAction.TYPE, indexRequest, listener.delegateFailureAndWrap((l, r) -> l.onResponse(true))); } catch (IOException e) { // not expected to happen but for the sake of completeness listener.onFailure(e); @@ -180,22 +182,16 @@ public void deleteOldTransformConfigurations(String transformId, ActionListener< ) ); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(true); - }, listener::onFailure) - ); + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, deleteByQueryRequest, listener.delegateFailureAndWrap((l, response) -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + l.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) + ); + return; + } + l.onResponse(true); + })); } @Override @@ -212,22 +208,7 @@ public void deleteOldTransformStoredDocuments(String transformId, ActionListener .filter(QueryBuilders.termQuery("_id", TransformStoredDoc.documentId(transformId))) ) ); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(response.getDeleted()); - }, listener::onFailure) - ); + deleteByQuery(listener, deleteByQueryRequest); } @Override @@ -247,22 +228,20 @@ public void deleteOldCheckpoints(String transformId, long deleteCheckpointsBelow ) ); logger.debug("Deleting old checkpoints using {}", deleteByQueryRequest.getSearchRequest()); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - DeleteByQueryAction.INSTANCE, - deleteByQueryRequest, - ActionListener.wrap(response -> { - if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { - Tuple statusAndReason = getStatusAndReason(response); - listener.onFailure( - new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) - ); - return; - } - listener.onResponse(response.getDeleted()); - }, listener::onFailure) - ); + deleteByQuery(listener, deleteByQueryRequest); + } + + private void deleteByQuery(ActionListener listener, DeleteByQueryRequest deleteByQueryRequest) { + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, deleteByQueryRequest, 
listener.delegateFailureAndWrap((l, response) -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + l.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2()) + ); + return; + } + l.onResponse(response.getDeleted()); + })); } @Override @@ -304,13 +283,13 @@ public void deleteOldIndices(ActionListener listener) { IndicesOptions.LENIENT_EXPAND_OPEN ); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportDeleteIndexAction.TYPE, deleteRequest, ActionListener.wrap(response -> { + executeAsyncWithOrigin(TransportDeleteIndexAction.TYPE, deleteRequest, listener.delegateFailureAndWrap((l, response) -> { if (response.isAcknowledged() == false) { - listener.onFailure(new ElasticsearchStatusException("Failed to delete internal indices", RestStatus.INTERNAL_SERVER_ERROR)); + l.onFailure(new ElasticsearchStatusException("Failed to delete internal indices", RestStatus.INTERNAL_SERVER_ERROR)); return; } - listener.onResponse(true); - }, listener::onFailure)); + l.onResponse(true); + })); } private void putTransformConfiguration( @@ -331,9 +310,7 @@ private void putTransformConfiguration( if (seqNoPrimaryTermAndIndex != null) { indexRequest.setIfSeqNo(seqNoPrimaryTermAndIndex.getSeqNo()).setIfPrimaryTerm(seqNoPrimaryTermAndIndex.getPrimaryTerm()); } - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> { - listener.onResponse(true); - }, e -> { + executeAsyncWithOrigin(TransportIndexAction.TYPE, indexRequest, ActionListener.wrap(r -> listener.onResponse(true), e -> { if (e instanceof VersionConflictEngineException) { if (DocWriteRequest.OpType.CREATE.equals(opType)) { // we want to create the transform but it already exists listener.onFailure( @@ -378,22 +355,16 @@ public void getTransformCheckpoint(String transformId, long checkpoint, ActionLi .setAllowPartialSearchResults(false) .request(); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - TransportSearchAction.TYPE, - searchRequest, - ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - // do not fail if checkpoint does not exist but return an empty checkpoint - logger.trace("found no checkpoint for transform [" + transformId + "], returning empty checkpoint"); - resultListener.onResponse(TransformCheckpoint.EMPTY); - return; - } - BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); - parseCheckpointsLenientlyFromSource(source, transformId, resultListener); - }, resultListener::onFailure) - ); + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + // do not fail if checkpoint does not exist but return an empty checkpoint + logger.trace("found no checkpoint for transform [{}], returning empty checkpoint", transformId); + l.onResponse(TransformCheckpoint.EMPTY); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseCheckpointsLenientlyFromSource(source, transformId, l); + })); } @Override @@ -416,14 +387,12 @@ public void getTransformCheckpointForUpdate( .request(); executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, - ActionListener.wrap(searchResponse -> { + checkpointAndVersionListener.delegateFailureAndWrap((l, 
searchResponse) -> { if (searchResponse.getHits().getHits().length == 0) { // do not fail, this _must_ be handled by the caller - checkpointAndVersionListener.onResponse(null); + l.onResponse(null); return; } SearchHit hit = searchResponse.getHits().getHits()[0]; @@ -431,17 +400,16 @@ public void getTransformCheckpointForUpdate( parseCheckpointsLenientlyFromSource( source, transformId, - ActionListener.wrap( - parsedCheckpoint -> checkpointAndVersionListener.onResponse( + l.delegateFailureAndWrap( + (ll, parsedCheckpoint) -> ll.onResponse( Tuple.tuple( parsedCheckpoint, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex()) ) - ), - checkpointAndVersionListener::onFailure + ) ) ); - }, checkpointAndVersionListener::onFailure) + }) ); } @@ -459,22 +427,16 @@ public void getTransformConfiguration(String transformId, ActionListenerwrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - resultListener.onFailure( - new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) - ); - return; - } - BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); - parseTransformLenientlyFromSource(source, transformId, resultListener); - }, resultListener::onFailure) - ); + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + ); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseTransformLenientlyFromSource(source, transformId, l); + })); } @Override @@ -495,26 +457,29 @@ public void getTransformConfigurationForUpdate( .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - configAndVersionListener.onFailure( - new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + executeAsyncWithOrigin( + TransportSearchAction.TYPE, + searchRequest, + configAndVersionListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) + ); + return; + } + SearchHit hit = searchResponse.getHits().getHits()[0]; + BytesReference source = hit.getSourceRef(); + parseTransformLenientlyFromSource( + source, + transformId, + l.delegateFailureAndWrap( + (ll, config) -> ll.onResponse( + Tuple.tuple(config, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex())) + ) + ) ); - return; - } - SearchHit hit = searchResponse.getHits().getHits()[0]; - BytesReference source = hit.getSourceRef(); - parseTransformLenientlyFromSource( - source, - transformId, - ActionListener.wrap( - config -> configAndVersionListener.onResponse( - Tuple.tuple(config, new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex())) - ), - configAndVersionListener::onFailure - ) - ); - }, configAndVersionListener::onFailure)); + }) + ); } @Override @@ -543,48 +508,40 @@ public void expandTransformIds( final ExpandedIdsMatcher requiredMatches = new 
ExpandedIdsMatcher(idTokens, allowNoMatch); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - request, - ActionListener.wrap(searchResponse -> { - long totalHits = searchResponse.getHits().getTotalHits().value; - // important: preserve order - Set ids = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); - Set configs = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); - for (SearchHit hit : searchResponse.getHits().getHits()) { - try (XContentParser parser = createParser(hit)) { - TransformConfig config = TransformConfig.fromXContent(parser, null, true); - if (ids.add(config.getId())) { - configs.add(config); - } - } catch (IOException e) { - foundConfigsListener.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); - return; + executeAsyncWithOrigin(request, foundConfigsListener.delegateFailureAndWrap((l, searchResponse) -> { + long totalHits = searchResponse.getHits().getTotalHits().value; + // important: preserve order + Set ids = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); + Set configs = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + try (XContentParser parser = createParser(hit)) { + TransformConfig config = TransformConfig.fromXContent(parser, null, true); + if (ids.add(config.getId())) { + configs.add(config); } - } - requiredMatches.filterMatchedIds(ids); - if (requiredMatches.hasUnmatchedIds()) { - // some required Ids were not found - foundConfigsListener.onFailure( - new ResourceNotFoundException( - TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, requiredMatches.unmatchedIdsString()) - ) - ); + } catch (IOException e) { + l.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); return; } - // if only exact ids have been given, take the count from docs to avoid potential duplicates - // in versioned indexes (like transform) - if (requiredMatches.isOnlyExact()) { - foundConfigsListener.onResponse( - new Tuple<>((long) ids.size(), Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs))) - ); - } else { - foundConfigsListener.onResponse(new Tuple<>(totalHits, Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); - } - }, foundConfigsListener::onFailure), - client::search - ); + } + requiredMatches.filterMatchedIds(ids); + if (requiredMatches.hasUnmatchedIds()) { + // some required Ids were not found + l.onFailure( + new ResourceNotFoundException( + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, requiredMatches.unmatchedIdsString()) + ) + ); + return; + } + // if only exact ids have been given, take the count from docs to avoid potential duplicates + // in versioned indexes (like transform) + if (requiredMatches.isOnlyExact()) { + l.onResponse(new Tuple<>((long) ids.size(), Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); + } else { + l.onResponse(new Tuple<>(totalHits, Tuple.tuple(new ArrayList<>(ids), new ArrayList<>(configs)))); + } + }), client::search); } private XContentParser createParser(BytesReference source) throws IOException { @@ -601,12 +558,7 @@ private XContentParser createParser(SearchHit hit) throws IOException { @Override public void getAllTransformIds(TimeValue timeout, ActionListener> listener) { - expandAllTransformIds( - false, - MAX_RESULTS_WINDOW, - timeout, - ActionListener.wrap(r -> 
listener.onResponse(r.v2()), listener::onFailure) - ); + expandAllTransformIds(false, MAX_RESULTS_WINDOW, timeout, listener.delegateFailureAndWrap((l, r) -> l.onResponse(r.v2()))); } @Override @@ -616,7 +568,7 @@ public void getAllOutdatedTransformIds(TimeValue timeout, ActionListener listener) { - ActionListener deleteListener = ActionListener.wrap(dbqResponse -> { listener.onResponse(true); }, e -> { + ActionListener deleteListener = ActionListener.wrap(dbqResponse -> listener.onResponse(true), e -> { if (e.getClass() == IndexNotFoundException.class) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -636,7 +588,7 @@ public void resetTransform(String transformId, ActionListener listener) .query(QueryBuilders.termQuery(TransformField.ID.getPreferredName(), transformId)) .trackTotalHitsUpTo(1) ); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, deleteListener.delegateFailureAndWrap((l, searchResponse) -> { if (searchResponse.getHits().getTotalHits().value == 0) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -655,8 +607,8 @@ public void resetTransform(String transformId, ActionListener listener) TransformInternalIndexConstants.INDEX_NAME_PATTERN, TransformInternalIndexConstants.INDEX_NAME_PATTERN_DEPRECATED ).setQuery(dbqQuery).setRefresh(true); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, dbqRequest, deleteListener); - }, deleteListener::onFailure)); + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, dbqRequest, l); + })); } @Override @@ -668,7 +620,7 @@ public void deleteTransform(String transformId, ActionListener listener request.setQuery(query); request.setRefresh(true); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { + executeAsyncWithOrigin(DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { if (deleteResponse.getDeleted() == 0) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -714,8 +666,6 @@ public void putOrUpdateTransformStoredDoc( } executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, TransportIndexAction.TYPE, indexRequest, ActionListener.wrap( @@ -758,38 +708,30 @@ public void getTransformStoredDoc( .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin( - client, - TRANSFORM_ORIGIN, - TransportSearchAction.TYPE, - searchRequest, - ActionListener.wrap(searchResponse -> { - if (searchResponse.getHits().getHits().length == 0) { - if (allowNoMatch) { - resultListener.onResponse(null); - } else { - resultListener.onFailure( - new ResourceNotFoundException( - TransformMessages.getMessage(TransformMessages.UNKNOWN_TRANSFORM_STATS, transformId) - ) - ); - } - return; - } - SearchHit searchHit = searchResponse.getHits().getHits()[0]; - try (XContentParser parser = createParser(searchHit)) { - resultListener.onResponse( - Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit)) - ); - } catch (Exception e) { - logger.error( - TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), - e + 
executeAsyncWithOrigin(TransportSearchAction.TYPE, searchRequest, resultListener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getHits().getHits().length == 0) { + if (allowNoMatch) { + l.onResponse(null); + } else { + l.onFailure( + new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.UNKNOWN_TRANSFORM_STATS, transformId)) ); - resultListener.onFailure(e); } - }, resultListener::onFailure) - ); + return; + } + SearchHit searchHit = searchResponse.getHits().getHits()[0]; + try (XContentParser parser = createParser(searchHit)) { + resultListener.onResponse( + Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit)) + ); + } catch (Exception e) { + logger.error( + TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), + e + ); + resultListener.onFailure(e); + } + })); } @Override @@ -816,43 +758,50 @@ public void getTransformStoredDocs( .setTimeout(timeout) .request(); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - searchRequest, - ActionListener.wrap(searchResponse -> { - List stats = new ArrayList<>(); - String previousId = null; - for (SearchHit hit : searchResponse.getHits().getHits()) { - // skip old versions - if (hit.getId().equals(previousId) == false) { - previousId = hit.getId(); - try (XContentParser parser = createParser(hit)) { - stats.add(TransformStoredDoc.fromXContent(parser)); - } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("failed to parse transform stats from search hit", e)); - return; - } + executeAsyncWithOrigin(searchRequest, listener.delegateFailureAndWrap((l, searchResponse) -> { + List stats = new ArrayList<>(); + String previousId = null; + for (SearchHit hit : searchResponse.getHits().getHits()) { + // skip old versions + if (hit.getId().equals(previousId) == false) { + previousId = hit.getId(); + try (XContentParser parser = createParser(hit)) { + stats.add(TransformStoredDoc.fromXContent(parser)); + } catch (IOException e) { + l.onFailure(new ElasticsearchParseException("failed to parse transform stats from search hit", e)); + return; } } - - listener.onResponse(stats); - }, listener::onFailure), - client::search - ); + } + l.onResponse(stats); + }), client::search); } @Override public void refresh(ActionListener listener) { executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, new RefreshRequest(TransformInternalIndexConstants.LATEST_INDEX_NAME), - ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure), + listener.delegateFailureAndWrap((l, r) -> l.onResponse(true)), client.admin().indices()::refresh ); } + private void executeAsyncWithOrigin( + Request request, + ActionListener listener, + BiConsumer> consumer + ) { + ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, request, listener, consumer); + } + + private void executeAsyncWithOrigin( + ActionType action, + Request request, + ActionListener listener + ) { + ClientHelper.executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, action, request, listener); + } + private void parseTransformLenientlyFromSource( BytesReference source, String transformId, @@ -950,51 +899,45 @@ private void recursiveExpandAllTransformIds( ) .request(); - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - request, - ActionListener.wrap(searchResponse -> { - long totalHits = total; - 
String idOfLastHit = lastId; - - for (SearchHit hit : searchResponse.getHits().getHits()) { - String id = hit.field(TransformField.ID.getPreferredName()).getValue(); - - // paranoia - if (Strings.isNullOrEmpty(id)) { - continue; - } + executeAsyncWithOrigin(request, listener.delegateFailureAndWrap((l, searchResponse) -> { + long totalHits = total; + String idOfLastHit = lastId; - // only count hits if looking for outdated transforms - if (filterForOutdated && hit.getIndex().equals(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME)) { - ++totalHits; - } else if (id.equals(idOfLastHit) == false && collectedIds.add(id)) { - ++totalHits; - } - idOfLastHit = id; + for (SearchHit hit : searchResponse.getHits().getHits()) { + String id = hit.field(TransformField.ID.getPreferredName()).getValue(); + + // paranoia + if (Strings.isNullOrEmpty(id)) { + continue; } - if (searchResponse.getHits().getHits().length == page.getSize()) { - PageParams nextPage = new PageParams(page.getFrom() + page.getSize(), maxResultWindow); - - recursiveExpandAllTransformIds( - collectedIds, - totalHits, - filterForOutdated, - maxResultWindow, - idOfLastHit, - nextPage, - timeout, - listener - ); - return; + // only count hits if looking for outdated transforms + if (filterForOutdated && hit.getIndex().equals(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME)) { + ++totalHits; + } else if (id.equals(idOfLastHit) == false && collectedIds.add(id)) { + ++totalHits; } + idOfLastHit = id; + } - listener.onResponse(new Tuple<>(totalHits, collectedIds)); - }, listener::onFailure), - client::search - ); + if (searchResponse.getHits().getHits().length == page.getSize()) { + PageParams nextPage = new PageParams(page.getFrom() + page.getSize(), maxResultWindow); + + recursiveExpandAllTransformIds( + collectedIds, + totalHits, + filterForOutdated, + maxResultWindow, + idOfLastHit, + nextPage, + timeout, + l + ); + return; + } + + l.onResponse(new Tuple<>(totalHits, collectedIds)); + }), client::search); } private static Tuple getStatusAndReason(final BulkByScrollResponse response) { From 263a017ca9d7f9c3cc25f6bd5f6f4514b9129523 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 6 Mar 2024 08:46:13 -0500 Subject: [PATCH 023/248] Fix bug when nested knn pre-filter might match nested docs (#105994) When using a pre-filter with nested kNN vectors, its treated like a top-level filter. Meaning, it is applied over parent document fields. However, there are times when a query filter is applied that may or may not match internal nested or non-nested docs. We failed to handle this case correctly and users would receive an error. 
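For context, a minimal, self-contained sketch of the join technique the fix relies on, written against plain Lucene rather than the Elasticsearch classes changed below (the `NestedPreFilterSketch` class and all field names are made up, and Lucene 9.x on the classpath is assumed): the pre-filter is first restricted to parent documents and only then joined down to the nested documents that the kNN query scores.

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ToChildBlockJoinQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

import java.util.List;

public class NestedPreFilterSketch {

    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory()) {
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
            // One block per top-level document: nested (child) docs first, the parent doc last.
            writer.addDocuments(List.of(child("1"), child("1"), parent("1", true)));  // has a publish_date
            writer.addDocuments(List.of(child("2"), child("2"), parent("2", false))); // no publish_date
            writer.close();

            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                Query parentsOnly = new TermQuery(new Term("doc_type", "parent"));
                BitSetProducer parentBitSet = new QueryBitSetProducer(parentsOnly);

                // User pre-filter "must_not exists(publish_date)": on its own it also matches every
                // child doc (children never carry that field), which is the situation this fix handles.
                Query userFilter = new BooleanQuery.Builder()
                    .add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST)
                    .add(new TermQuery(new Term("has_publish_date", "true")), BooleanClause.Occur.MUST_NOT)
                    .build();

                // Restrict the filter to parent docs (ToChildBlockJoinQuery requires a parent-only query),
                // then join it down to the child docs that the kNN query actually scores.
                Query filterOnParents = new BooleanQuery.Builder()
                    .add(userFilter, BooleanClause.Occur.MUST)
                    .add(parentsOnly, BooleanClause.Occur.FILTER)
                    .build();
                Query childAcceptDocs = new ToChildBlockJoinQuery(filterOnParents, parentBitSet);

                TopDocs hits = searcher.search(childAcceptDocs, 10);
                // Prints 2: only the children of parent "2" remain eligible for the vector search.
                System.out.println("eligible nested docs: " + hits.totalHits.value);
            }
        }
    }

    private static Document parent(String id, boolean hasPublishDate) {
        Document doc = new Document();
        doc.add(new StringField("doc_type", "parent", Field.Store.NO));
        doc.add(new StringField("id", id, Field.Store.NO));
        if (hasPublishDate) {
            doc.add(new StringField("has_publish_date", "true", Field.Store.NO));
        }
        return doc;
    }

    private static Document child(String parentId) {
        Document doc = new Document();
        doc.add(new StringField("doc_type", "child", Field.Store.NO));
        doc.add(new StringField("parent_id", parentId, Field.Store.NO));
        return doc;
    }
}
```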
closes: https://github.com/elastic/elasticsearch/issues/105901 --- docs/changelog/105994.yaml | 5 ++ .../search.vectors/100_knn_nested_search.yml | 84 ++++++++++++++++++ .../130_knn_query_nested_search.yml | 87 +++++++++++++++++++ .../search/vectors/KnnVectorQueryBuilder.java | 23 +++-- 4 files changed, 193 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/105994.yaml diff --git a/docs/changelog/105994.yaml b/docs/changelog/105994.yaml new file mode 100644 index 0000000000000..ef9889d0a47af --- /dev/null +++ b/docs/changelog/105994.yaml @@ -0,0 +1,5 @@ +pr: 105994 +summary: Fix bug when nested knn pre-filter might match nested docs +area: Vector Search +type: bug +issues: [] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index c69e22d274c8e..6c6c75990b0f5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -323,3 +323,87 @@ setup: - match: {hits.total.value: 3} - is_true : profile +--- +"nested kNN search with filter that might match nested docs": + - skip: + version: ' - 8.13.99' + reason: 'bugfix for matching non-nested docs in 8.14' + + - do: + indices.create: + index: nested_text + body: + mappings: + properties: + range: + type: long + other_nested_thing: + type: nested + properties: + text: + type: text + paragraphs: + type: nested + properties: + other_nested_thing: + type: nested + properties: + text: + type: text + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + - do: + index: + index: nested_text + id: "1" + body: + publish_date: "1" + paragraphs: + - vector: [1, 1] + text: "some text" + - vector: [1, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [1, 2] + - do: + index: + index: nested_text + id: "2" + body: + paragraphs: + - vector: [2, 1] + text: "some text" + - vector: [2, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [ 1, 2 ] + - do: + indices.refresh: {} + + - do: + search: + index: nested_text + body: + knn: + field: paragraphs.vector + query_vector: [1, 2] + num_candidates: 10 + k: 10 + filter: + bool: + must_not: + exists: + field: publish_date + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml index 5d07c0c8b5f9d..53cc7eb064270 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml @@ -319,3 +319,90 @@ setup: # Rabbit only has one passage vector - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } +--- +"nested kNN query search with filter that might match nested docs": + - skip: + version: ' - 8.13.99' + reason: 'bugfix for matching non-nested docs in 8.14' + + - do: + indices.create: + index: nested_text + body: + mappings: + properties: + range: + type: long + other_nested_thing: + 
type: nested + properties: + text: + type: text + paragraphs: + type: nested + properties: + other_nested_thing: + type: nested + properties: + text: + type: text + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + vector: + type: dense_vector + dims: 2 + index: true + similarity: cosine + - do: + index: + index: nested_text + id: "1" + body: + publish_date: "1" + paragraphs: + - vector: [1, 1] + text: "some text" + - vector: [1, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [1, 2] + - do: + index: + index: nested_text + id: "2" + body: + paragraphs: + - vector: [2, 1] + text: "some text" + - vector: [2, 2] + text: "some text" + other_nested_thing: + - text: "some text" + vector: [ 1, 2 ] + - do: + indices.refresh: {} + + - do: + search: + index: nested_text + body: + query: + nested: + path: paragraphs + query: + knn: + field: paragraphs.vector + query_vector: [1, 2] + num_candidates: 10 + filter: + bool: + must_not: + exists: + field: publish_date + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 96a16013ab874..7e65cd19638ce 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.search.NestedHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -274,7 +275,6 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { ); } - final BitSetProducer parentFilter; BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (QueryBuilder query : this.filterQueries) { builder.add(query.toQuery(context), BooleanClause.Occur.FILTER); @@ -289,6 +289,8 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { String parentPath = context.nestedLookup().getNestedParent(fieldName); if (parentPath != null) { + final BitSetProducer parentBitSet; + final Query parentFilter; NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper(); if (originalObjectMapper != null) { try { @@ -296,19 +298,28 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { context.nestedScope().previousLevel(); NestedObjectMapper objectMapper = context.nestedScope().getObjectMapper(); parentFilter = objectMapper == null - ? context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())) - : context.bitsetFilter(objectMapper.nestedTypeFilter()); + ? 
Queries.newNonNestedFilter(context.indexVersionCreated()) + : objectMapper.nestedTypeFilter(); } finally { context.nestedScope().nextLevel(originalObjectMapper); } } else { // we are NOT in a nested context, coming from the top level knn search - parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); + parentFilter = Queries.newNonNestedFilter(context.indexVersionCreated()); } + parentBitSet = context.bitsetFilter(parentFilter); if (filterQuery != null) { - filterQuery = new ToChildBlockJoinQuery(filterQuery, parentFilter); + NestedHelper nestedHelper = new NestedHelper(context.nestedLookup(), context::isFieldMapped); + // We treat the provided filter as a filter over PARENT documents, so if it might match nested documents + // we need to adjust it. + if (nestedHelper.mightMatchNestedDocs(filterQuery)) { + // Ensure that the query only returns parent documents matching `filterQuery` + filterQuery = Queries.filtered(filterQuery, parentFilter); + } + // Now join the filterQuery & parentFilter to provide the matching blocks of children + filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet); } - return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, parentFilter); + return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, parentBitSet); } return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, null); } From 78df37cbf934380aba8e39cb6d62398593bb25bc Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 6 Mar 2024 14:50:48 +0100 Subject: [PATCH 024/248] API key APIs customization (#105671) This PR makes request translation and building customizable for our API Key Management APIs, create, update, and bulk update. 
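As an illustrative sketch of what that customization can look like (the `CustomUpdateApiKeyRequestTranslator` class is hypothetical and simply reuses the default role-descriptor parsing), an extension could subclass `UpdateApiKeyRequestTranslator.Default` and be picked up through the SPI loading added to `Security.loadExtensions`:

```java
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest;
import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator;
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;

import java.io.IOException;

/**
 * Hypothetical extension-side translator. It builds its payload parser through the protected
 * createParser(...) hook, which is where a deployment could plug in a different role-descriptor
 * parsing function; here the default parsing is kept as a stand-in.
 */
public class CustomUpdateApiKeyRequestTranslator extends UpdateApiKeyRequestTranslator.Default {

    private static final ConstructingObjectParser<Payload, Void> PARSER = createParser(
        (name, roleParser) -> RoleDescriptor.parse(name, roleParser, false)
    );

    @Override
    public UpdateApiKeyRequest translate(RestRequest request) throws IOException {
        // Same path-param convention as the default: the route shares its prefix with the
        // clear-cache API, so the single API key id arrives under "ids".
        final String apiKeyId = request.param("ids");
        if (request.hasContent() == false) {
            return UpdateApiKeyRequest.usingApiKeyId(apiKeyId);
        }
        final Payload payload = PARSER.parse(request.contentParser(), null);
        return new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors(), payload.metadata(), payload.expiration());
    }
}
```

The create and bulk-update paths follow the same pattern through `CreateApiKeyRequestBuilderFactory` and `BulkUpdateApiKeyRequestTranslator`, as wired up below.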
Relates: ES-7827 --- .../apikey/BulkUpdateApiKeyRequest.java | 1 + .../BulkUpdateApiKeyRequestTranslator.java | 63 ++++++++++++++++++ .../apikey/CreateApiKeyRequestBuilder.java | 52 ++++++++------- .../CreateApiKeyRequestBuilderFactory.java | 22 +++++++ .../action/apikey/UpdateApiKeyRequest.java | 1 + .../apikey/UpdateApiKeyRequestTranslator.java | 66 +++++++++++++++++++ .../xpack/security/Security.java | 25 +++++-- .../apikey/RestBulkUpdateApiKeyAction.java | 43 +++--------- .../action/apikey/RestCreateApiKeyAction.java | 18 ++--- .../action/apikey/RestUpdateApiKeyAction.java | 47 +++---------- .../apikey/RestCreateApiKeyActionTests.java | 7 +- .../apikey/RestUpdateApiKeyActionTests.java | 3 +- 12 files changed, 242 insertions(+), 106 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java index f915781c6211a..534c874438e3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java @@ -49,4 +49,5 @@ public BulkUpdateApiKeyRequest(StreamInput in) throws IOException { public ApiKey.Type getType() { return ApiKey.Type.REST; } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java new file mode 100644 index 0000000000000..57a5848970b2e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTranslator.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public interface BulkUpdateApiKeyRequestTranslator { + BulkUpdateApiKeyRequest translate(RestRequest request) throws IOException; + + class Default implements BulkUpdateApiKeyRequestTranslator { + private static final ConstructingObjectParser PARSER = createParser( + (n, p) -> RoleDescriptor.parse(n, p, false) + ); + + @SuppressWarnings("unchecked") + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + final ConstructingObjectParser parser = new ConstructingObjectParser<>( + "bulk_update_api_key_request", + a -> new BulkUpdateApiKeyRequest( + (List) a[0], + (List) a[1], + (Map) a[2], + TimeValue.parseTimeValue((String) a[3], null, "expiration") + ) + ); + parser.declareStringArray(constructorArg(), new ParseField("ids")); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return roleDescriptorParser.apply(n, p); + }, new ParseField("role_descriptors")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + return parser; + } + + @Override + public BulkUpdateApiKeyRequest translate(RestRequest request) throws IOException { + try (XContentParser parser = request.contentParser()) { + return PARSER.parse(parser, null); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java index 2747dc47058f8..5c156ab4e6166 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; @@ -29,30 +30,34 @@ /** * Request builder for populating a {@link CreateApiKeyRequest} */ -public final class CreateApiKeyRequestBuilder extends ActionRequestBuilder { +public class CreateApiKeyRequestBuilder extends ActionRequestBuilder { + private static final ConstructingObjectParser PARSER = createParser( + (n, p) -> RoleDescriptor.parse(n, p, false) + ); @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "api_key_request", - false, - (args, v) -> { - return new 
CreateApiKeyRequest( + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + ConstructingObjectParser parser = new ConstructingObjectParser<>( + "api_key_request", + false, + (args, v) -> new CreateApiKeyRequest( (String) args[0], (List) args[1], TimeValue.parseTimeValue((String) args[2], null, "expiration"), (Map) args[3] - ); - } - ); + ) + ); - static { - PARSER.declareString(constructorArg(), new ParseField("name")); - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + parser.declareString(constructorArg(), new ParseField("name")); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { p.nextToken(); - return RoleDescriptor.parse(n, p, false); + return roleDescriptorParser.apply(n, p); }, new ParseField("role_descriptors")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + return parser; } public CreateApiKeyRequestBuilder(ElasticsearchClient client) { @@ -85,6 +90,15 @@ public CreateApiKeyRequestBuilder setMetadata(Map metadata) { } public CreateApiKeyRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { + CreateApiKeyRequest createApiKeyRequest = parse(source, xContentType); + setName(createApiKeyRequest.getName()); + setRoleDescriptors(createApiKeyRequest.getRoleDescriptors()); + setExpiration(createApiKeyRequest.getExpiration()); + setMetadata(createApiKeyRequest.getMetadata()); + return this; + } + + protected CreateApiKeyRequest parse(BytesReference source, XContentType xContentType) throws IOException { try ( XContentParser parser = XContentHelper.createParserNotCompressed( LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, @@ -92,14 +106,8 @@ public CreateApiKeyRequestBuilder source(BytesReference source, XContentType xCo xContentType ) ) { - CreateApiKeyRequest createApiKeyRequest = parse(parser); - setName(createApiKeyRequest.getName()); - setRoleDescriptors(createApiKeyRequest.getRoleDescriptors()); - setExpiration(createApiKeyRequest.getExpiration()); - setMetadata(createApiKeyRequest.getMetadata()); - + return parse(parser); } - return this; } public static CreateApiKeyRequest parse(XContentParser parser) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java new file mode 100644 index 0000000000000..ff5592e339634 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilderFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.client.internal.Client; + +public interface CreateApiKeyRequestBuilderFactory { + CreateApiKeyRequestBuilder create(Client client, boolean restrictRequest); + + class Default implements CreateApiKeyRequestBuilderFactory { + @Override + public CreateApiKeyRequestBuilder create(Client client, boolean restrictRequest) { + assert false == restrictRequest; + return new CreateApiKeyRequestBuilder(client); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java index c5c8bcc4fc87a..9b1e9194d59fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java @@ -38,4 +38,5 @@ public UpdateApiKeyRequest(StreamInput in) throws IOException { public ApiKey.Type getType() { return ApiKey.Type.REST; } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java new file mode 100644 index 0000000000000..f70732dd50990 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTranslator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public interface UpdateApiKeyRequestTranslator { + UpdateApiKeyRequest translate(RestRequest request) throws IOException; + + class Default implements UpdateApiKeyRequestTranslator { + private static final ConstructingObjectParser PARSER = createParser((n, p) -> RoleDescriptor.parse(n, p, false)); + + @SuppressWarnings("unchecked") + protected static ConstructingObjectParser createParser( + CheckedBiFunction roleDescriptorParser + ) { + final ConstructingObjectParser parser = new ConstructingObjectParser<>( + "update_api_key_request_payload", + a -> new Payload( + (List) a[0], + (Map) a[1], + TimeValue.parseTimeValue((String) a[2], null, "expiration") + ) + ); + parser.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return roleDescriptorParser.apply(n, p); + }, new ParseField("role_descriptors")); + parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + parser.declareString(optionalConstructorArg(), new ParseField("expiration")); + return parser; + } + + @Override + public UpdateApiKeyRequest translate(RestRequest request) throws IOException { + // Note that we use `ids` here even though we only support a single ID. 
This is because the route where this translator is used + // shares a path prefix with `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the + // same wildcard if their paths share a prefix + final String apiKeyId = request.param("ids"); + if (false == request.hasContent()) { + return UpdateApiKeyRequest.usingApiKeyId(apiKeyId); + } + final Payload payload = PARSER.parse(request.contentParser(), null); + return new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration); + } + + protected record Payload(List roleDescriptors, Map metadata, TimeValue expiration) {} + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 3beff69849a58..219f645a92bbe 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -119,13 +119,16 @@ import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.UpdateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.enrollment.KibanaEnrollmentAction; import org.elasticsearch.xpack.core.security.action.enrollment.NodeEnrollmentAction; @@ -561,6 +564,9 @@ public class Security extends Plugin private final SetOnce scriptServiceReference = new SetOnce<>(); private final SetOnce operatorOnlyRegistry = new SetOnce<>(); private final SetOnce putRoleRequestBuilderFactory = new SetOnce<>(); + private final SetOnce createApiKeyRequestBuilderFactory = new SetOnce<>(); + private final SetOnce updateApiKeyRequestTranslator = new SetOnce<>(); + private final SetOnce bulkUpdateApiKeyRequestTranslator = new SetOnce<>(); private final SetOnce getBuiltinPrivilegesResponseTranslator = new SetOnce<>(); private final SetOnce fileRolesStore = new SetOnce<>(); private final SetOnce operatorPrivilegesService = new SetOnce<>(); @@ -821,10 +827,18 @@ Collection createComponents( if (putRoleRequestBuilderFactory.get() == null) { putRoleRequestBuilderFactory.set(new PutRoleRequestBuilderFactory.Default()); } - + if (createApiKeyRequestBuilderFactory.get() == null) { + createApiKeyRequestBuilderFactory.set(new CreateApiKeyRequestBuilderFactory.Default()); + } if (getBuiltinPrivilegesResponseTranslator.get() == null) { 
getBuiltinPrivilegesResponseTranslator.set(new GetBuiltinPrivilegesResponseTranslator.Default()); } + if (updateApiKeyRequestTranslator.get() == null) { + updateApiKeyRequestTranslator.set(new UpdateApiKeyRequestTranslator.Default()); + } + if (bulkUpdateApiKeyRequestTranslator.get() == null) { + bulkUpdateApiKeyRequestTranslator.set(new BulkUpdateApiKeyRequestTranslator.Default()); + } final Map, ActionListener>>> customRoleProviders = new LinkedHashMap<>(); for (SecurityExtension extension : securityExtensions) { @@ -1456,10 +1470,10 @@ public List getRestHandlers( new RestGetPrivilegesAction(settings, getLicenseState()), new RestPutPrivilegesAction(settings, getLicenseState()), new RestDeletePrivilegesAction(settings, getLicenseState()), - new RestCreateApiKeyAction(settings, getLicenseState()), + new RestCreateApiKeyAction(settings, getLicenseState(), createApiKeyRequestBuilderFactory.get()), new RestCreateCrossClusterApiKeyAction(settings, getLicenseState()), - new RestUpdateApiKeyAction(settings, getLicenseState()), - new RestBulkUpdateApiKeyAction(settings, getLicenseState()), + new RestUpdateApiKeyAction(settings, getLicenseState(), updateApiKeyRequestTranslator.get()), + new RestBulkUpdateApiKeyAction(settings, getLicenseState(), bulkUpdateApiKeyRequestTranslator.get()), new RestUpdateCrossClusterApiKeyAction(settings, getLicenseState()), new RestGrantApiKeyAction(settings, getLicenseState()), new RestInvalidateApiKeyAction(settings, getLicenseState()), @@ -2039,6 +2053,9 @@ public void loadExtensions(ExtensionLoader loader) { loadSingletonExtensionAndSetOnce(loader, operatorOnlyRegistry, OperatorOnlyRegistry.class); loadSingletonExtensionAndSetOnce(loader, putRoleRequestBuilderFactory, PutRoleRequestBuilderFactory.class); loadSingletonExtensionAndSetOnce(loader, getBuiltinPrivilegesResponseTranslator, GetBuiltinPrivilegesResponseTranslator.class); + loadSingletonExtensionAndSetOnce(loader, updateApiKeyRequestTranslator, UpdateApiKeyRequestTranslator.class); + loadSingletonExtensionAndSetOnce(loader, bulkUpdateApiKeyRequestTranslator, BulkUpdateApiKeyRequestTranslator.class); + loadSingletonExtensionAndSetOnce(loader, createApiKeyRequestBuilderFactory, CreateApiKeyRequestBuilderFactory.class); } private void loadSingletonExtensionAndSetOnce(ExtensionLoader loader, SetOnce setOnce, Class clazz) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java index 584ad08704ddd..97ee7cc50a7d5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java @@ -9,53 +9,32 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; 
import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequestTranslator; import java.io.IOException; import java.util.List; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @ServerlessScope(Scope.PUBLIC) public final class RestBulkUpdateApiKeyAction extends ApiKeyBaseRestHandler { - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "bulk_update_api_key_request", - a -> new BulkUpdateApiKeyRequest( - (List) a[0], - (List) a[1], - (Map) a[2], - TimeValue.parseTimeValue((String) a[3], null, "expiration") - ) - ); + private final BulkUpdateApiKeyRequestTranslator requestTranslator; - static { - PARSER.declareStringArray(constructorArg(), new ParseField("ids")); - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { - p.nextToken(); - return RoleDescriptor.parse(n, p, false); - }, new ParseField("role_descriptors")); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - } - - public RestBulkUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { + public RestBulkUpdateApiKeyAction( + final Settings settings, + final XPackLicenseState licenseState, + final BulkUpdateApiKeyRequestTranslator requestTranslator + ) { super(settings, licenseState); + this.requestTranslator = requestTranslator; } @Override @@ -70,9 +49,7 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - try (XContentParser parser = request.contentParser()) { - final BulkUpdateApiKeyRequest parsed = PARSER.parse(parser, null); - return channel -> client.execute(BulkUpdateApiKeyAction.INSTANCE, parsed, new RestToXContentListener<>(channel)); - } + final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest = requestTranslator.translate(request); + return channel -> client.execute(BulkUpdateApiKeyAction.INSTANCE, bulkUpdateApiKeyRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 2cb5a15f1e0f2..217afdb3cfea2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.security.authc.ApiKeyService; import java.io.IOException; @@ -30,13 +31,16 @@ @ServerlessScope(Scope.PUBLIC) public final class RestCreateApiKeyAction extends ApiKeyBaseRestHandler { + 
private final CreateApiKeyRequestBuilderFactory builderFactory; + /** - * @param settings the node's settings - * @param licenseState the license state that will be used to determine if - * security is licensed + * @param settings the node's settings + * @param licenseState the license state that will be used to determine if + * security is licensed */ - public RestCreateApiKeyAction(Settings settings, XPackLicenseState licenseState) { + public RestCreateApiKeyAction(Settings settings, XPackLicenseState licenseState, CreateApiKeyRequestBuilderFactory builderFactory) { super(settings, licenseState); + this.builderFactory = builderFactory; } @Override @@ -51,10 +55,8 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - CreateApiKeyRequestBuilder builder = new CreateApiKeyRequestBuilder(client).source( - request.requiredContent(), - request.getXContentType() - ); + CreateApiKeyRequestBuilder builder = builderFactory.create(client, request.hasParam(RestRequest.PATH_RESTRICTED)) + .source(request.requiredContent(), request.getXContentType()); String refresh = request.param("refresh"); if (refresh != null) { builder.setRefreshPolicy(WriteRequest.RefreshPolicy.parse(request.param("refresh"))); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java index d64e7f4007387..0fe0f3df0715f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java @@ -9,49 +9,31 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import java.io.IOException; import java.util.List; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.PUT; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @ServerlessScope(Scope.PUBLIC) public final class RestUpdateApiKeyAction extends ApiKeyBaseRestHandler { + private final UpdateApiKeyRequestTranslator requestTranslator; - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "update_api_key_request_payload", - a -> new Payload( - (List) a[0], - (Map) a[1], - TimeValue.parseTimeValue((String) a[2], null, "expiration") - ) - ); - - static { - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { - p.nextToken(); - return RoleDescriptor.parse(n, p, false); - }, new ParseField("role_descriptors")); - 
PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); - PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); - } - - public RestUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { + public RestUpdateApiKeyAction( + final Settings settings, + final XPackLicenseState licenseState, + final UpdateApiKeyRequestTranslator requestTranslator + ) { super(settings, licenseState); + this.requestTranslator = requestTranslator; } @Override @@ -66,17 +48,8 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - // Note that we use `ids` here even though we only support a single id. This is because this route shares a path prefix with - // `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the same wildcard if their paths - // share a prefix - final var apiKeyId = request.param("ids"); - final var payload = request.hasContent() == false ? new Payload(null, null, null) : PARSER.parse(request.contentParser(), null); - return channel -> client.execute( - UpdateApiKeyAction.INSTANCE, - new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration), - new RestToXContentListener<>(channel) - ); + final UpdateApiKeyRequest updateApiKeyRequest = requestTranslator.translate(request); + return channel -> client.execute(UpdateApiKeyAction.INSTANCE, updateApiKeyRequest, new RestToXContentListener<>(channel)); } - record Payload(List roleDescriptors, Map metadata, TimeValue expiration) {} } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 0ab9533e62d4c..d487eab9f7887 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import java.time.Duration; @@ -105,7 +106,11 @@ public void doE } } }; - final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); + final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction( + Settings.EMPTY, + mockLicenseState, + new CreateApiKeyRequestBuilderFactory.Default() + ); restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); final RestResponse restResponse = responseSetOnce.get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java index eb0b7bea1a5fb..c349ad57a486c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequestTranslator; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; import org.junit.Before; @@ -34,7 +35,7 @@ public void init() { final Settings settings = Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(); final XPackLicenseState licenseState = mock(XPackLicenseState.class); requestHolder = new AtomicReference<>(); - restAction = new RestUpdateApiKeyAction(settings, licenseState); + restAction = new RestUpdateApiKeyAction(settings, licenseState, new UpdateApiKeyRequestTranslator.Default()); controller().registerHandler(restAction); verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { assertThat(actionRequest, instanceOf(UpdateApiKeyRequest.class)); From 46beceb18024a27a57449de6e45897a0fb1d890b Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 6 Mar 2024 15:25:08 +0100 Subject: [PATCH 025/248] Add Saml test connection timeout debugging output (#104801) Add additional logging to idp test fixture container --- .../security/authc/saml/SamlAuthenticationIT.java | 6 +++--- x-pack/test/idp-fixture/build.gradle | 15 +++++++-------- .../test/fixtures/idp/IdpTestContainer.java | 2 +- .../src/main/resources/idp/bin/run-jetty.sh | 12 ++++++++---- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index 6e6939084cdd3..e8caf004e043b 100644 --- a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -251,7 +251,7 @@ public void setupNativeUser() throws IOException { *
  • Uses that token to verify the user details
  • * */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithSamlRoleMapping() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth"); verifyElasticsearchAccessTokenForRoleMapping(authTokens.v1()); @@ -262,7 +262,7 @@ public void testLoginUserWithSamlRoleMapping() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithAuthorizingRealm() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth_native"); verifyElasticsearchAccessTokenForAuthorizingRealms(authTokens.v1()); @@ -273,7 +273,7 @@ public void testLoginUserWithAuthorizingRealm() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") + // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginWithWrongRealmFails() throws Exception { final BasicHttpContext context = new BasicHttpContext(); try (CloseableHttpClient client = getHttpClient()) { diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 3fd39dd9a18a8..2ef03bf7747cc 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -12,23 +12,22 @@ dependencies { api "junit:junit:${versions.junit}" } +tasks.withType(DockerBuildTask).configureEach { + noCache = BuildParams.isCi() + push = true //BuildParams.isCi() + getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) +} + tasks.register("deployIdpFixtureDockerImages", DockerBuildTask) { dockerContext.fileValue(file("src/main/resources/idp")) baseImages = ["openjdk:11.0.16-jre"] - noCache = BuildParams.isCi() - tags = ["docker.elastic.co/elasticsearch-dev/idp-fixture:1.0"] - push = BuildParams.isCi() - getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) + tags = ["docker.elastic.co/elasticsearch-dev/idp-fixture:1.1"] } - tasks.register("deployOpenLdapFixtureDockerImages", DockerBuildTask) { dockerContext.fileValue(file("src/main/resources/openldap")) baseImages = ["osixia/openldap:1.4.0"] - noCache = BuildParams.isCi() tags = ["docker.elastic.co/elasticsearch-dev/openldap-fixture:1.0"] - push = BuildParams.isCi() - getPlatforms().addAll( Architecture.values().collect{ it.dockerPlatform } ) } tasks.register("deployFixtureDockerImages") { diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java index d76ca5741d8b3..32c8e693ef3f7 100644 --- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java +++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java @@ -20,7 +20,7 @@ public final class IdpTestContainer extends DockerEnvironmentAwareTestContainer { - private static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/idp-fixture:1.0"; + private static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/idp-fixture:1.1"; private final TemporaryFolder temporaryFolder = new TemporaryFolder(); private Path certsPath; diff --git 
a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh index 0160cc613407d..421deaa49d2ff 100644 --- a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh @@ -12,7 +12,7 @@ sed -i "s/^-Xmx.*$/-Xmx$JETTY_MAX_HEAP/g" /opt/shib-jetty-base/start.ini # For some reason, this container always immediately (in less than 1 second) exits with code 0 when starting for the first time # Even with a health check, docker-compose will immediately report the container as unhealthy when using --wait instead of waiting for it to become healthy -# So, let's just start it a second time if it exits quickly +# So, let's just start it a second time set +e start_time=$(date +%s) /opt/jetty-home/bin/jetty.sh run @@ -20,9 +20,13 @@ exit_code=$? end_time=$(date +%s) duration=$((end_time - start_time)) -if [ $duration -lt 10 ]; then - /opt/jetty-home/bin/jetty.sh run - exit_code=$? +echo "Duration for initial idp run was $duration seconds." + +if [ $duration -lt 60 ]; then + echo "Restarting idp." + + /opt/jetty-home/bin/jetty.sh run + exit_code=$? fi exit $exit_code From 1fae3e75017ab831eab5ff30029bc97ebd52fe82 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 6 Mar 2024 15:27:57 +0000 Subject: [PATCH 026/248] Extract `SnapshotSortKey` (#106015) The behaviour of the get-snapshots API varies quite considerably depending on the sort key chosen. Today this logic is implemented using scattered `switch` statements and other conditionals but it'd be clearer if we delegated this stuff to the sort key instances themselves. This commit moves the sort key enum to the top level and replaces one of the `switch` statements with a method on the enum instances. 
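For illustration, here is a minimal sketch of the pattern this change adopts: the sorting behaviour is attached to the enum constants themselves rather than being selected through an external switch. The Snapshot record and SortKeySketch enum below are hypothetical stand-ins invented for the example, not the real SnapshotInfo or SnapshotSortKey types; the actual comparators live in the new SnapshotSortKey class introduced in the diff that follows.

    import java.util.Comparator;
    import java.util.List;

    // Hypothetical stand-ins for illustration only; not the real Elasticsearch classes.
    record Snapshot(String name, long startTime) {}

    enum SortKeySketch {
        NAME(Comparator.comparing(Snapshot::name)),
        START_TIME(Comparator.comparingLong(Snapshot::startTime));

        private final Comparator<Snapshot> comparator;

        SortKeySketch(Comparator<Snapshot> comparator) {
            // each constant owns its comparator; ties break on name, mirroring the diff below
            this.comparator = comparator.thenComparing(Snapshot::name);
        }

        Comparator<Snapshot> comparator() {
            return comparator;
        }

        // callers sort without any switch over the sort key
        static List<Snapshot> sort(List<Snapshot> snapshots, SortKeySketch key) {
            return snapshots.stream().sorted(key.comparator()).toList();
        }
    }

The same idea appears in the diff below as SnapshotSortKey#getSnapshotInfoComparator(), which lets TransportGetSnapshotsAction drop its per-key comparator constants and the switch that chose between them.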
--- .../http/snapshots/RestGetSnapshotsIT.java | 70 ++++----- .../snapshots/GetSnapshotsIT.java | 133 +++++++----------- .../snapshots/get/GetSnapshotsRequest.java | 46 +----- .../get/GetSnapshotsRequestBuilder.java | 2 +- .../snapshots/get/SnapshotSortKey.java | 83 +++++++++++ .../get/TransportGetSnapshotsAction.java | 39 +---- .../admin/cluster/RestGetSnapshotsAction.java | 3 +- .../get/GetSnapshotsRequestTests.java | 3 +- .../AbstractSnapshotIntegTestCase.java | 8 +- 9 files changed, 188 insertions(+), 199 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index e9f4106433771..59e07581499ee 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -101,38 +102,38 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName .getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.NAME, order, includeIndexNames), - GetSnapshotsRequest.SortBy.NAME, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.NAME, order, includeIndexNames), + SnapshotSortKey.NAME, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.DURATION, order, includeIndexNames), - GetSnapshotsRequest.SortBy.DURATION, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.DURATION, order, includeIndexNames), + SnapshotSortKey.DURATION, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.INDICES, order, includeIndexNames), - GetSnapshotsRequest.SortBy.INDICES, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.INDICES, order, includeIndexNames), + SnapshotSortKey.INDICES, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.START_TIME, order, includeIndexNames), - GetSnapshotsRequest.SortBy.START_TIME, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.START_TIME, order, includeIndexNames), + SnapshotSortKey.START_TIME, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.SHARDS, order, includeIndexNames), - GetSnapshotsRequest.SortBy.SHARDS, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.SHARDS, order, includeIndexNames), + SnapshotSortKey.SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order, includeIndexNames), - GetSnapshotsRequest.SortBy.FAILED_SHARDS, + allSnapshotsSorted(allSnapshotNames, repoName, 
SnapshotSortKey.FAILED_SHARDS, order, includeIndexNames), + SnapshotSortKey.FAILED_SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.REPOSITORY, order, includeIndexNames), - GetSnapshotsRequest.SortBy.REPOSITORY, + allSnapshotsSorted(allSnapshotNames, repoName, SnapshotSortKey.REPOSITORY, order, includeIndexNames), + SnapshotSortKey.REPOSITORY, order ); } @@ -141,7 +142,7 @@ public void testResponseSizeLimit() throws Exception { final String repoName = "test-repo"; AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "fs"); final List names = AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(6, 20)); - for (GetSnapshotsRequest.SortBy sort : GetSnapshotsRequest.SortBy.values()) { + for (SnapshotSortKey sort : SnapshotSortKey.values()) { for (SortOrder order : SortOrder.values()) { logger.info("--> testing pagination for [{}] [{}]", sort, order); doTestPagination(repoName, names, sort, order); @@ -149,8 +150,7 @@ public void testResponseSizeLimit() throws Exception { } } - private void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) - throws IOException { + private void doTestPagination(String repoName, List names, SnapshotSortKey sort, SortOrder order) throws IOException { final boolean includeIndexNames = randomBoolean(); final List allSnapshotsSorted = allSnapshotsSorted(names, repoName, sort, order, includeIndexNames); final GetSnapshotsResponse batch1 = sortedWithLimit(repoName, sort, null, 2, order, includeIndexNames); @@ -220,18 +220,18 @@ public void testSortAndPaginateWithInProgress() throws Exception { .equals(Map.of(SnapshotsInProgress.ShardState.INIT, 1L, SnapshotsInProgress.ShardState.QUEUED, (long) inProgressCount - 1)); return firstIndexSuccessfullySnapshot && secondIndexIsBlocked; }); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.INDICES); AbstractSnapshotIntegTestCase.unblockAllDataNodes(repoName); for (ActionFuture inProgressSnapshot : inProgressSnapshots) { AbstractSnapshotIntegTestCase.assertSuccessful(logger, inProgressSnapshot); } - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repoName, allSnapshotNames, SnapshotSortKey.INDICES); } public void testFilterBySLMPolicy() throws Exception { @@ -240,7 +240,7 @@ public void testFilterBySLMPolicy() throws Exception { AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(1, 5)); final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots("*") .setSnapshots("*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); final String snapshotWithPolicy 
= "snapshot-with-policy"; @@ -277,7 +277,7 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); final List allSnapshots = clusterAdmin().prepareGetSnapshots("*") .setSnapshots("*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); assertThat(getAllSnapshotsForPolicies(GetSnapshotsRequest.NO_POLICY_PATTERN, policyName, otherPolicyName), is(allSnapshots)); @@ -294,7 +294,7 @@ public void testSortAfterStartTime() throws Exception { final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .get() .getSnapshots(); assertThat(allSnapshotInfo, is(List.of(snapshot1, snapshot2, snapshot3))); @@ -311,7 +311,7 @@ public void testSortAfterStartTime() throws Exception { final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -340,7 +340,7 @@ private SnapshotInfo createFullSnapshotWithUniqueStartTime(String repoName, Stri private List allAfterStartTimeAscending(long timestamp) throws IOException { final Request request = baseGetSnapshotsRequest("*"); - request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("sort", SnapshotSortKey.START_TIME.toString()); request.addParameter("from_sort_value", String.valueOf(timestamp)); final Response response = getRestClient().performRequest(request); return readSnapshotInfos(response).getSnapshots(); @@ -348,7 +348,7 @@ private List allAfterStartTimeAscending(long timestamp) throws IOE private List allBeforeStartTimeDescending(long timestamp) throws IOException { final Request request = baseGetSnapshotsRequest("*"); - request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("sort", SnapshotSortKey.START_TIME.toString()); request.addParameter("from_sort_value", String.valueOf(timestamp)); request.addParameter("order", SortOrder.DESC.toString()); final Response response = getRestClient().performRequest(request); @@ -358,7 +358,7 @@ private List allBeforeStartTimeDescending(long timestamp) throws I private static List getAllSnapshotsForPolicies(String... 
policies) throws IOException { final Request requestWithPolicy = new Request(HttpGet.METHOD_NAME, "/_snapshot/*/*"); requestWithPolicy.addParameter("slm_policy_filter", Strings.arrayToCommaDelimitedString(policies)); - requestWithPolicy.addParameter("sort", GetSnapshotsRequest.SortBy.NAME.toString()); + requestWithPolicy.addParameter("sort", SnapshotSortKey.NAME.toString()); return readSnapshotInfos(getRestClient().performRequest(requestWithPolicy)).getSnapshots(); } @@ -369,10 +369,10 @@ private void createIndexWithContent(String indexName) { indexDoc(indexName, "some_id", "foo", "bar"); } - private static void assertStablePagination(String repoName, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) + private static void assertStablePagination(String repoName, Collection allSnapshotNames, SnapshotSortKey sort) throws IOException { final SortOrder order = randomFrom(SortOrder.values()); - final boolean includeIndexNames = sort == GetSnapshotsRequest.SortBy.INDICES || randomBoolean(); + final boolean includeIndexNames = sort == SnapshotSortKey.INDICES || randomBoolean(); final List allSorted = allSnapshotsSorted(allSnapshotNames, repoName, sort, order, includeIndexNames); for (int i = 1; i <= allSnapshotNames.size(); i++) { @@ -413,7 +413,7 @@ private static void assertStablePagination(String repoName, Collection a private static List allSnapshotsSorted( Collection allSnapshotNames, String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, SortOrder order, boolean includeIndices ) throws IOException { @@ -454,7 +454,7 @@ private static GetSnapshotsResponse readSnapshotInfos(Response response) throws private static GetSnapshotsResponse sortedWithLimit( String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, String after, int size, SortOrder order, @@ -486,7 +486,7 @@ private static void addIndexNamesParameter(boolean includeIndices, Request reque private static GetSnapshotsResponse sortedWithLimit( String repoName, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, int offset, int size, SortOrder order, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 6b5b3826272ce..d01064b9fb8bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryMissingException; @@ -84,39 +85,31 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); final String[] repos = { repoName }; + assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.NAME, order), SnapshotSortKey.NAME, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, 
GetSnapshotsRequest.SortBy.NAME, order), - GetSnapshotsRequest.SortBy.NAME, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.DURATION, order), + SnapshotSortKey.DURATION, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.DURATION, order), - GetSnapshotsRequest.SortBy.DURATION, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.INDICES, order), + SnapshotSortKey.INDICES, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.INDICES, order), - GetSnapshotsRequest.SortBy.INDICES, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.START_TIME, order), + SnapshotSortKey.START_TIME, order ); + assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.SHARDS, order), SnapshotSortKey.SHARDS, order); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.START_TIME, order), - GetSnapshotsRequest.SortBy.START_TIME, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.FAILED_SHARDS, order), + SnapshotSortKey.FAILED_SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.SHARDS, order), - GetSnapshotsRequest.SortBy.SHARDS, - order - ); - assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), - GetSnapshotsRequest.SortBy.FAILED_SHARDS, - order - ); - assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.REPOSITORY, order), - GetSnapshotsRequest.SortBy.REPOSITORY, + allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.REPOSITORY, order), + SnapshotSortKey.REPOSITORY, order ); } @@ -127,7 +120,7 @@ public void testResponseSizeLimit() throws Exception { createRepository(repoName, "fs", repoPath); maybeInitWithOldSnapshotVersion(repoName, repoPath); final List names = createNSnapshots(repoName, randomIntBetween(6, 20)); - for (GetSnapshotsRequest.SortBy sort : GetSnapshotsRequest.SortBy.values()) { + for (SnapshotSortKey sort : SnapshotSortKey.values()) { for (SortOrder order : SortOrder.values()) { logger.info("--> testing pagination for [{}] [{}]", sort, order); doTestPagination(repoName, names, sort, order); @@ -135,7 +128,7 @@ public void testResponseSizeLimit() throws Exception { } } - private void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) { + private void doTestPagination(String repoName, List names, SnapshotSortKey sort, SortOrder order) { final String[] repos = { repoName }; final List allSnapshotsSorted = allSnapshotsSorted(names, repos, sort, order); final GetSnapshotsResponse batch1 = sortedWithLimit(repos, sort, null, 2, order); @@ -191,9 +184,9 @@ public void testSortAndPaginateWithInProgress() throws Exception { ) ); final String[] repos = { repoName }; - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); final List currentSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) 
.setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) .get() @@ -215,9 +208,9 @@ public void testSortAndPaginateWithInProgress() throws Exception { assertSuccessful(inProgressSnapshot); } - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); + assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); } public void testPaginationRequiresVerboseListing() throws Exception { @@ -228,14 +221,14 @@ public void testPaginationRequiresVerboseListing() throws Exception { ActionRequestValidationException.class, clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) ); expectThrows( ActionRequestValidationException.class, clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setSize(randomIntBetween(1, 100)) ); } @@ -258,16 +251,11 @@ public void testExcludePatterns() throws Exception { allSnapshotNames.addAll(namesOtherRepo); final SortOrder order = SortOrder.DESC; - final List allSorted = allSnapshotsSorted( - allSnapshotNames, - new String[] { "*" }, - GetSnapshotsRequest.SortBy.REPOSITORY, - order - ); + final List allSorted = allSnapshotsSorted(allSnapshotNames, new String[] { "*" }, SnapshotSortKey.REPOSITORY, order); final List allSortedWithoutOther = allSnapshotsSorted( allSnapshotNamesWithoutOther, new String[] { "*", "-" + otherRepo }, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order ); assertThat(allSortedWithoutOther, is(allSorted.subList(0, allSnapshotNamesWithoutOther.size()))); @@ -275,7 +263,7 @@ public void testExcludePatterns() throws Exception { final List allInOther = allSnapshotsSorted( namesOtherRepo, new String[] { "*", "-test-repo-*" }, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order ); assertThat(allInOther, is(allSorted.subList(allSnapshotNamesWithoutOther.size(), allSorted.size()))); @@ -289,7 +277,7 @@ public void testExcludePatterns() throws Exception { final List allInOtherWithoutOtherPrefix = allSnapshotsSorted( namesOtherRepo, patternOtherRepo, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order, "-other*" ); @@ -298,7 +286,7 @@ public void testExcludePatterns() throws Exception { final List allInOtherWithoutOtherExplicit = allSnapshotsSorted( namesOtherRepo, patternOtherRepo, - GetSnapshotsRequest.SortBy.REPOSITORY, + SnapshotSortKey.REPOSITORY, order, "-" + otherPrefixSnapshot1, "-" + otherPrefixSnapshot2 @@ -345,7 +333,7 @@ public void testNamesStartingInDash() { final SnapshotInfo weirdSnapshot2InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot2); final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.REPOSITORY) + .setSort(SnapshotSortKey.REPOSITORY) .get() .getSnapshots(); assertThat(allSnapshots, hasSize(9)); @@ -407,11 +395,7 @@ public void testNamesStartingInDash() { } private List getAllByPatterns(String[] repos, String[] snapshots) { - return clusterAdmin().prepareGetSnapshots(repos) - .setSnapshots(snapshots) - 
.setSort(GetSnapshotsRequest.SortBy.REPOSITORY) - .get() - .getSnapshots(); + return clusterAdmin().prepareGetSnapshots(repos).setSnapshots(snapshots).setSort(SnapshotSortKey.REPOSITORY).get().getSnapshots(); } public void testFilterBySLMPolicy() throws Exception { @@ -420,7 +404,7 @@ public void testFilterBySLMPolicy() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); final String snapshotWithPolicy = "snapshot-with-policy"; @@ -456,7 +440,7 @@ public void testFilterBySLMPolicy() throws Exception { final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); assertThat(getAllSnapshotsForPolicies(GetSnapshotsRequest.NO_POLICY_PATTERN, policyName, otherPolicyName), is(allSnapshots)); @@ -477,7 +461,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .get() .getSnapshots(); assertThat(allSnapshotInfo, is(List.of(snapshot1, snapshot2, snapshot3))); @@ -504,7 +488,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -525,7 +509,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .get() .getSnapshots(); @@ -541,7 +525,7 @@ public void testSortAfter() throws Exception { final List allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) - .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setSort(SnapshotSortKey.DURATION) .setOrder(SortOrder.DESC) .get() .getSnapshots(); @@ -554,12 +538,12 @@ public void testSortAfter() throws Exception { final SnapshotInfo otherSnapshot = createFullSnapshot(repoName, "other-snapshot"); - assertThat(allSnapshots(new String[] { "snap*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); - assertThat(allSnapshots(new String[] { "o*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); + assertThat(allSnapshots(new String[] { "snap*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); + assertThat(allSnapshots(new String[] { "o*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots("snap*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") .setOffset(1) .setSize(1) @@ -568,7 +552,7 @@ public void testSortAfter() throws Exception { assertThat(paginatedResponse.totalCount(), is(3)); final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(matchAllPattern()) 
.setSnapshots("snap*") - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") .setOffset(0) .setSize(2) @@ -587,7 +571,7 @@ public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { snapshotNames.sort(String::compareTo); final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(repoName, missingRepoName) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get(); assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); assertTrue(response.getFailures().containsKey(missingRepoName)); @@ -618,35 +602,30 @@ private SnapshotInfo createFullSnapshotWithUniqueTimestamps( } private List allAfterStartTimeAscending(long timestamp) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.ASC, timestamp); + return allSnapshots(matchAllPattern(), SnapshotSortKey.START_TIME, SortOrder.ASC, timestamp); } private List allBeforeStartTimeDescending(long timestamp) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.DESC, timestamp); + return allSnapshots(matchAllPattern(), SnapshotSortKey.START_TIME, SortOrder.DESC, timestamp); } private List allAfterNameAscending(String name) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, name); + return allSnapshots(matchAllPattern(), SnapshotSortKey.NAME, SortOrder.ASC, name); } private List allBeforeNameDescending(String name) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.DESC, name); + return allSnapshots(matchAllPattern(), SnapshotSortKey.NAME, SortOrder.DESC, name); } private List allAfterDurationAscending(long duration) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.DURATION, SortOrder.ASC, duration); + return allSnapshots(matchAllPattern(), SnapshotSortKey.DURATION, SortOrder.ASC, duration); } private List allBeforeDurationDescending(long duration) { - return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.DURATION, SortOrder.DESC, duration); + return allSnapshots(matchAllPattern(), SnapshotSortKey.DURATION, SortOrder.DESC, duration); } - private static List allSnapshots( - String[] snapshotNames, - GetSnapshotsRequest.SortBy sortBy, - SortOrder order, - Object fromSortValue - ) { + private static List allSnapshots(String[] snapshotNames, SnapshotSortKey sortBy, SortOrder order, Object fromSortValue) { return clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(snapshotNames) .setSort(sortBy) @@ -660,12 +639,12 @@ private static List getAllSnapshotsForPolicies(String... 
policies) return clusterAdmin().prepareGetSnapshots(matchAllPattern()) .setSnapshots(matchAllPattern()) .setPolicies(policies) - .setSort(GetSnapshotsRequest.SortBy.NAME) + .setSort(SnapshotSortKey.NAME) .get() .getSnapshots(); } - private static void assertStablePagination(String[] repoNames, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) { + private static void assertStablePagination(String[] repoNames, Collection allSnapshotNames, SnapshotSortKey sort) { final SortOrder order = randomFrom(SortOrder.values()); final List allSorted = allSnapshotsSorted(allSnapshotNames, repoNames, sort, order); @@ -700,7 +679,7 @@ private static void assertStablePagination(String[] repoNames, Collection allSnapshotsSorted( Collection allSnapshotNames, String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, SortOrder order, String... namePatterns ) { @@ -724,7 +703,7 @@ private static List allSnapshotsSorted( private static GetSnapshotsResponse sortedWithLimit( String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, String after, int size, SortOrder order, @@ -738,13 +717,7 @@ private static GetSnapshotsResponse sortedWithLimit( .get(); } - private static GetSnapshotsResponse sortedWithLimit( - String[] repoNames, - GetSnapshotsRequest.SortBy sortBy, - int offset, - int size, - SortOrder order - ) { + private static GetSnapshotsResponse sortedWithLimit(String[] repoNames, SnapshotSortKey sortBy, int offset, int size, SortOrder order) { return baseGetSnapshotsRequest(repoNames).setOffset(offset).setSort(sortBy).setSize(size).setOrder(order).get(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index fda371f9364f9..25373178e2b89 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -61,7 +61,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest @Nullable private String fromSortValue; - private SortBy sort = SortBy.START_TIME; + private SnapshotSortKey sort = SnapshotSortKey.START_TIME; private SortOrder order = SortOrder.ASC; @@ -106,7 +106,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); after = in.readOptionalWriteable(After::new); - sort = in.readEnum(SortBy.class); + sort = in.readEnum(SnapshotSortKey.class); size = in.readVInt(); order = SortOrder.readFromStream(in); offset = in.readVInt(); @@ -146,7 +146,7 @@ public ActionRequestValidationException validate() { validationException = addValidationError("size must be -1 or greater than 0", validationException); } if (verbose == false) { - if (sort != SortBy.START_TIME) { + if (sort != SnapshotSortKey.START_TIME) { validationException = addValidationError("can't use non-default sort with verbose=false", validationException); } if (size > 0) { @@ -287,7 +287,7 @@ public After after() { return after; } - public SortBy sort() { + public SnapshotSortKey sort() { return sort; } @@ -306,7 +306,7 @@ public String fromSortValue() { return fromSortValue; } - public GetSnapshotsRequest sort(SortBy sort) { + public GetSnapshotsRequest sort(SnapshotSortKey sort) { this.sort = sort; return this; } @@ -350,40 +350,6 @@ public Task createTask(long id, String type, String action, 
TaskId parentTaskId, return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); } - public enum SortBy { - START_TIME("start_time"), - NAME("name"), - DURATION("duration"), - INDICES("index_count"), - SHARDS("shard_count"), - FAILED_SHARDS("failed_shard_count"), - REPOSITORY("repository"); - - private final String param; - - SortBy(String param) { - this.param = param; - } - - @Override - public String toString() { - return param; - } - - public static SortBy of(String value) { - return switch (value) { - case "start_time" -> START_TIME; - case "name" -> NAME; - case "duration" -> DURATION; - case "index_count" -> INDICES; - case "shard_count" -> SHARDS; - case "failed_shard_count" -> FAILED_SHARDS; - case "repository" -> REPOSITORY; - default -> throw new IllegalArgumentException("unknown sort order [" + value + "]"); - }; - } - } - public static final class After implements Writeable { private final String value; @@ -405,7 +371,7 @@ public static After fromQueryParam(String param) { } @Nullable - public static After from(@Nullable SnapshotInfo snapshotInfo, SortBy sortBy) { + public static After from(@Nullable SnapshotInfo snapshotInfo, SnapshotSortKey sortBy) { if (snapshotInfo == null) { return null; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index eadbaa8aa0952..25e8a433bf243 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -122,7 +122,7 @@ public GetSnapshotsRequestBuilder setFromSortValue(@Nullable String fromSortValu return this; } - public GetSnapshotsRequestBuilder setSort(GetSnapshotsRequest.SortBy sort) { + public GetSnapshotsRequestBuilder setSort(SnapshotSortKey sort) { request.sort(sort); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java new file mode 100644 index 0000000000000..599f41e8615da --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.snapshots.get; + +import org.elasticsearch.snapshots.SnapshotInfo; + +import java.util.Comparator; + +/** + * Sort key for snapshots e.g. returned from the get-snapshots API. All values break ties using {@link SnapshotInfo#snapshotId} (i.e. by + * name). + */ +public enum SnapshotSortKey { + /** + * Sort by snapshot start time. + */ + START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)), + + /** + * Sort by snapshot name. + */ + NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())), + + /** + * Sort by snapshot duration (end time minus start time). 
+ */ + DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())), + /** + * Sort by number of indices in the snapshot. + */ + INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())), + + /** + * Sort by number of shards in the snapshot. + */ + SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)), + + /** + * Sort by number of failed shards in the snapshot. + */ + FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)), + + /** + * Sort by repository name. + */ + REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)); + + private final String name; + private final Comparator snapshotInfoComparator; + + SnapshotSortKey(String name, Comparator snapshotInfoComparator) { + this.name = name; + this.snapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + } + + @Override + public String toString() { + return name; + } + + public final Comparator getSnapshotInfoComparator() { + return snapshotInfoComparator; + } + + public static SnapshotSortKey of(String name) { + return switch (name) { + case "start_time" -> START_TIME; + case "name" -> NAME; + case "duration" -> DURATION; + case "index_count" -> INDICES; + case "shard_count" -> SHARDS; + case "failed_shard_count" -> FAILED_SHARDS; + case "repository" -> REPOSITORY; + default -> throw new IllegalArgumentException("unknown sort key [" + name + "]"); + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index ce3446317400d..c664e51a68456 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -148,7 +148,7 @@ private class GetSnapshotsOperation { private final SnapshotPredicates predicates; // snapshot ordering/pagination - private final GetSnapshotsRequest.SortBy sortBy; + private final SnapshotSortKey sortBy; private final SortOrder order; @Nullable private final String fromSortValue; @@ -177,7 +177,7 @@ private class GetSnapshotsOperation { String[] snapshots, boolean ignoreUnavailable, SnapshotPredicates predicates, - GetSnapshotsRequest.SortBy sortBy, + SnapshotSortKey sortBy, SortOrder order, String fromSortValue, int offset, @@ -214,7 +214,7 @@ private class GetSnapshotsOperation { * the sort value range if possible. 
*/ private List maybeFilterRepositories() { - if (sortBy != GetSnapshotsRequest.SortBy.REPOSITORY || fromSortValue == null) { + if (sortBy != SnapshotSortKey.REPOSITORY || fromSortValue == null) { return repositories; } final Predicate predicate = order == SortOrder.ASC @@ -486,27 +486,6 @@ private SnapshotsInRepo buildSimpleSnapshotInfos( return sortSnapshotsWithNoOffsetOrLimit(snapshotInfos); } - private static final Comparator BY_START_TIME = Comparator.comparingLong(SnapshotInfo::startTime) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_DURATION = Comparator.comparingLong( - sni -> sni.endTime() - sni.startTime() - ).thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_INDICES_COUNT = Comparator.comparingInt(sni -> sni.indices().size()) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::totalShards) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_FAILED_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::failedShards) - .thenComparing(SnapshotInfo::snapshotId); - - private static final Comparator BY_NAME = Comparator.comparing(sni -> sni.snapshotId().getName()); - - private static final Comparator BY_REPOSITORY = Comparator.comparing(SnapshotInfo::repository) - .thenComparing(SnapshotInfo::snapshotId); - private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snapshotInfos) { return sortSnapshots(snapshotInfos.stream(), snapshotInfos.size(), 0, GetSnapshotsRequest.NO_LIMIT); } @@ -532,15 +511,7 @@ private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, i } private Comparator buildComparator() { - final Comparator comparator = switch (sortBy) { - case START_TIME -> BY_START_TIME; - case NAME -> BY_NAME; - case DURATION -> BY_DURATION; - case INDICES -> BY_INDICES_COUNT; - case SHARDS -> BY_SHARDS_COUNT; - case FAILED_SHARDS -> BY_FAILED_SHARDS_COUNT; - case REPOSITORY -> BY_REPOSITORY; - }; + final var comparator = sortBy.getSnapshotInfoComparator(); return order == SortOrder.DESC ? 
comparator.reversed() : comparator; } @@ -732,7 +703,7 @@ private static boolean matchPolicy(String[] includes, String[] excludes, boolean return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; } - private static SnapshotPredicates getSortValuePredicate(String fromSortValue, GetSnapshotsRequest.SortBy sortBy, SortOrder order) { + private static SnapshotPredicates getSortValuePredicate(String fromSortValue, SnapshotSortKey sortBy, SortOrder order) { if (fromSortValue == null) { return MATCH_ALL; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 5cc77d3d50a01..7a66c6d7c435a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -59,7 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repositories).snapshots(snapshots); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose())); - final GetSnapshotsRequest.SortBy sort = GetSnapshotsRequest.SortBy.of(request.param("sort", getSnapshotsRequest.sort().toString())); + final SnapshotSortKey sort = SnapshotSortKey.of(request.param("sort", getSnapshotsRequest.sort().toString())); getSnapshotsRequest.sort(sort); final int size = request.paramAsInt("size", getSnapshotsRequest.size()); getSnapshotsRequest.size(size); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 965654266e1cc..14db6bdf84264 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -39,8 +39,7 @@ public void testValidateParameters() { assertThat(e.getMessage(), containsString("can't use offset with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) - .sort(GetSnapshotsRequest.SortBy.INDICES); + final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).sort(SnapshotSortKey.INDICES); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort with verbose=false")); } diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 46b18887241dd..2e7ce0400d78b 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -734,11 +734,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO }); } - public static void assertSnapshotListSorted( - List snapshotInfos, - @Nullable GetSnapshotsRequest.SortBy sort, - SortOrder sortOrder - ) { + public static void assertSnapshotListSorted(List snapshotInfos, @Nullable SnapshotSortKey sort, SortOrder sortOrder) { final BiConsumer assertion; if (sort == null) { assertion = (s1, s2) -> assertThat(s2, greaterThanOrEqualTo(s1)); From d6f91d9b21eed2b7a0df412e82a232f96efff70a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 6 Mar 2024 07:46:54 -0800 Subject: [PATCH 027/248] Reenable heap attack tests (#105939) I have fixed the recent failures of this suite. We should re-enable this module and keep an eye on it. --- .../org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index bb3617b178a51..8c87ef5977114 100644 --- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -10,7 +10,6 @@ import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -58,7 +57,6 @@ * Tests that run ESQL queries that have, in the past, used so much memory they * crash Elasticsearch. 
*/ -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105814") public class HeapAttackIT extends ESRestTestCase { @ClassRule From bd787f01c349a6722442773669a23a073faa585a Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 6 Mar 2024 11:23:50 -0500 Subject: [PATCH 028/248] Test mute for #105485 (#106029) --- .../org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 6d34fb0eced79..5eabacbf1ab3c 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -505,6 +505,7 @@ public void testDownsampleTwice() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105485") public void testDownsampleTwiceSameInterval() throws Exception { // Create the ILM policy Request request = new Request("PUT", "_ilm/policy/" + policy); From e23852736ef93de1755a0fa9a724a876c03168fb Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 6 Mar 2024 16:31:26 +0000 Subject: [PATCH 029/248] Inline `maybeFilterRepositories` (#105761) No need to copy the whole list of repositories just to filter out any non-matching ones, we can apply the filter on the fly instead. --- .../get/TransportGetSnapshotsAction.java | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index c664e51a68456..cf779445fcd6a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -208,23 +208,7 @@ private class GetSnapshotsOperation { } } - /** - * Filters the list of repositories that a request will fetch snapshots from in the special case of sorting by repository - * name and having a non-null value for {@link GetSnapshotsRequest#fromSortValue()} on the request to exclude repositories outside - * the sort value range if possible. - */ - private List maybeFilterRepositories() { - if (sortBy != SnapshotSortKey.REPOSITORY || fromSortValue == null) { - return repositories; - } - final Predicate predicate = order == SortOrder.ASC - ? 
repositoryMetadata -> fromSortValue.compareTo(repositoryMetadata.name()) <= 0 - : repositoryMetadata -> fromSortValue.compareTo(repositoryMetadata.name()) >= 0; - return repositories.stream().filter(predicate).toList(); - } - void getMultipleReposSnapshotInfo(ActionListener listener) { - List filteredRepositories = maybeFilterRepositories(); try (var listeners = new RefCountingListener(listener.map(ignored -> { cancellableTask.ensureNotCancelled(); final var sortedSnapshotsInRepos = sortSnapshots( @@ -246,8 +230,13 @@ void getMultipleReposSnapshotInfo(ActionListener listener) finalRemaining ); }))) { - for (final RepositoryMetadata repository : filteredRepositories) { + for (final RepositoryMetadata repository : repositories) { final String repoName = repository.name(); + if (skipRepository(repoName)) { + // TODO we should still count the matching snapshots in totalCount + continue; + } + getSingleRepoSnapshotInfo(repoName, listeners.acquire((SnapshotsInRepo snapshotsInRepo) -> { allSnapshotInfos.add(snapshotsInRepo.snapshotInfos()); remaining.addAndGet(snapshotsInRepo.remaining()); @@ -264,6 +253,15 @@ void getMultipleReposSnapshotInfo(ActionListener listener) } } + private boolean skipRepository(String repositoryName) { + if (sortBy == SnapshotSortKey.REPOSITORY && fromSortValue != null) { + // If we are sorting by repository name with an offset given by fromSortValue, skip earlier repositories + return order == SortOrder.ASC ? fromSortValue.compareTo(repositoryName) > 0 : fromSortValue.compareTo(repositoryName) < 0; + } else { + return false; + } + } + private void getSingleRepoSnapshotInfo(String repo, ActionListener listener) { final Map allSnapshotIds = new HashMap<>(); final List currentSnapshots = new ArrayList<>(); From 9a049a109208b7ef943961ac8324eadae4e4fdb2 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 6 Mar 2024 17:46:27 +0100 Subject: [PATCH 030/248] Use index block API in shrink/split/clone docs (#105997) This uses the dedicated index block API in the docs for the shrink, split, and clone APIs, rather than putting the block in as a setting directly. The specialized API will wait for ongoing operations to finish, which is better during indexing operations. Resolves #105831 --- docs/reference/indices/clone-index.asciidoc | 10 +++------- docs/reference/indices/shrink-index.asciidoc | 18 +++++++++++------- docs/reference/indices/split-index.asciidoc | 14 +++----------- 3 files changed, 17 insertions(+), 25 deletions(-) diff --git a/docs/reference/indices/clone-index.asciidoc b/docs/reference/indices/clone-index.asciidoc index 748b3adddd528..ef8ed28c6ac05 100644 --- a/docs/reference/indices/clone-index.asciidoc +++ b/docs/reference/indices/clone-index.asciidoc @@ -31,17 +31,13 @@ POST /my-index-000001/_clone/cloned-my-index-000001 For example, the following request prevents write operations on `my_source_index` -so it can be cloned. +so it can be cloned using the +<> API. Metadata changes like deleting the index are still allowed. 
[source,console] -------------------------------------------------- -PUT /my_source_index/_settings -{ - "settings": { - "index.blocks.write": true - } -} +PUT /my_source_index/_block/write -------------------------------------------------- // TEST[s/^/PUT my_source_index\n/] diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 388344123b964..5d5e6c24d9e83 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -39,8 +39,8 @@ replica shards. You can later re-add replica shards as part of the shrink operation. You can use the following <> -request to remove an index's replica shards, relocates the index's remaining -shards to the same node, and make the index read-only. +request to remove an index's replica shards, and relocate the index's remaining +shards to the same node. [source,console] -------------------------------------------------- @@ -48,8 +48,7 @@ PUT /my_source_index/_settings { "settings": { "index.number_of_replicas": 0, <1> - "index.routing.allocation.require._name": "shrink_node_name", <2> - "index.blocks.write": true <3> + "index.routing.allocation.require._name": "shrink_node_name" <2> } } -------------------------------------------------- @@ -58,15 +57,20 @@ PUT /my_source_index/_settings <1> Removes replica shards for the index. <2> Relocates the index's shards to the `shrink_node_name` node. See <>. -<3> Prevents write operations to this index. Metadata changes, such as deleting - the index, are still allowed. - It can take a while to relocate the source index. Progress can be tracked with the <>, or the <> can be used to wait until all shards have relocated with the `wait_for_no_relocating_shards` parameter. +You can then make the index read-only with the following request using the +<> API: + +[source,console] +-------------------------------------------------- +PUT /my_source_index/_block/write +-------------------------------------------------- +// TEST[continued] [[shrink-index-api-desc]] ==== {api-description-title} diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 06f048856348e..26ae0f19b177c 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -37,23 +37,15 @@ POST /my-index-000001/_split/split-my-index-000001 ** The index must be read-only. ** The <> status must be green. -You can do make an index read-only -with the following request: +You can do make an index read-only with the following request using the +<> API: [source,console] -------------------------------------------------- -PUT /my_source_index/_settings -{ - "settings": { - "index.blocks.write": true <1> - } -} +PUT /my_source_index/_block/write -------------------------------------------------- // TEST[s/^/PUT my_source_index\n/] -<1> Prevents write operations to this index while still allowing metadata - changes like deleting the index. - The current write index on a data stream cannot be split. 
In order to split the current write index, the data stream must first be <> so that a new write index is created From 3dcfbe0732ba6c348d7fc77469636d024c414a40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 6 Mar 2024 19:40:04 +0100 Subject: [PATCH 031/248] [DOCS] Changes the cohere example to use a different model (#106037) --- .../search-your-data/semantic-search-inference.asciidoc | 6 +++--- .../tab-widgets/inference-api/infer-api-mapping.asciidoc | 2 +- .../tab-widgets/inference-api/infer-api-search.asciidoc | 2 +- .../tab-widgets/inference-api/infer-api-task.asciidoc | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index b9bb36b21ea12..97a37e34eb116 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -6,9 +6,9 @@ The instructions in this tutorial shows you how to use the {infer} API with various services to perform semantic search on your data. The following examples -use Cohere's `embed-english-light-v3.0` model and OpenAI's -`text-embedding-ada-002` second generation embedding model. You can use any -Cohere and OpenAI models, they are all supported by the {infer} API. +use Cohere's `embed-english-v3.0` model and OpenAI's `text-embedding-ada-002` +second generation embedding model. You can use any Cohere and OpenAI models, +they are all supported by the {infer} API. Click the name of the service you want to use on any of the widgets below to review the corresponding instructions. diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc index 4b70a1b84f45f..e43bbd036b44e 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -8,7 +8,7 @@ PUT cohere-embeddings "properties": { "content_embedding": { <1> "type": "dense_vector", <2> - "dims": 384, <3> + "dims": 1024, <3> "element_type": "float" }, "content": { <4> diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc index 0c71ab7cecbce..1e8470471491f 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -9,7 +9,7 @@ GET cohere-embeddings/_search "query_vector_builder": { "text_embedding": { "model_id": "cohere_embeddings", - "model_text": "Calculate fuel cost" + "model_text": "Muscles in human body" } }, "k": 10, diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc index 3395fea9cc053..be0319fcf1ec1 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -7,7 +7,7 @@ PUT _inference/text_embedding/cohere_embeddings <1> "service": "cohere", "service_settings": { "api_key": "", <2> - "model_id": "embed-english-light-v3.0", <3> + "model_id": "embed-english-v3.0", <3> "embedding_type": "int8" }, "task_settings": { From abd2722228161ebfc062f897d4bae9a8dad4e197 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 6 Mar 2024 12:27:30 -0800 Subject: 
[PATCH 032/248] Cleanup subproject creation in build tests (#105998) Sometimes subprojects are created by build tests. There exists a utility function to define the subproject build file. Yet some tests add the project to settings and then configure through the subprojects directive in the root build file. This commit cleans up two such cases to use explicit subproject build files. --- .../internal/JdkDownloadPluginFuncTest.groovy | 18 ++++++------ .../DistributionDownloadPluginFuncTest.groovy | 29 ++----------------- 2 files changed, 12 insertions(+), 35 deletions(-) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 2640a5a43e167..67a04ebc5b7a0 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -99,17 +99,17 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { given: def mockRepoUrl = urlPath(jdkVendor, jdkVersion, platform) def mockedContent = filebytes(jdkVendor, platform) - 3.times { - settingsFile << """ - include ':sub-$it' - """ - } buildFile.text = """ plugins { id 'elasticsearch.jdk-download' apply false } subprojects { + + } + """ + 3.times { + subProject(':sub-' + it) << """ apply plugin: 'elasticsearch.jdk-download' jdks { @@ -126,8 +126,8 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { println "JDK HOME: " + jdks.myJdk } } - } - """ + """ + } when: def result = WiremockFixture.withWireMock(mockRepoUrl, mockedContent) { server -> @@ -165,7 +165,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { architecture = "x64" } } - + tasks.register("getJdk", PrintJdk) { dependsOn jdks.myJdk jdkPath = jdks.myJdk.getPath() @@ -174,7 +174,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { class PrintJdk extends DefaultTask { @Input String jdkPath - + @TaskAction void print() { println "JDK HOME: " + jdkPath } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy index 228223897ede9..3a06bdf917ff6 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy @@ -70,25 +70,14 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { def version = VersionProperties.getElasticsearch() def platform = ElasticsearchDistribution.Platform.LINUX - 3.times { - settingsFile << """ - include ':sub-$it' - """ - } buildFile.text = """ - import org.elasticsearch.gradle.Architecture - plugins { id 'elasticsearch.distribution-download' } - - subprojects { - apply plugin: 'elasticsearch.distribution-download' - - ${setupTestDistro(version, platform)} - ${setupDistroTask()} - } """ + 3.times { + subProject(':sub-' + it) << applyPluginAndSetupDistro(version, platform) + } when: def runner = gradleRunner('setupDistro', '-i', '-g', gradleUserHome) @@ -118,14 +107,6 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.distribution-download' } - ${setupTestDistro(version, platform)} - 
${setupDistroTask()} - - """ - } - - private static String setupTestDistro(String version, ElasticsearchDistribution.Platform platform) { - return """ elasticsearch_distributions { test_distro { version = "$version" @@ -134,11 +115,7 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { architecture = Architecture.current(); } } - """ - } - private static String setupDistroTask() { - return """ tasks.register("setupDistro", Sync) { from(elasticsearch_distributions.test_distro) into("build/distro") From e58dc5228de3085b891ffa452c8ab92961828fc4 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Wed, 6 Mar 2024 22:06:30 +0100 Subject: [PATCH 033/248] Increase master transition threshold in health API YAML REST tests (#106013) Due to the nature of the `mixed-cluster` tests setup, we sometimes see a couple of master changes in a short amount of time. --- qa/mixed-cluster/build.gradle | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 28d372671ee99..3953237a0e8d9 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -67,6 +67,11 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> numberOfNodes = 4 setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + /* There is a chance we have more master changes than "normal", so to avoid this test from failing, + we increase the threshold (as this purpose of this test isn't to test that specific indicator). */ + if (bwcVersion.onOrAfter(Version.fromString("8.4.0"))) { + setting 'health.master_history.no_master_transitions_threshold', '10' + } requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } From 11cfedbd96758424dc0a14a08099eb9e7084f52f Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 6 Mar 2024 17:10:09 -0500 Subject: [PATCH 034/248] Test mute for #106044 (#106045) --- .../elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java | 1 + .../org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java | 1 + .../elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java | 1 + .../org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java | 1 + .../elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java | 1 + .../org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java | 1 + 6 files changed, 6 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index 053e4226b3d79..5040225373f54 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -411,6 +411,7 @@ public XContentParser parser() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index eb3daf472ea2e..c5ba39b972651 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -471,6 +471,7 @@ public void testLegacyDateFormatName() throws IOException { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index d37e42e04edca..ea427bb28bb1d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -230,6 +230,7 @@ public void testTermsQuery() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index cd19bb50b842c..5a556d561f008 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -246,6 +246,7 @@ public void testTermsQuery() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index ce705f2e9ae8b..7d6ad4a61560a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -376,6 +376,7 @@ public void testMatchQuery() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index fd20b6c71e984..ad6a6636a92fc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -263,6 +263,7 @@ public void testTermsQuery() throws IOException { } } + @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); From 5377b4abf03821441af554ff77251c73ab0ef5fc Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 6 Mar 2024 14:31:24 -0800 Subject: [PATCH 035/248] Remove end of life Java versions from our testing matrix (#106042) These Java versions are EOL and no longer supported by Elasticsearch so we can remove them from our CI testing. We only need to support LTS versions >= 17 and the currently latest bundled JDK version. --- .buildkite/pipelines/periodic.template.yml | 3 --- .buildkite/pipelines/periodic.yml | 3 --- 2 files changed, 6 deletions(-) diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index a92e190be7963..535305a7ebd33 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -69,9 +69,6 @@ steps: ES_RUNTIME_JAVA: - graalvm-ce17 - openjdk17 - - openjdk18 - - openjdk19 - - openjdk20 - openjdk21 - openjdk22 GRADLE_TASK: diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index bedf559e98ff4..13c4566301afc 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1250,9 +1250,6 @@ steps: ES_RUNTIME_JAVA: - graalvm-ce17 - openjdk17 - - openjdk18 - - openjdk19 - - openjdk20 - openjdk21 - openjdk22 GRADLE_TASK: From e971c51a45c70b140c48ad5000c23f2f453ceee4 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 6 Mar 2024 14:45:20 -0800 Subject: [PATCH 036/248] Add status for enrich operator (#106036) This PR adds a status for the enrich operator. This status should help us answer how fast the enrich operator is. 
--- docs/changelog/106036.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../compute/operator/AsyncOperator.java | 121 +++++++++++++++++- .../compute/operator/DriverProfile.java | 2 +- .../operator/AsyncOperatorStatusTests.java | 69 ++++++++++ .../esql/action/CrossClustersEnrichIT.java | 3 + .../xpack/esql/action/EnrichIT.java | 29 +++++ .../esql/enrich/EnrichLookupOperator.java | 82 ++++++++++++ .../xpack/esql/plugin/EsqlPlugin.java | 6 +- .../enrich/EnrichOperatorStatusTests.java | 80 ++++++++++++ 10 files changed, 395 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/106036.yaml create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java diff --git a/docs/changelog/106036.yaml b/docs/changelog/106036.yaml new file mode 100644 index 0000000000000..7b129c6c0a7a3 --- /dev/null +++ b/docs/changelog/106036.yaml @@ -0,0 +1,5 @@ +pr: 106036 +summary: Add status for enrich operator +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ec3971a48a649..29dec80875787 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -137,6 +137,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0); public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0); + public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index bcab6a39496fd..061cefc86bed0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -8,16 +8,27 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.Map; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; /** * {@link AsyncOperator} performs an external computation specified in {@link #performAsync(Page, ActionListener)}. 
@@ -33,6 +44,7 @@ public abstract class AsyncOperator implements Operator { private final DriverContext driverContext; private final int maxOutstandingRequests; + private final LongAdder totalTimeInNanos = new LongAdder(); private boolean finished = false; private volatile boolean closed = false; @@ -81,7 +93,11 @@ public void addInput(Page input) { onFailure(e); onSeqNoCompleted(seqNo); }); - performAsync(input, ActionListener.runAfter(listener, driverContext::removeAsyncAction)); + final long startNanos = System.nanoTime(); + performAsync(input, ActionListener.runAfter(listener, () -> { + driverContext.removeAsyncAction(); + totalTimeInNanos.add(System.nanoTime() - startNanos); + })); success = true; } finally { if (success == false) { @@ -224,4 +240,107 @@ public SubscribableListener isBlocked() { return blockedFuture; } } + + @Override + public final Operator.Status status() { + return status( + Math.max(0L, checkpoint.getMaxSeqNo()), + Math.max(0L, checkpoint.getProcessedCheckpoint()), + TimeValue.timeValueNanos(totalTimeInNanos.sum()).millis() + ); + } + + protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { + return new Status(receivedPages, completedPages, totalTimeInMillis); + } + + public static class Status implements Operator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "async_operator", + Status::new + ); + + final long receivedPages; + final long completedPages; + final long totalTimeInMillis; + + protected Status(long receivedPages, long completedPages, long totalTimeInMillis) { + this.receivedPages = receivedPages; + this.completedPages = completedPages; + this.totalTimeInMillis = totalTimeInMillis; + } + + protected Status(StreamInput in) throws IOException { + this.receivedPages = in.readVLong(); + this.completedPages = in.readVLong(); + this.totalTimeInMillis = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(receivedPages); + out.writeVLong(completedPages); + out.writeVLong(totalTimeInMillis); + } + + public long receivedPages() { + return receivedPages; + } + + public long completedPages() { + return completedPages; + } + + public long totalTimeInMillis() { + return totalTimeInMillis; + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder); + return builder.endObject(); + } + + protected final XContentBuilder innerToXContent(XContentBuilder builder) throws IOException { + builder.field("received_pages", receivedPages); + builder.field("completed_pages", completedPages); + builder.field("total_time_in_millis", totalTimeInMillis); + if (totalTimeInMillis >= 0) { + builder.field("total_time", TimeValue.timeValueMillis(totalTimeInMillis)); + } + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status status = (Status) o; + return receivedPages == status.receivedPages + && completedPages == status.completedPages + && totalTimeInMillis == status.totalTimeInMillis; + } + + @Override + public int hashCode() { + return Objects.hash(receivedPages, completedPages, totalTimeInMillis); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public 
TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ESQL_ENRICH_OPERATOR_STATUS; + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java index 5f6e1ed12e204..00c3771540867 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java @@ -102,7 +102,7 @@ public long iterations() { return iterations; } - List operators() { + public List operators() { return operators; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java new file mode 100644 index 0000000000000..ab2dcc5e6c443 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorStatusTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncOperatorStatusTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return AsyncOperator.Status::new; + } + + @Override + protected AsyncOperator.Status createTestInstance() { + return new AsyncOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLongBetween(1, TimeValue.timeValueHours(1).millis()) + ); + } + + @Override + protected AsyncOperator.Status mutateInstance(AsyncOperator.Status in) throws IOException { + int field = randomIntBetween(0, 2); + return switch (field) { + case 0 -> new AsyncOperator.Status( + randomValueOtherThan(in.receivedPages(), ESTestCase::randomNonNegativeLong), + in.completedPages(), + in.totalTimeInMillis() + ); + case 1 -> new AsyncOperator.Status( + in.receivedPages(), + randomValueOtherThan(in.completedPages(), ESTestCase::randomNonNegativeLong), + in.totalTimeInMillis() + ); + case 2 -> new AsyncOperator.Status( + in.receivedPages(), + in.completedPages(), + randomValueOtherThan(in.totalTimeInMillis(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new AssertionError("unknown "); + }; + } + + public void testToXContent() { + var status = new AsyncOperator.Status(100, 50, TimeValue.timeValueSeconds(10).millis()); + String json = Strings.toString(status, true, true); + assertThat(json, equalTo(""" + { + "received_pages" : 100, + "completed_pages" : 50, + "total_time_in_millis" : 10000, + "total_time" : "10s" + }""")); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 09f20d7ca4ffd..2b59e6dd1957d 100644 --- 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -460,6 +460,9 @@ protected EsqlQueryResponse runQuery(String query) { EsqlQueryRequest request = new EsqlQueryRequest(); request.query(query); request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + if (randomBoolean()) { + request.profile(true); + } return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index a589e1cc468a5..3bb6bb35b5210 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -18,6 +18,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -58,12 +60,15 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; +import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.transport.AbstractSimpleTransportTestCase.IGNORE_DESERIALIZATION_ERRORS_SETTING; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; public class EnrichIT extends AbstractEsqlIntegTestCase { @@ -121,6 +126,9 @@ protected EsqlQueryResponse run(EsqlQueryRequest request) { } else { client = client(); } + if (request.profile() == false && randomBoolean()) { + request.profile(true); + } if (randomBoolean()) { setRequestCircuitBreakerLimit(ByteSizeValue.ofBytes(between(256, 4096))); try { @@ -318,6 +326,27 @@ public void testTopN() { } } + public void testProfile() { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.pragmas(randomPragmas()); + request.query("from listens* | sort timestamp DESC | limit 1 | " + enrichSongCommand() + " | KEEP timestamp, artist"); + request.profile(true); + try (var resp = run(request)) { + Iterator row = resp.values().next(); + assertThat(row.next(), equalTo(7L)); + assertThat(row.next(), equalTo("Linkin Park")); + EsqlQueryResponse.Profile profile = resp.profile(); + assertNotNull(profile); + List drivers = profile.drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(2)); + List enrichOperators = drivers.stream() + .flatMap(d -> d.operators().stream()) + .filter(status -> status.operator().startsWith("EnrichOperator")) + .toList(); + assertThat(enrichOperators, not(emptyList())); + } + } + /** * Some enrich queries that could fail without the PushDownEnrich rule. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 844cfde286072..2d433f0732064 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -8,15 +8,21 @@ package org.elasticsearch.xpack.esql.enrich; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import java.io.IOException; import java.util.List; +import java.util.Objects; public final class EnrichLookupOperator extends AsyncOperator { private final EnrichLookupService enrichLookupService; @@ -27,6 +33,7 @@ public final class EnrichLookupOperator extends AsyncOperator { private final String matchType; private final String matchField; private final List enrichFields; + private long totalTerms = 0L; public record Factory( String sessionId, @@ -95,6 +102,7 @@ public EnrichLookupOperator( @Override protected void performAsync(Page inputPage, ActionListener listener) { final Block inputBlock = inputPage.getBlock(inputChannel); + totalTerms += inputBlock.getTotalValueCount(); enrichLookupService.lookupAsync( sessionId, parentTask, @@ -107,9 +115,83 @@ protected void performAsync(Page inputPage, ActionListener listener) { ); } + @Override + public String toString() { + return "EnrichOperator[index=" + + enrichIndex + + " match_field=" + + matchField + + " enrich_fields=" + + enrichFields + + " inputChannel=" + + inputChannel + + "]"; + } + @Override protected void doClose() { // TODO: Maybe create a sub-task as the parent task of all the lookup tasks // then cancel it when this operator terminates early (e.g., have enough result). 
} + + @Override + protected Operator.Status status(long receivedPages, long completedPages, long totalTimeInMillis) { + return new EnrichLookupOperator.Status(receivedPages, completedPages, totalTimeInMillis, totalTerms); + } + + public static class Status extends AsyncOperator.Status { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Operator.Status.class, + "enrich", + Status::new + ); + + final long totalTerms; + + Status(long receivedPages, long completedPages, long totalTimeInMillis, long totalTerms) { + super(receivedPages, completedPages, totalTimeInMillis); + this.totalTerms = totalTerms; + } + + Status(StreamInput in) throws IOException { + super(in); + this.totalTerms = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(totalTerms); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder); + builder.field("total_terms", totalTerms); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass() || super.equals(o) == false) { + return false; + } + Status status = (Status) o; + return totalTerms == status.totalTerms; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), totalTerms); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index d1bcac4e399e5..61f0393c80948 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -26,6 +26,7 @@ import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.AggregationOperator; +import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.LimitOperator; @@ -52,6 +53,7 @@ import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; +import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; @@ -176,7 +178,9 @@ public List getNamedWriteables() { TopNOperatorStatus.ENTRY, MvExpandOperator.Status.ENTRY, ValuesSourceReaderOperator.Status.ENTRY, - SingleValueQuery.ENTRY + SingleValueQuery.ENTRY, + AsyncOperator.Status.ENTRY, + EnrichLookupOperator.Status.ENTRY ).stream(), Block.getNamedWriteables().stream() ).toList(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java new file mode 100644 index 0000000000000..4fc67f85cc062 --- /dev/null +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichOperatorStatusTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.enrich; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class EnrichOperatorStatusTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return EnrichLookupOperator.Status::new; + } + + @Override + protected EnrichLookupOperator.Status createTestInstance() { + return new EnrichLookupOperator.Status( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomLongBetween(1, TimeValue.timeValueHours(1).millis()) + ); + } + + @Override + protected EnrichLookupOperator.Status mutateInstance(EnrichLookupOperator.Status in) throws IOException { + int field = randomIntBetween(0, 3); + return switch (field) { + case 0 -> new EnrichLookupOperator.Status( + randomValueOtherThan(in.receivedPages(), ESTestCase::randomNonNegativeLong), + in.completedPages(), + in.totalTerms, + in.totalTimeInMillis() + ); + case 1 -> new EnrichLookupOperator.Status( + in.receivedPages(), + randomValueOtherThan(in.completedPages(), ESTestCase::randomNonNegativeLong), + in.totalTerms, + in.totalTimeInMillis() + ); + case 2 -> new EnrichLookupOperator.Status( + in.receivedPages(), + in.completedPages(), + randomValueOtherThan(in.totalTerms, ESTestCase::randomNonNegativeLong), + in.totalTimeInMillis() + ); + case 3 -> new EnrichLookupOperator.Status( + in.receivedPages(), + in.completedPages(), + in.totalTerms, + randomValueOtherThan(in.totalTimeInMillis(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new AssertionError("unknown "); + }; + } + + public void testToXContent() { + var status = new EnrichLookupOperator.Status(100, 50, TimeValue.timeValueSeconds(10).millis(), 120); + String json = Strings.toString(status, true, true); + assertThat(json, equalTo(""" + { + "received_pages" : 100, + "completed_pages" : 50, + "total_time_in_millis" : 10000, + "total_time" : "10s", + "total_terms" : 120 + }""")); + } +} From daacea9c0a03b4f2a9a08979c198024cda140323 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 7 Mar 2024 07:22:13 +0100 Subject: [PATCH 037/248] [Profiling] Use seed for consistent test results (#105783) In some cases (APM integration) profiling APIs may use the random sampler aggregation which inherently produces random results. To have consistent results in tests we have implemented a hack that kept the seed used by random sampler constant. With #104830 it is now possible to provide the shard seed directly to random sampler so we can now remove this hack. 
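The core of the change, slightly simplified from the diff below (the `group_by` sub-aggregation is omitted for brevity; all names are as added by this change, and `setShardSeed` is intentionally not exposed in the REST API):

```
RandomSamplerAggregationBuilder randomSampler = new RandomSamplerAggregationBuilder("sample")
    .setSeed(request.hashCode())
    .setProbability(responseBuilder.getSamplingRate());
// shard seed is only set in tests and ensures consistent results
if (request.getShardSeed() != null) {
    randomSampler.setShardSeed(request.getShardSeed());
}
```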
Relates #104830 --- .../profiling/GetStackTracesActionIT.java | 28 ++----------------- .../profiling/GetStackTracesRequest.java | 11 ++++++++ .../TransportGetStackTracesAction.java | 19 +++++++------ 3 files changed, 25 insertions(+), 33 deletions(-) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 02506db3e9cc8..62b8242e7df86 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,11 +7,9 @@ package org.elasticsearch.xpack.profiling; -import org.elasticsearch.index.Index; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.shard.ShardId; import java.util.List; @@ -92,7 +90,6 @@ public void testGetStackTracesFromAPMWithMatchNoDownsampling() throws Exception public void testGetStackTracesFromAPMWithMatchAndDownsampling() throws Exception { TermQueryBuilder query = QueryBuilders.termQuery("transaction.name", "encodeSha1"); - Index apmTest = resolveIndex("apm-test-001"); GetStackTracesRequest request = new GetStackTracesRequest( 1, @@ -107,28 +104,9 @@ public void testGetStackTracesFromAPMWithMatchAndDownsampling() throws Exception null, null, null - ) { - @Override - public boolean equals(Object o) { - return super.equals(o); - } - - @Override - public int hashCode() { - // The random sampler aggregation takes a user-provided seed as well as the index UUID into account for randomness. This is - // fine for a production use case but here we need full control over the internal seed so test results are stable. As - // the index UUID changes between test runs, and we have no control over it, we will instead modify the user provided seed - // so that the random number generator is always initialized the same, regardless of the index UUID. - // - // See org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplingQuery#createWeight(), specifically the - // initialization of SplittableRandom(), which uses both the "seed" (user-provided) and a "hash", which is built from - // ShardId#hashCode(). By using the same hash code, the XOR will always evaluate to 0, thus producing a consistent seed for - // SplittableRandom(). - int baseSeed = new ShardId(apmTest, 0).hashCode(); - // a seed of zero won't return results for our test scenario, so we toggle one bit to generate a consistent non-zero seed. 
- return baseSeed ^ 2; - } - }; + ); + // ensures consistent results in the random sampler aggregation that is used internally + request.setShardSeed(42); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); assertEquals(49, response.getTotalFrames()); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index 3dfe48744cb97..86ed038467191 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -69,6 +69,9 @@ public class GetStackTracesRequest extends ActionRequest implements IndicesReque // sample counts by default and remove this flag. private Boolean adjustSampleCount; + // This is only meant for testing and is intentionally not exposed in the REST API. + private Integer shardSeed; + public GetStackTracesRequest() { this(null, null, null, null, null, null, null, null, null, null, null, null); } @@ -167,6 +170,14 @@ public void setAdjustSampleCount(Boolean adjustSampleCount) { this.adjustSampleCount = adjustSampleCount; } + public Integer getShardSeed() { + return shardSeed; + } + + public void setShardSeed(Integer shardSeed) { + this.shardSeed = shardSeed; + } + public void parseXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = null; diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 417f4fccfa8d9..aa5f3efb179a2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -257,6 +257,16 @@ private void searchGenericEventGroupedByStackTrace( ActionListener submitListener, GetStackTracesResponseBuilder responseBuilder ) { + + RandomSamplerAggregationBuilder randomSampler = new RandomSamplerAggregationBuilder("sample").setSeed(request.hashCode()) + .setProbability(responseBuilder.getSamplingRate()) + .subAggregation( + new CountedTermsAggregationBuilder("group_by").size(MAX_TRACE_EVENTS_RESULT_SIZE).field(request.getStackTraceIdsField()) + ); + // shard seed is only set in tests and ensures consistent results + if (request.getShardSeed() != null) { + randomSampler.setShardSeed(request.getShardSeed()); + } client.prepareSearch(request.getIndices()) .setTrackTotalHits(false) .setSize(0) @@ -266,14 +276,7 @@ private void searchGenericEventGroupedByStackTrace( .setQuery(request.getQuery()) .addAggregation(new MinAggregationBuilder("min_time").field("@timestamp")) .addAggregation(new MaxAggregationBuilder("max_time").field("@timestamp")) - .addAggregation( - new RandomSamplerAggregationBuilder("sample").setSeed(request.hashCode()) - .setProbability(responseBuilder.getSamplingRate()) - .subAggregation( - new CountedTermsAggregationBuilder("group_by").size(MAX_TRACE_EVENTS_RESULT_SIZE) - .field(request.getStackTraceIdsField()) - ) - ) + .addAggregation(randomSampler) .execute(handleEventsGroupedByStackTrace(submitTask, client, responseBuilder, submitListener, searchResponse -> { long totalSamples = 0; 
SingleBucketAggregation sample = searchResponse.getAggregations().get("sample"); From 0731f5ff36b7207a649ea25b2e45935d9c197cee Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 7 Mar 2024 08:02:06 +0100 Subject: [PATCH 038/248] Add time series source operator. (#105398) This change adds an experimental time series source operator that gets enabled when `time_series` query pragma is set. When enabled, the documents the source operator emits, are in time series order. Meaning sorted by tsid asc and timestamp descending. Other yet to be introduced operators can make use of the sorted order and optimizations or computations that would otherwise not be feasible. Example usage: ``` POST /_query?format=txt { "query": "FROM cpu_tsbs | LIMIT 3", "pragma": { "time_series": true } } ``` Note that this change on its own doesn't add any real functionality order then the sort order in which data gets emitted. This change is part of a series of many changes that would eventually add time series query support to ES|QL. There are many things to be done like adding a time series grouping operator that makes use of the sorted nature of pages that this source operator adds, adding parallization support, adding time series function support like `rate` and much more. Relates #105397 --- ...TimeSeriesSortedSourceOperatorFactory.java | 343 +++++++++++++++++ .../TimeSeriesSortedSourceOperatorTests.java | 359 ++++++++++++++++++ .../esql/qa/single_node/TSDBRestEsqlIT.java | 99 +++++ .../resources/tsdb-bulk-request.txt | 17 + .../javaRestTest/resources/tsdb-mapping.json | 56 +++ .../javaRestTest/resources/tsdb-settings.json | 9 + .../optimizer/LocalPhysicalPlanOptimizer.java | 16 +- .../xpack/esql/plan/physical/EsQueryExec.java | 8 +- .../plan/physical/EsTimeseriesQueryExec.java | 91 +++++ .../planner/EsPhysicalOperationProviders.java | 27 +- .../esql/planner/LocalExecutionPlanner.java | 1 - .../xpack/esql/plugin/QueryPragmas.java | 6 + 12 files changed, 1016 insertions(+), 16 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java new file mode 100644 index 0000000000000..b1211c8ea5ff4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -0,0 +1,343 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.function.Function; + +/** + * Creates a source operator that takes advantage of the natural sorting of segments in a tsdb index. + *

+ * This source operator loads the _tsid and @timestamp fields, which are used for emitting documents in the correct order. These field values
+ * are included in the page as separate blocks and downstream operators can make use of these loaded time series ids and timestamps.
+ *

+ * The source operator includes all documents of a time series within the same page. So the same time series never exists in multiple pages.
+ * Downstream operators can make use of this implementation detail.
+ *
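+ * For example, assuming a {@code Page page} obtained from this operator, a downstream operator can read the emitted blocks roughly as
+ * follows (a minimal sketch; the block order mirrors the {@code Page} construction in {@code getOutput()}: doc vector, tsid ordinals,
+ * timestamps):
+ * <pre>{@code
+ * DocVector docs = (DocVector) page.getBlock(0).asVector();
+ * IntVector tsidOrds = (IntVector) page.getBlock(1).asVector();     // one ordinal per time series
+ * LongVector timestamps = (LongVector) page.getBlock(2).asVector(); // @timestamp values, descending within each tsid
+ * for (int p = 0; p < docs.getPositionCount(); p++) {
+ *     int tsid = tsidOrds.getInt(p);
+ *     long timestamp = timestamps.getLong(p);
+ *     // per-time-series computation goes here
+ * }
+ * }</pre>
+ *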

    + * This operator currently only supports shard level concurrency. A new concurrency mechanism should be introduced at the time serie level + * in order to read tsdb indices in parallel. + */ +public record TimeSeriesSortedSourceOperatorFactory(int limit, int maxPageSize, int taskConcurrency, LuceneSliceQueue sliceQueue) + implements + LuceneOperator.Factory { + + @Override + public SourceOperator get(DriverContext driverContext) { + return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit); + } + + @Override + public int taskConcurrency() { + return taskConcurrency; + } + + @Override + public String describe() { + return "TimeSeriesSortedSourceOperator[maxPageSize = " + maxPageSize + ", limit = " + limit + "]"; + } + + public static TimeSeriesSortedSourceOperatorFactory create( + int limit, + int maxPageSize, + int taskConcurrency, + List searchContexts, + Function queryFunction + ) { + var weightFunction = LuceneOperator.weightFunction(queryFunction, ScoreMode.COMPLETE_NO_SCORES); + var sliceQueue = LuceneSliceQueue.create(searchContexts, weightFunction, DataPartitioning.SHARD, taskConcurrency); + taskConcurrency = Math.min(sliceQueue.totalSlices(), taskConcurrency); + return new TimeSeriesSortedSourceOperatorFactory(limit, maxPageSize, taskConcurrency, sliceQueue); + } + + static final class Impl extends SourceOperator { + + private final int maxPageSize; + private final BlockFactory blockFactory; + private final LuceneSliceQueue sliceQueue; + private int currentPagePos = 0; + private int remainingDocs; + private boolean doneCollecting; + private IntVector.Builder docsBuilder; + private IntVector.Builder segmentsBuilder; + private LongVector.Builder timestampIntervalBuilder; + // TODO: handle when a time series spans across backing indices + // In that case we need to bytes representation of the tsid + private IntVector.Builder tsOrdBuilder; + private TimeSeriesIterator iterator; + + Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit) { + this.maxPageSize = maxPageSize; + this.blockFactory = blockFactory; + this.remainingDocs = limit; + this.docsBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); + this.segmentsBuilder = null; + this.timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); + this.tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); + this.sliceQueue = sliceQueue; + } + + @Override + public void finish() { + this.doneCollecting = true; + } + + @Override + public boolean isFinished() { + return doneCollecting; + } + + @Override + public Page getOutput() { + if (isFinished()) { + return null; + } + + if (remainingDocs <= 0) { + doneCollecting = true; + return null; + } + + Page page = null; + IntBlock shard = null; + IntVector leaf = null; + IntVector docs = null; + LongVector timestampIntervals = null; + IntVector tsids = null; + try { + if (iterator == null) { + var slice = sliceQueue.nextSlice(); + if (slice == null) { + doneCollecting = true; + return null; + } + if (segmentsBuilder == null && slice.numLeaves() > 1) { + segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + } + iterator = new TimeSeriesIterator(slice); + } + iterator.consume(); + shard = blockFactory.newConstantIntBlockWith(iterator.slice.shardContext().index(), currentPagePos); + boolean singleSegmentNonDecreasing; + if (iterator.slice.numLeaves() == 1) { + singleSegmentNonDecreasing = true; + int segmentOrd = 
iterator.slice.getLeaf(0).leafReaderContext().ord; + leaf = blockFactory.newConstantIntBlockWith(segmentOrd, currentPagePos).asVector(); + } else { + // Due to the multi segment nature of time series source operator singleSegmentNonDecreasing must be false + singleSegmentNonDecreasing = false; + leaf = segmentsBuilder.build(); + segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + } + docs = docsBuilder.build(); + docsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + + timestampIntervals = timestampIntervalBuilder.build(); + timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); + tsids = tsOrdBuilder.build(); + tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); + + page = new Page( + currentPagePos, + new DocVector(shard.asVector(), leaf, docs, singleSegmentNonDecreasing).asBlock(), + tsids.asBlock(), + timestampIntervals.asBlock() + ); + + currentPagePos = 0; + if (iterator.completed()) { + iterator = null; + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + if (page == null) { + Releasables.closeExpectNoException(shard, leaf, docs, timestampIntervals, tsids); + } + } + return page; + } + + @Override + public void close() { + Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampIntervalBuilder, tsOrdBuilder); + } + + class TimeSeriesIterator { + + final LuceneSlice slice; + final Leaf leaf; + final PriorityQueue queue; + int globalTsidOrd; + BytesRef currentTsid = new BytesRef(); + + TimeSeriesIterator(LuceneSlice slice) throws IOException { + this.slice = slice; + Weight weight = slice.weight().get(); + if (slice.numLeaves() == 1) { + queue = null; + leaf = new Leaf(weight, slice.getLeaf(0).leafReaderContext()); + } else { + queue = new PriorityQueue<>(slice.numLeaves()) { + @Override + protected boolean lessThan(Leaf a, Leaf b) { + // tsid hash in ascending order: + int cmp = a.timeSeriesHash.compareTo(b.timeSeriesHash); + if (cmp == 0) { + // timestamp in descending order: + cmp = -Long.compare(a.timestamp, b.timestamp); + } + return cmp < 0; + } + }; + leaf = null; + for (var leafReaderContext : slice.leaves()) { + Leaf leaf = new Leaf(weight, leafReaderContext.leafReaderContext()); + if (leaf.nextDoc()) { + queue.add(leaf); + } + } + } + } + + void consume() throws IOException { + if (queue != null) { + currentTsid = BytesRef.deepCopyOf(queue.top().timeSeriesHash); + boolean breakOnNextTsidChange = false; + while (queue.size() > 0) { + if (remainingDocs <= 0) { + break; + } + if (currentPagePos > maxPageSize) { + breakOnNextTsidChange = true; + } + + currentPagePos++; + remainingDocs--; + Leaf leaf = queue.top(); + segmentsBuilder.appendInt(leaf.segmentOrd); + docsBuilder.appendInt(leaf.iterator.docID()); + timestampIntervalBuilder.appendLong(leaf.timestamp); + tsOrdBuilder.appendInt(globalTsidOrd); + if (leaf.nextDoc()) { + // TODO: updating the top is one of the most expensive parts of this operation. + // Ideally we would do this a less as possible. Maybe the top can be updated every N docs? 
+ Leaf newTop = queue.updateTop(); + if (newTop.timeSeriesHash.equals(currentTsid) == false) { + globalTsidOrd++; + currentTsid = BytesRef.deepCopyOf(newTop.timeSeriesHash); + if (breakOnNextTsidChange) { + break; + } + } + } else { + queue.pop(); + } + } + } else { + int previousTsidOrd = leaf.timeSeriesHashOrd; + boolean breakOnNextTsidChange = false; + // Only one segment, so no need to use priority queue and use segment ordinals as tsid ord. + while (leaf.nextDoc()) { + if (remainingDocs <= 0) { + break; + } + if (currentPagePos > maxPageSize) { + breakOnNextTsidChange = true; + } + if (breakOnNextTsidChange) { + if (previousTsidOrd != leaf.timeSeriesHashOrd) { + break; + } + } + + currentPagePos++; + remainingDocs--; + + tsOrdBuilder.appendInt(leaf.timeSeriesHashOrd); + timestampIntervalBuilder.appendLong(leaf.timestamp); + // Don't append segment ord, because there is only one segment. + docsBuilder.appendInt(leaf.iterator.docID()); + previousTsidOrd = leaf.timeSeriesHashOrd; + } + } + } + + boolean completed() { + if (queue != null) { + return iterator.queue.size() == 0; + } else { + return leaf.iterator.docID() == DocIdSetIterator.NO_MORE_DOCS; + } + } + + static class Leaf { + + private final int segmentOrd; + private final SortedDocValues tsids; + private final SortedNumericDocValues timestamps; + private final DocIdSetIterator iterator; + + private long timestamp; + private int timeSeriesHashOrd; + private BytesRef timeSeriesHash; + + Leaf(Weight weight, LeafReaderContext leaf) throws IOException { + this.segmentOrd = leaf.ord; + tsids = leaf.reader().getSortedDocValues("_tsid"); + timestamps = leaf.reader().getSortedNumericDocValues("@timestamp"); + iterator = weight.scorer(leaf).iterator(); + } + + boolean nextDoc() throws IOException { + int docID = iterator.nextDoc(); + if (docID == DocIdSetIterator.NO_MORE_DOCS) { + return false; + } + + boolean advanced = tsids.advanceExact(iterator.docID()); + assert advanced; + timeSeriesHashOrd = tsids.ordValue(); + timeSeriesHash = tsids.lookupOrd(timeSeriesHashOrd); + advanced = timestamps.advanceExact(iterator.docID()); + assert advanced; + timestamp = timestamps.nextValue(); + return true; + } + + } + + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + "[" + "maxPageSize=" + maxPageSize + ", remainingDocs=" + remainingDocs + "]"; + } + + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java new file mode 100644 index 0000000000000..3b47597d6ea2f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -0,0 +1,359 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleDocValuesField; +import org.apache.lucene.document.FloatDocValuesField; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.junit.After; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class TimeSeriesSortedSourceOperatorTests extends AnyOperatorTestCase { + + private IndexReader reader; + private final Directory directory = newDirectory(); + + @After + public void cleanup() throws IOException { + IOUtils.close(reader, directory); + } + + public void testSimple() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(1024, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + assertThat(results, hasSize(1)); + Page page = results.get(0); + assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + 
assertThat(timestampVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(numTimeSeries * numSamplesPerTS)); + + int offset = 0; + for (int expectedTsidOrd = 0; expectedTsidOrd < numTimeSeries; expectedTsidOrd++) { + String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); + long expectedVoltage = 5L + expectedTsidOrd; + for (int j = 0; j < numSamplesPerTS; j++) { + long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); + + assertThat(docVector.shards().getInt(offset), equalTo(0)); + assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); + assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); + assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); + assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); + offset++; + } + } + } + + public void testMaxPageSize() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(1024, 1, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + // A time series shouldn't be split over multiple pages. + assertThat(results, hasSize(numTimeSeries)); + for (int i = 0; i < numTimeSeries; i++) { + Page page = results.get(i); + assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numSamplesPerTS)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numSamplesPerTS)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(numSamplesPerTS)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numSamplesPerTS)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(numSamplesPerTS)); + + int offset = 0; + int expectedTsidOrd = i; + String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); + long expectedVoltage = 5L + expectedTsidOrd; + for (int j = 0; j < numSamplesPerTS; j++) { + long expectedTimestamp = timestampStart + ((numSamplesPerTS - j - 1) * 10_000L); + + assertThat(docVector.shards().getInt(offset), equalTo(0)); + assertThat(voltageVector.getLong(offset), equalTo(expectedVoltage)); + assertThat(hostnameVector.getBytesRef(offset, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); + assertThat(tsidVector.getInt(offset), equalTo(expectedTsidOrd)); + assertThat(timestampVector.getLong(offset), equalTo(expectedTimestamp)); + offset++; + } + } + } + + public void testLimit() { + int numTimeSeries = 3; + int numSamplesPerTS = 10; + int limit = 1; + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + List results = runDriver(limit, 1024, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); + assertThat(results, hasSize(1)); + Page page = results.get(0); + 
assertThat(page.getBlockCount(), equalTo(5)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(limit)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(limit)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(limit)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(limit)); + + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); + assertThat(hostnameVector.getPositionCount(), equalTo(limit)); + + assertThat(docVector.shards().getInt(0), equalTo(0)); + assertThat(voltageVector.getLong(0), equalTo(5L)); + assertThat(hostnameVector.getBytesRef(0, new BytesRef()).utf8ToString(), equalTo("host-00")); + assertThat(tsidVector.getInt(0), equalTo(0)); + assertThat(timestampVector.getLong(0), equalTo(timestampStart + ((numSamplesPerTS - 1) * 10_000L))); + } + + public void testRandom() { + int numDocs = 1024; + var ctx = driverContext(); + long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + var timeSeriesFactory = createTimeSeriesSourceOperator(Integer.MAX_VALUE, Integer.MAX_VALUE, randomBoolean(), writer -> { + int commitEvery = 64; + long timestamp = timestampStart; + for (int i = 0; i < numDocs; i++) { + String hostname = String.format(Locale.ROOT, "host-%02d", i % 20); + int voltage = i % 5; + writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", voltage }); + if (i % commitEvery == 0) { + writer.commit(); + } + timestamp += 10_000; + } + return numDocs; + }); + List results = new ArrayList<>(); + + var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); + OperatorTestCase.runDriver( + new Driver( + ctx, + timeSeriesFactory.get(ctx), + List.of(ValuesSourceReaderOperatorTests.factory(reader, voltageField, ElementType.LONG).get(ctx)), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + OperatorTestCase.assertDriverContext(ctx); + assertThat(results, hasSize(1)); + Page page = results.get(0); + assertThat(page.getBlockCount(), equalTo(4)); + + DocVector docVector = (DocVector) page.getBlock(0).asVector(); + assertThat(docVector.getPositionCount(), equalTo(numDocs)); + + IntVector tsidVector = (IntVector) page.getBlock(1).asVector(); + assertThat(tsidVector.getPositionCount(), equalTo(numDocs)); + + LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); + assertThat(timestampVector.getPositionCount(), equalTo(numDocs)); + + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + assertThat(voltageVector.getPositionCount(), equalTo(numDocs)); + for (int i = 0; i < page.getBlockCount(); i++) { + assertThat(docVector.shards().getInt(0), equalTo(0)); + assertThat(voltageVector.getLong(i), either(greaterThanOrEqualTo(0L)).or(lessThanOrEqualTo(4L))); + assertThat(tsidVector.getInt(i), either(greaterThanOrEqualTo(0)).or(lessThan(20))); + assertThat(timestampVector.getLong(i), greaterThanOrEqualTo(timestampStart)); + } + } + + @Override + protected Operator.OperatorFactory simple() { + return createTimeSeriesSourceOperator(1, 1, false, writer -> { + long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); + writeTS(writer, timestamp, new Object[] { 
"hostname", "host-01" }, new Object[] { "voltage", 2 }); + return 1; + }); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "TimeSeriesSortedSourceOperator[maxPageSize = 1, limit = 1]"; + } + + @Override + protected String expectedToStringOfSimple() { + return "Impl[maxPageSize=1, remainingDocs=1]"; + } + + List runDriver(int limit, int maxPageSize, boolean forceMerge, int numTimeSeries, int numSamplesPerTS, long timestampStart) { + var ctx = driverContext(); + var timeSeriesFactory = createTimeSeriesSourceOperator(limit, maxPageSize, forceMerge, writer -> { + long timestamp = timestampStart; + for (int i = 0; i < numSamplesPerTS; i++) { + for (int j = 0; j < numTimeSeries; j++) { + String hostname = String.format(Locale.ROOT, "host-%02d", j); + writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", j + 5 }); + } + timestamp += 10_000; + writer.commit(); + } + return numTimeSeries * numSamplesPerTS; + }); + + List results = new ArrayList<>(); + var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); + var hostnameField = new KeywordFieldMapper.KeywordFieldType("hostname"); + OperatorTestCase.runDriver( + new Driver( + ctx, + timeSeriesFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, voltageField, ElementType.LONG).get(ctx), + ValuesSourceReaderOperatorTests.factory(reader, hostnameField, ElementType.BYTES_REF).get(ctx) + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + OperatorTestCase.assertDriverContext(ctx); + return results; + } + + TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperator( + int limit, + int maxPageSize, + boolean forceMerge, + CheckedFunction indexingLogic + ) { + int numDocs; + Sort sort = new Sort( + new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), + new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) + ); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setIndexSort(sort).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + numDocs = indexingLogic.apply(writer); + if (forceMerge) { + writer.forceMerge(1); + } + reader = writer.getReader(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); + Function queryFunction = c -> new MatchAllDocsQuery(); + return TimeSeriesSortedSourceOperatorFactory.create( + Math.min(numDocs, limit), + Math.min(numDocs, maxPageSize), + 1, + List.of(ctx), + queryFunction + ); + } + + static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { + final List fields = new ArrayList<>(); + fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); + fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); + final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + for (int i = 0; i < dimensions.length; i += 2) { + if (dimensions[i + 1] instanceof Number n) { + builder.addLong(dimensions[i].toString(), n.longValue()); + } else { + builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); + } + } + for (int i = 0; i < metrics.length; i += 2) 
{ + if (metrics[i + 1] instanceof Integer || metrics[i + 1] instanceof Long) { + fields.add(new NumericDocValuesField(metrics[i].toString(), ((Number) metrics[i + 1]).longValue())); + } else if (metrics[i + 1] instanceof Float) { + fields.add(new FloatDocValuesField(metrics[i].toString(), (float) metrics[i + 1])); + } else if (metrics[i + 1] instanceof Double) { + fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); + } + } + // Use legacy tsid to make tests easier to understand: + fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + iw.addDocument(fields); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java new file mode 100644 index 0000000000000..bd0154176df88 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Build; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.CsvTestsDataLoader; +import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsqlSync; + +/** + * A dedicated test suite for testing time series esql functionality. + * This while the functionality is gated behind a query pragma. 
+ */ +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class TSDBRestEsqlIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testTimeSeriesQuerying() throws IOException { + assertTrue("time series querying relies on query pragma", Build.current().isSnapshot()); + var settings = Settings.builder() + .loadFromStream("tsdb-settings.json", TSDBRestEsqlIT.class.getResourceAsStream("/tsdb-settings.json"), false) + .build(); + String mapping = CsvTestsDataLoader.readTextFile(TSDBRestEsqlIT.class.getResource("/tsdb-mapping.json")); + createIndex("k8s", settings, mapping); + + Request bulk = new Request("POST", "/k8s/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + + String bulkBody = new String( + TSDBRestEsqlIT.class.getResourceAsStream("/tsdb-bulk-request.txt").readAllBytes(), + StandardCharsets.UTF_8 + ); + bulk.setJsonEntity(bulkBody); + Response response = client().performRequest(bulk); + assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + + RestEsqlTestCase.RequestObjectBuilder builder = new RestEsqlTestCase.RequestObjectBuilder().query( + "FROM k8s | KEEP k8s.pod.name, @timestamp" + ); + builder.pragmas(Settings.builder().put("time_series", true).build()); + Map result = runEsqlSync(builder); + @SuppressWarnings("unchecked") + List> columns = (List>) result.get("columns"); + assertEquals(2, columns.size()); + assertEquals("k8s.pod.name", columns.get(0).get("name")); + assertEquals("@timestamp", columns.get(1).get("name")); + + // Note that _tsid is a hashed value, so tsid no longer is sorted lexicographically. 
+ @SuppressWarnings("unchecked") + List> values = (List>) result.get("values"); + assertEquals(8, values.size()); + assertEquals("hamster", values.get(0).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(0).get(1)); + assertEquals("hamster", values.get(1).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(1).get(1)); + + assertEquals("rat", values.get(2).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(2).get(1)); + assertEquals("rat", values.get(3).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(3).get(1)); + + assertEquals("cow", values.get(4).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(4).get(1)); + assertEquals("cow", values.get(5).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(5).get(1)); + + assertEquals("cat", values.get(6).get(0)); + assertEquals("2021-04-29T17:29:22.470Z", values.get(6).get(1)); + assertEquals("cat", values.get(7).get(0)); + assertEquals("2021-04-29T17:29:12.470Z", values.get(7).get(1)); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt new file mode 100644 index 0000000000000..e7ec37c14a072 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-bulk-request.txt @@ -0,0 +1,17 @@ +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "network": {"tx": 2001818691, "rx": 802133794},"cpu": {"limit": 0.3787411612903226, "nanocores": 35222928, "node": 0.048845732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508", "network": {"tx": 2005177954, "rx": 801479970},"cpu": {"limit": 0.5786461612903226, "nanocores": 25222928, "node": 0.505805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509", "network": {"tx": 2006223737, "rx": 802337279},"cpu": {"limit": 0.5787451612903226, "nanocores": 55252928, "node": 0.606805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:12.470Z", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510", "network": {"tx": 2012916202, "rx": 803685721},"cpu": {"limit": 0.6786461612903226, "nanocores": 75227928, "node": 0.058855732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510", "network": {"tx": 1434521831, "rx": 530575198},"cpu": {"limit": 0.7787411712903226, "nanocores": 75727928, "node": 0.068865732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509", "network": {"tx": 1434577921, "rx": 530600088},"cpu": {"limit": 0.2782412612903226, "nanocores": 25222228, "node": 0.078875732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508", "network": {"tx": 1434587694, "rx": 530604797},"cpu": {"limit": 0.1717411612903226, "nanocores": 15121928, "node": 0.808805732}}}} +{"create": {}} +{"@timestamp": "2021-04-29T17:29:22.470Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", 
"uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "network": {"tx": 1434595272, "rx": 530605511},"cpu": {"limit": 0.8787481682903226, "nanocores": 95292928, "node": 0.908905732}}}} + diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json new file mode 100644 index 0000000000000..78af243bac610 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-mapping.json @@ -0,0 +1,56 @@ +{ + "properties": { + "@timestamp": { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "cpu": { + "properties": { + "limit": { + "type": "scaled_float", + "scaling_factor": 1000.0, + "time_series_metric": "gauge" + }, + "nanocores": { + "type": "long", + "time_series_metric": "gauge" + }, + "node": { + "type": "scaled_float", + "scaling_factor": 1000.0, + "time_series_metric": "gauge" + } + } + }, + "network": { + "properties": { + "rx": { + "type": "long", + "time_series_metric": "gauge" + }, + "tx": { + "type": "long", + "time_series_metric": "gauge" + } + } + } + } + } + } + } + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json new file mode 100644 index 0000000000000..f84b1bc2a9dd1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/resources/tsdb-settings.json @@ -0,0 +1,9 @@ +{ + "index": { + "mode": "time_series", + "routing_path": [ + "metricset", + "k8s.pod.uid" + ] + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 7ae8e029fd761..546f34d1b474c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; +import org.elasticsearch.xpack.esql.plan.physical.EsTimeseriesQueryExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.FilterExec; @@ -83,9 +84,11 @@ public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor> rules(boolean optimizeForEsSource) { List> esSourceRules = new ArrayList<>(4); - esSourceRules.add(new ReplaceAttributeSourceWithDocId()); + esSourceRules.add(new ReplaceAttributeSourceWithDocId(timeSeriesMode)); if (optimizeForEsSource) { esSourceRules.add(new PushTopNToSource()); @@ -127,13 +130,20 @@ protected List> batches() { private static class ReplaceAttributeSourceWithDocId extends OptimizerRule { - ReplaceAttributeSourceWithDocId() { + private final boolean timeSeriesMode; + + ReplaceAttributeSourceWithDocId(boolean timeSeriesMode) { super(UP); + this.timeSeriesMode = timeSeriesMode; } @Override protected PhysicalPlan rule(EsSourceExec plan) 
{ - return new EsQueryExec(plan.source(), plan.index(), plan.query()); + if (timeSeriesMode) { + return new EsTimeseriesQueryExec(plan.source(), plan.index(), plan.query()); + } else { + return new EsQueryExec(plan.source(), plan.index(), plan.query()); + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index 9add95c28f433..779df60416f0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -105,6 +105,10 @@ public List sorts() { return sorts; } + public List attrs() { + return attrs; + } + /** * Estimate of the number of bytes that'll be loaded per position before * the stream of pages is consumed. @@ -128,10 +132,6 @@ public PhysicalPlan estimateRowSize(State state) { return Objects.equals(this.estimatedRowSize, size) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, size); } - public EsQueryExec withQuery(QueryBuilder query) { - return Objects.equals(this.query, query) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, estimatedRowSize); - } - public EsQueryExec withLimit(Expression limit) { return Objects.equals(this.limit, limit) ? this : new EsQueryExec(source(), index, attrs, query, limit, sorts, estimatedRowSize); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java new file mode 100644 index 0000000000000..0d92a52e6053c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsTimeseriesQueryExec.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.ql.expression.Attribute; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.index.EsIndex; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.type.EsField; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class EsTimeseriesQueryExec extends EsQueryExec { + + static final EsField TSID_FIELD = new EsField("_tsid", DataTypes.KEYWORD, Map.of(), true); + static final EsField TIMESTAMP_FIELD = new EsField("@timestamp", DataTypes.DATETIME, Map.of(), true); + + public EsTimeseriesQueryExec(Source source, EsIndex index, QueryBuilder query) { + this( + source, + index, + List.of( + new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD), + new FieldAttribute(source, TSID_FIELD.getName(), TSID_FIELD), + new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TSID_FIELD) + ), + query, + null, + null, + null + ); + } + + public EsTimeseriesQueryExec( + Source source, + EsIndex index, + List attrs, + QueryBuilder query, + Expression limit, + List sorts, + Integer estimatedRowSize + ) { + super(source, index, attrs, query, limit, sorts, estimatedRowSize); + } + + protected NodeInfo info() { + return NodeInfo.create(this, EsTimeseriesQueryExec::new, index(), attrs(), query(), limit(), sorts(), estimatedRowSize()); + } + + @Override + public PhysicalPlan estimateRowSize(State state) { + int size; + if (sorts() == null || sorts().isEmpty()) { + // track doc ids + state.add(false, Integer.BYTES); + size = state.consumeAllFields(false); + } else { + // track doc ids and segment ids + state.add(false, Integer.BYTES * 2); + size = state.consumeAllFields(true); + } + return Objects.equals(this.estimatedRowSize(), size) + ? this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit(), sorts(), size); + } + + @Override + public EsQueryExec withLimit(Expression limit) { + return Objects.equals(this.limit(), limit) + ? this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit, sorts(), estimatedRowSize()); + } + + @Override + public EsQueryExec withSorts(List sorts) { + return Objects.equals(this.sorts(), sorts) + ? 
this + : new EsTimeseriesQueryExec(source(), index(), attrs(), query(), limit(), sorts, estimatedRowSize()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 4721c7e2cf08e..234e01ed11633 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -19,6 +19,7 @@ import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; +import org.elasticsearch.compute.lucene.TimeSeriesSortedSourceOperatorFactory; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; @@ -139,14 +140,24 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, fieldSorts ); } else { - luceneFactory = new LuceneSourceOperator.Factory( - shardContexts, - querySupplier(esQueryExec.query()), - context.queryPragmas().dataPartitioning(), - context.queryPragmas().taskConcurrency(), - context.pageSize(rowEstimatedSize), - limit - ); + if (context.queryPragmas().timeSeriesMode()) { + luceneFactory = TimeSeriesSortedSourceOperatorFactory.create( + limit, + context.pageSize(rowEstimatedSize), + context.queryPragmas().taskConcurrency(), + shardContexts, + querySupplier(esQueryExec.query()) + ); + } else { + luceneFactory = new LuceneSourceOperator.Factory( + shardContexts, + querySupplier(esQueryExec.query()), + context.queryPragmas().dataPartitioning(), + context.queryPragmas().taskConcurrency(), + context.pageSize(rowEstimatedSize), + limit + ); + } } Layout.Builder layout = new Layout.Builder(); layout.append(esQueryExec.output()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index d70b0c3c0846e..d7d2e99426a97 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -163,7 +163,6 @@ public LocalExecutionPlan plan(PhysicalPlan node) { AggregateExec.class, a -> a.getMode() == AggregateExec.Mode.FINAL ? 
new ProjectExec(a.source(), a, Expressions.asAttributes(a.aggregates())) : a ); - PhysicalOperation physicalOperation = plan(node, context); final TimeValue statusInterval = configuration.pragmas().statusInterval(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java index 2ceee9de9001e..fd76edf46229e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java @@ -41,6 +41,8 @@ public final class QueryPragmas implements Writeable { DataPartitioning.SEGMENT ); + public static final Setting TIME_SERIES_MODE = Setting.boolSetting("time_series", false); + /** * Size of a page in entries with {@code 0} being a special value asking * to adaptively size based on the number of columns in the page. @@ -128,6 +130,10 @@ public boolean isEmpty() { return settings.isEmpty(); } + public boolean timeSeriesMode() { + return TIME_SERIES_MODE.get(settings); + } + @Override public boolean equals(Object o) { if (this == o) return true; From fb0fc30e7d57f0e00d4d3c1ab9a7d9a2fabc8217 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Thu, 7 Mar 2024 10:07:31 +0100 Subject: [PATCH 039/248] Reset job if existing reset fails (#106020) * Try again to reset a job if waiting for completion of an existing reset task fails. * Update docs/changelog/106020.yaml * Update 106020.yaml * Update docs/changelog/106020.yaml * Improve code --- docs/changelog/106020.yaml | 5 +++++ .../tasks/get/TransportGetTaskAction.java | 2 +- .../xpack/ml/integration/ResetJobIT.java | 19 ++++++++++++++++++- .../ml/action/TransportResetJobAction.java | 10 +++++++++- 4 files changed, 33 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/106020.yaml diff --git a/docs/changelog/106020.yaml b/docs/changelog/106020.yaml new file mode 100644 index 0000000000000..094a43b430f89 --- /dev/null +++ b/docs/changelog/106020.yaml @@ -0,0 +1,5 @@ +pr: 106020 +summary: Fix resetting a job if the original reset task no longer exists. +area: Machine Learning +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index fdb3a958f6f6e..9e0b6937257b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -209,7 +209,7 @@ void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListe client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { - // We haven't yet created the index for the task results so it can't be found. + // We haven't yet created the index for the task results, so it can't be found. 
listener.onFailure( new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, request.getTaskId()) ); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java index 4d336daa4924c..61ce2224c1ed9 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ResetJobIT.java @@ -8,11 +8,13 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.Blocked; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.junit.After; @@ -34,10 +36,18 @@ public void tearDownData() { } public void testReset() throws Exception { + testReset(false); + } + + public void testReset_previousResetFailed() throws Exception { + testReset(true); + } + + private void testReset(boolean previousResetFailed) throws Exception { TimeValue bucketSpan = TimeValue.timeValueMinutes(30); long startTime = 1514764800000L; final int bucketCount = 100; - Job.Builder job = createJob("test-reset", bucketSpan); + Job.Builder job = createJob("test-reset-" + previousResetFailed, bucketSpan); openJob(job.getId()); postData( @@ -53,6 +63,13 @@ public void testReset() throws Exception { DataCounts dataCounts = getJobStats(job.getId()).get(0).getDataCounts(); assertThat(dataCounts.getProcessedRecordCount(), greaterThan(0L)); + if (previousResetFailed) { + JobUpdate jobUpdate = new JobUpdate.Builder(job.getId()).setBlocked( + new Blocked(Blocked.Reason.RESET, new TaskId(randomIdentifier(), randomInt())) + ).build(); + updateJob(job.getId(), jobUpdate); + } + resetJob(job.getId()); buckets = getBuckets(job.getId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java index 35a80876ea763..030e25ea7797a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; @@ -124,7 +125,14 @@ protected void masterOperation( waitExistingResetTaskToComplete( job.getBlocked().getTaskId(), request, - ActionListener.wrap(r -> resetIfJobIsStillBlockedOnReset(task, request, listener), listener::onFailure) + ActionListener.wrap(r -> resetIfJobIsStillBlockedOnReset(task, request, listener), e -> { 
+ if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { + // If the task is not found then the node it was running on likely died, so try again. + resetIfJobIsStillBlockedOnReset(task, request, listener); + } else { + listener.onFailure(e); + } + }) ); } else { ParentTaskAssigningClient taskClient = new ParentTaskAssigningClient(client, taskId); From 281dab59849fe8b0cc6aa042a948ee74c7c97039 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Thu, 7 Mar 2024 12:38:13 +0100 Subject: [PATCH 040/248] Fix ILM to DSL migration test for BA. (#106054) --- .../application/FullClusterRestartIT.java | 53 ++++++++++++++++--- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java index 6b1b6fc886825..95b3b576eb46c 100644 --- a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ObjectPath; @@ -22,12 +23,19 @@ import java.util.List; import static org.elasticsearch.Version.V_8_12_0; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { - + // DSL was introduced with version 8.12.0 of ES. private static final Version DSL_DEFAULT_RETENTION_VERSION = V_8_12_0; + // DSL was introduced with the version 3 of the registry. + private static final int DSL_REGISTRY_VERSION = 3; + // Legacy name we used for ILM policy configuration in versions prior to 8.12.0. + private static final String EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME = "behavioral_analytics-events-default"; + + // Event data streams template name. private static final String EVENT_DATA_STREAM_LEGACY_ILM_POLICY_NAME = "behavioral_analytics-events-default_policy"; @ClassRule @@ -36,6 +44,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas .version(getOldClusterTestVersion()) .nodes(2) .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") .module("x-pack-ent-search") .build(); @@ -48,7 +57,6 @@ protected ElasticsearchCluster getUpgradeCluster() { return cluster; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104470") public void testBehavioralAnalyticsDataRetention() throws Exception { assumeTrue( "Data retention changed by default to DSL in " + DSL_DEFAULT_RETENTION_VERSION, @@ -59,26 +67,32 @@ public void testBehavioralAnalyticsDataRetention() throws Exception { String newAnalyticsCollectionName = "newstuff"; if (isRunningAgainstOldCluster()) { + // Ensure index template is installed before executing the tests. 
+ assertBusy(() -> assertDataStreamTemplateExists(EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME)); + // Create an analytics collection Request legacyPutRequest = new Request("PUT", "_application/analytics/" + legacyAnalyticsCollectionName); assertOK(client().performRequest(legacyPutRequest)); // Validate that ILM lifecycle is in place - assertBusy(() -> assertLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); + assertBusy(() -> assertUsingLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); } else { + // Ensure index template is updated to version 3 before executing the tests. + assertBusy(() -> assertDataStreamTemplateExists(EVENT_DATA_STREAM_LEGACY_TEMPLATE_NAME, DSL_REGISTRY_VERSION)); + // Create a new analytics collection Request putRequest = new Request("PUT", "_application/analytics/" + newAnalyticsCollectionName); assertOK(client().performRequest(putRequest)); // Validate that NO ILM lifecycle is in place and we are using DLS instead. - assertBusy(() -> assertDslDataRetention(newAnalyticsCollectionName)); + assertBusy(() -> assertUsingDslDataRetention(newAnalyticsCollectionName)); // Validate that the existing analytics collection created with an older version is still using ILM - assertBusy(() -> assertLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); + assertBusy(() -> assertUsingLegacyDataRetentionPolicy(legacyAnalyticsCollectionName)); } } - private void assertLegacyDataRetentionPolicy(String analyticsCollectionName) throws IOException { + private void assertUsingLegacyDataRetentionPolicy(String analyticsCollectionName) throws IOException { String dataStreamName = "behavioral_analytics-events-" + analyticsCollectionName; Request getDataStreamRequest = new Request("GET", "_data_stream/" + dataStreamName); Response response = client().performRequest(getDataStreamRequest); @@ -93,7 +107,7 @@ private void assertLegacyDataRetentionPolicy(String analyticsCollectionName) thr assertNotNull(policy.evaluate(EVENT_DATA_STREAM_LEGACY_ILM_POLICY_NAME)); } - private void assertDslDataRetention(String analyticsCollectionName) throws IOException { + private void assertUsingDslDataRetention(String analyticsCollectionName) throws IOException { String dataStreamName = "behavioral_analytics-events-" + analyticsCollectionName; Request getDataStreamRequest = new Request("GET", "_data_stream/" + dataStreamName); Response response = client().performRequest(getDataStreamRequest); @@ -105,13 +119,36 @@ private void assertDslDataRetention(String analyticsCollectionName) throws IOExc for (Object dataStreamObj : dataStreams) { ObjectPath dataStream = new ObjectPath(dataStreamObj); if (dataStreamName.equals(dataStream.evaluate("name"))) { - assertNull(dataStream.evaluate("ilm_policy")); assertEquals(true, dataStream.evaluate("lifecycle.enabled")); assertEquals("180d", dataStream.evaluate("lifecycle.data_retention")); + assertEquals("Data stream lifecycle", dataStream.evaluate("next_generation_managed_by")); + assertEquals(false, dataStream.evaluate("prefer_ilm")); evaluatedNewDataStream = true; } } assertTrue(evaluatedNewDataStream); + } + private void assertDataStreamTemplateExists(String templateName) throws IOException { + assertDataStreamTemplateExists(templateName, null); + } + + private void assertDataStreamTemplateExists(String templateName, Integer minVersion) throws IOException { + try { + Request getIndexTemplateRequest = new Request("GET", "_index_template/" + templateName); + Response response = client().performRequest(getIndexTemplateRequest); + assertOK(response); + if 
(minVersion != null) { + String pathToVersion = "index_templates.0.index_template.version"; + ObjectPath indexTemplatesResponse = ObjectPath.createFromResponse(response); + assertThat(indexTemplatesResponse.evaluate(pathToVersion), greaterThanOrEqualTo(minVersion)); + } + } catch (ResponseException e) { + int status = e.getResponse().getStatusLine().getStatusCode(); + if (status == 404) { + throw new AssertionError("Waiting for the template to be created"); + } + throw e; + } } } From 9473ebbe29b554f919c4bc95a0b85ae7c2d64448 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 7 Mar 2024 12:41:03 +0000 Subject: [PATCH 041/248] Avoid computing currentInferenceProcessors on every cluster state (#106057) This computation involves parsing all the pipeline metadata on the cluster applier thread. It's pretty expensive if there are lots of pipelines, and seems mostly unnecessary because it's only needed for a validation check when creating new processors. --- docs/changelog/106057.yaml | 5 +++++ .../xpack/ml/inference/ingest/InferenceProcessor.java | 7 ++++--- 2 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/106057.yaml diff --git a/docs/changelog/106057.yaml b/docs/changelog/106057.yaml new file mode 100644 index 0000000000000..c07f658fbbf8a --- /dev/null +++ b/docs/changelog/106057.yaml @@ -0,0 +1,5 @@ +pr: 106057 +summary: Avoid computing `currentInferenceProcessors` on every cluster state +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java index 6b28a9aef9f48..32c85eb4e335e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java @@ -368,7 +368,7 @@ public static final class Factory implements Processor.Factory, Consumer config ) { + final var currentInferenceProcessors = InferenceProcessorInfoExtractor.countInferenceProcessors(clusterState); if (this.maxIngestProcessors <= currentInferenceProcessors) { throw new ElasticsearchStatusException( "Max number of inference processors reached, total inference processors [{}]. " From 5d7549c2c8f851285741aa092ba8f68902e8744b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 7 Mar 2024 14:07:14 +0100 Subject: [PATCH 042/248] [DOCS] Documents scripted metric aggregation limitation in datafeeds (#106059) --- .../ml/anomaly-detection/ml-configuring-aggregations.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index b9ae702d3ccb4..f550c27db496e 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -76,6 +76,10 @@ chart. * Your {dfeed} can contain multiple aggregations, but only the ones with names that match values in the job configuration are fed to the job. +* Using +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[scripted metric] +aggregations is not supported in {dfeeds}. 
+ [discrete] [[aggs-recommendations-dfeeds]] From 3f8504e7824786c7597925c69bdd727596f5fa9d Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 7 Mar 2024 13:09:48 +0000 Subject: [PATCH 043/248] Optimize no-op update of SafeCommitInfo (#106066) Today we always read the last safe commit on flush in order to get its document count, so we can expire any peer retention leases which are lagging unreasonably. There's no need to do any IO here if nothing has changed, we can just re-use the info from the previous commit. --- .../index/engine/CombinedDeletionPolicy.java | 45 +++++++++++++------ .../engine/CombinedDeletionPolicyTests.java | 31 +++++++++++++ 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 1a1e470519213..270bcd2297a67 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -97,21 +97,13 @@ public void onCommit(List commits) throws IOException { assert Thread.holdsLock(this) == false : "should not block concurrent acquire or release"; final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); final IndexCommit safeCommit = commits.get(keptPosition); - int totalDocsOfSafeCommit; - try { - totalDocsOfSafeCommit = getDocCountOfCommit(safeCommit); - } catch (IOException ex) { - logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); - totalDocsOfSafeCommit = safeCommitInfo.docCount; - } - IndexCommit newCommit = null; - IndexCommit previousLastCommit = null; + final var newSafeCommitInfo = getNewSafeCommitInfo(safeCommit); + final IndexCommit newCommit; + final IndexCommit previousLastCommit; List deletedCommits = null; synchronized (this) { - this.safeCommitInfo = new SafeCommitInfo( - Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), - totalDocsOfSafeCommit - ); + // we are synchronized on the IndexWriter in this method so nothing concurrently changed safeCommitInfo since the previous read + this.safeCommitInfo = newSafeCommitInfo; previousLastCommit = this.lastCommit; this.lastCommit = commits.get(commits.size() - 1); this.safeCommit = safeCommit; @@ -123,6 +115,8 @@ public void onCommit(List commits) throws IOException { } if (commitsListener != null && previousLastCommit != this.lastCommit) { newCommit = acquireIndexCommit(false); + } else { + newCommit = null; } for (int i = 0; i < keptPosition; i++) { final IndexCommit commit = commits.get(i); @@ -149,6 +143,31 @@ public void onCommit(List commits) throws IOException { } } + private SafeCommitInfo getNewSafeCommitInfo(IndexCommit newSafeCommit) { + final var currentSafeCommitInfo = this.safeCommitInfo; + final long newSafeCommitLocalCheckpoint; + try { + newSafeCommitLocalCheckpoint = Long.parseLong(newSafeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } catch (Exception ex) { + logger.info("failed to get the local checkpoint from the safe commit; use the info from the previous safe commit", ex); + return currentSafeCommitInfo; + } + + if (currentSafeCommitInfo.localCheckpoint == newSafeCommitLocalCheckpoint) { + // the new commit could in principle have the same LCP but a different doc count due to extra operations between its LCP and + // MSN, but that is a transient state 
since we'll eventually advance the LCP. The doc count is only used for heuristics around + // expiring excessively-lagging retention leases, so a little inaccuracy is tolerable here. + return currentSafeCommitInfo; + } + + try { + return new SafeCommitInfo(newSafeCommitLocalCheckpoint, getDocCountOfCommit(newSafeCommit)); + } catch (IOException ex) { + logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); + return new SafeCommitInfo(newSafeCommitLocalCheckpoint, currentSafeCommitInfo.docCount); + } + } + private boolean assertSafeCommitUnchanged(IndexCommit safeCommit) { // This is protected from concurrent calls by a lock on the IndexWriter, but this assertion makes sure that we notice if that ceases // to be true in future. It is not disastrous if safeCommitInfo refers to an older safeCommit, it just means that we might retain a diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index b50251aef011c..dfd4ad1fc0a45 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; @@ -97,6 +98,36 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { ); } + public void testReusePreviousSafeCommitInfo() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(); + final AtomicInteger getDocCountCalls = new AtomicInteger(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + logger, + new TranslogDeletionPolicy(), + new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, between(0, 100), () -> RetentionLeases.EMPTY), + globalCheckpoint::get, + null + ) { + @Override + protected int getDocCountOfCommit(IndexCommit indexCommit) { + getDocCountCalls.incrementAndGet(); + return between(0, 1000); + } + }; + + final long seqNo = between(1, 10000); + final List commitList = new ArrayList<>(); + final var translogUUID = UUID.randomUUID(); + commitList.add(mockIndexCommit(seqNo, seqNo, translogUUID)); + globalCheckpoint.set(seqNo); + indexPolicy.onCommit(commitList); + assertEquals(1, getDocCountCalls.get()); + + commitList.add(mockIndexCommit(seqNo, seqNo, translogUUID)); + indexPolicy.onCommit(commitList); + assertEquals(1, getDocCountCalls.get()); + } + public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final int extraRetainedOps = between(0, 100); From 1fcc11b0d4dddfe1f11c1688dbe86ef593f894db Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Thu, 7 Mar 2024 14:17:34 +0100 Subject: [PATCH 044/248] [Connector API] Fix serialisation of script params in connector index service (#106060) --- docs/changelog/106060.yaml | 5 + .../connector/ConnectorConfiguration.java | 98 +++++++++++++++++++ .../connector/ConnectorIndexService.java | 2 +- .../UpdateConnectorConfigurationAction.java | 5 + .../ConfigurationDependency.java | 9 ++ .../ConfigurationSelectOption.java | 9 ++ .../ConfigurationValidation.java | 9 ++ .../ConnectorConfigurationTests.java | 85 ++++++++++++++++ 8 files changed, 221 
insertions(+), 1 deletion(-) create mode 100644 docs/changelog/106060.yaml diff --git a/docs/changelog/106060.yaml b/docs/changelog/106060.yaml new file mode 100644 index 0000000000000..2b6a47372ddd3 --- /dev/null +++ b/docs/changelog/106060.yaml @@ -0,0 +1,5 @@ +pr: 106060 +summary: "[Connector API] Fix serialisation of script params in connector index service" +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java index 3b0254ef6ffcf..fc2c0920f49df 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java @@ -30,7 +30,9 @@ import org.elasticsearch.xpack.application.connector.configuration.ConfigurationValidation; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -217,6 +219,62 @@ public ConnectorConfiguration(StreamInput in) throws IOException { ); } + public String getCategory() { + return category; + } + + public Object getDefaultValue() { + return defaultValue; + } + + public List getDependsOn() { + return dependsOn; + } + + public ConfigurationDisplayType getDisplay() { + return display; + } + + public String getLabel() { + return label; + } + + public List getOptions() { + return options; + } + + public Integer getOrder() { + return order; + } + + public String getPlaceholder() { + return placeholder; + } + + public boolean isRequired() { + return required; + } + + public boolean isSensitive() { + return sensitive; + } + + public String getTooltip() { + return tooltip; + } + + public ConfigurationFieldType getType() { + return type; + } + + public List getUiRestrictions() { + return uiRestrictions; + } + + public List getValidations() { + return validations; + } + public Object getValue() { return value; } @@ -320,6 +378,46 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(value); } + public Map toMap() { + Map map = new HashMap<>(); + if (category != null) { + map.put(CATEGORY_FIELD.getPreferredName(), category); + } + map.put(DEFAULT_VALUE_FIELD.getPreferredName(), defaultValue); + if (dependsOn != null) { + map.put(DEPENDS_ON_FIELD.getPreferredName(), dependsOn.stream().map(ConfigurationDependency::toMap).toList()); + } + if (display != null) { + map.put(DISPLAY_FIELD.getPreferredName(), display.toString()); + } + map.put(LABEL_FIELD.getPreferredName(), label); + if (options != null) { + map.put(OPTIONS_FIELD.getPreferredName(), options.stream().map(ConfigurationSelectOption::toMap).toList()); + } + if (order != null) { + map.put(ORDER_FIELD.getPreferredName(), order); + } + if (placeholder != null) { + map.put(PLACEHOLDER_FIELD.getPreferredName(), placeholder); + } + map.put(REQUIRED_FIELD.getPreferredName(), required); + map.put(SENSITIVE_FIELD.getPreferredName(), sensitive); + if (tooltip != null) { + map.put(TOOLTIP_FIELD.getPreferredName(), tooltip); + } + if (type != null) { + map.put(TYPE_FIELD.getPreferredName(), type.toString()); + } + if (uiRestrictions != null) { + map.put(UI_RESTRICTIONS_FIELD.getPreferredName(), uiRestrictions); + } + if (validations != null) { + 
map.put(VALIDATIONS_FIELD.getPreferredName(), validations.stream().map(ConfigurationValidation::toMap).toList()); + } + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index ea5ec92e18007..40a6eeaafd708 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -418,7 +418,7 @@ public void updateConnectorConfiguration(UpdateConnectorConfigurationAction.Requ updateConfigurationScript, Map.of( Connector.CONFIGURATION_FIELD.getPreferredName(), - request.getConfiguration(), + request.getConfigurationAsMap(), Connector.STATUS_FIELD.getPreferredName(), ConnectorStatus.CONFIGURED.toString() ) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java index 0421d710ccdfb..9069f832e1c44 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java @@ -30,6 +30,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -70,6 +71,10 @@ public Map getConfiguration() { return configuration; } + public Map> getConfigurationAsMap() { + return configuration.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().toMap())); + } + public Map getConfigurationValues() { return configurationValues; } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java index 1efd3f47fdff0..46714eb5b34a0 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java @@ -19,6 +19,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -84,6 +86,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(FIELD_FIELD.getPreferredName(), field); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + public static ConfigurationDependency fromXContent(XContentParser parser) throws IOException { return 
PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java index ba281c69702e0..3c17f97ead51d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationSelectOption.java @@ -17,6 +17,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -60,6 +62,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(LABEL_FIELD.getPreferredName(), label); + map.put(VALUE_FIELD.getPreferredName(), value); + return map; + } + public static ConfigurationSelectOption fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java index 8f05e67ecb14d..51e912650bc1d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java @@ -19,6 +19,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -100,6 +102,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public Map toMap() { + Map map = new HashMap<>(); + map.put(CONSTRAINT_FIELD.getPreferredName(), constraint); + map.put(TYPE_FIELD.getPreferredName(), type.toString()); + return map; + } + public static ConfigurationValidation fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java index 35b21ce676a57..3a7ff819ecbf5 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java @@ -17,10 +17,14 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.connector.configuration.ConfigurationDependency; +import org.elasticsearch.xpack.application.connector.configuration.ConfigurationSelectOption; +import 
org.elasticsearch.xpack.application.connector.configuration.ConfigurationValidation; import org.junit.Before; import java.io.IOException; import java.util.List; +import java.util.Map; import static java.util.Collections.emptyList; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; @@ -188,6 +192,87 @@ public void testToXContentWithMultipleConstraintTypes() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); } + public void testToMap() { + ConnectorConfiguration configField = ConnectorTestUtils.getRandomConnectorConfigurationField(); + Map configFieldAsMap = configField.toMap(); + + if (configField.getCategory() != null) { + assertThat(configFieldAsMap.get("category"), equalTo(configField.getCategory())); + } else { + assertFalse(configFieldAsMap.containsKey("category")); + } + + assertThat(configFieldAsMap.get("default_value"), equalTo(configField.getDefaultValue())); + + if (configField.getDependsOn() != null) { + List> dependsOnAsList = configField.getDependsOn().stream().map(ConfigurationDependency::toMap).toList(); + assertThat(configFieldAsMap.get("depends_on"), equalTo(dependsOnAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("depends_on")); + } + + if (configField.getDisplay() != null) { + assertThat(configFieldAsMap.get("display"), equalTo(configField.getDisplay().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("display")); + } + + assertThat(configFieldAsMap.get("label"), equalTo(configField.getLabel())); + + if (configField.getOptions() != null) { + List> optionsAsList = configField.getOptions().stream().map(ConfigurationSelectOption::toMap).toList(); + assertThat(configFieldAsMap.get("options"), equalTo(optionsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("options")); + } + + if (configField.getOrder() != null) { + assertThat(configFieldAsMap.get("order"), equalTo(configField.getOrder())); + } else { + assertFalse(configFieldAsMap.containsKey("order")); + } + + if (configField.getPlaceholder() != null) { + assertThat(configFieldAsMap.get("placeholder"), equalTo(configField.getPlaceholder())); + } else { + assertFalse(configFieldAsMap.containsKey("placeholder")); + } + + assertThat(configFieldAsMap.get("required"), equalTo(configField.isRequired())); + assertThat(configFieldAsMap.get("sensitive"), equalTo(configField.isSensitive())); + + if (configField.getTooltip() != null) { + assertThat(configFieldAsMap.get("tooltip"), equalTo(configField.getTooltip())); + } else { + assertFalse(configFieldAsMap.containsKey("tooltip")); + } + + if (configField.getType() != null) { + assertThat(configFieldAsMap.get("type"), equalTo(configField.getType().toString())); + } else { + assertFalse(configFieldAsMap.containsKey("type")); + } + + if (configField.getUiRestrictions() != null) { + assertThat(configFieldAsMap.get("ui_restrictions"), equalTo(configField.getUiRestrictions())); + } else { + assertFalse(configFieldAsMap.containsKey("ui_restrictions")); + } + + if (configField.getValidations() != null) { + List> validationsAsList = configField.getValidations() + .stream() + .map(ConfigurationValidation::toMap) + .toList(); + assertThat(configFieldAsMap.get("validations"), equalTo(validationsAsList)); + } else { + assertFalse(configFieldAsMap.containsKey("validations")); + } + + assertThat(configFieldAsMap.get("value"), equalTo(configField.getValue())); + + } + private void assertTransportSerialization(ConnectorConfiguration 
testInstance) throws IOException { ConnectorConfiguration deserializedInstance = copyInstance(testInstance); assertNotSame(testInstance, deserializedInstance); From 01db0812a32d9b678eadf1868a718f001123299b Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Thu, 7 Mar 2024 08:59:58 -0500 Subject: [PATCH 045/248] Improve error message for CrossClusterAsyncSearchIT test (#105988) Will help debug issues like https://github.com/elastic/elasticsearch/issues/105865, which is a non-reproducible occasional test error. --- .../search/CrossClusterAsyncSearchIT.java | 45 +++++++++++++++---- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 3605d6365f867..646ba1465c7c2 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -566,7 +566,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + localShardSearchFailure.reason(), + localShardSearchFailure.reason().contains("index corrupted") + ); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); @@ -578,7 +581,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -605,7 +611,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + localShardSearchFailure.reason(), + localShardSearchFailure.reason().contains("index corrupted") + ); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); @@ -617,7 +626,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); 
assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -726,7 +738,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -776,7 +791,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -1163,7 +1181,10 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } finally { finishedResponse.decRef(); } @@ -1192,7 +1213,10 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + remoteShardSearchFailure.reason(), + remoteShardSearchFailure.reason().contains("index corrupted") + ); } } @@ -1665,7 +1689,10 @@ private static void assertAllShardsFailed(boolean minimizeRoundtrips, SearchResp assertNull(cluster.getTook()); assertFalse(cluster.isTimedOut()); ShardSearchFailure shardSearchFailure = cluster.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", shardSearchFailure.reason().contains("index corrupted")); + assertTrue( + "should have 'index corrupted' in reason but was: " + shardSearchFailure.reason(), + shardSearchFailure.reason().contains("index corrupted") + ); } protected AsyncSearchResponse 
submitAsyncSearch(SubmitAsyncSearchRequest request) throws ExecutionException, InterruptedException { From 7179c12b248cea0a849122dd251f8a1d321d8d65 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 7 Mar 2024 15:31:02 +0100 Subject: [PATCH 046/248] [Profiling] Speed up serialization of flamegraph (#105779) The response of the flamegraph is quite large: A typical response can easily reach 50MB (uncompressed). In order to reduce memory pressure and also to start sending the response sooner, we chunk the response. However, this leads to many chunks that are very small and lead to high overhead. In our experiments, just the serialization takes more than 500ms. With this commit we take the following measures: 1. We split the response into chunks only when it makes sense and otherwise send one larger chunk. 2. Serialization of doubles is very expensive: Just the serialization of annual CO2 tons takes around 80ms in our test setup. Therefore, we apply a custom serialization that is both faster than the builtin serialization as well reduces the amount of bytes sent over the wire because we round to four decimal places (which is more than sufficient for our purposes). --- docs/changelog/105779.yaml | 5 ++ .../provider/json/JsonXContentGenerator.java | 14 ++++ .../xcontent/XContentBuilder.java | 8 ++ .../xcontent/XContentGenerator.java | 5 ++ .../profiling/GetFlamegraphResponse.java | 77 ++++++++++++++----- .../xpack/profiling/NumberUtils.java | 40 ++++++++++ .../xpack/profiling/NumberUtilsTests.java | 27 +++++++ 7 files changed, 156 insertions(+), 20 deletions(-) create mode 100644 docs/changelog/105779.yaml create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java create mode 100644 x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java diff --git a/docs/changelog/105779.yaml b/docs/changelog/105779.yaml new file mode 100644 index 0000000000000..3699ca0e2f246 --- /dev/null +++ b/docs/changelog/105779.yaml @@ -0,0 +1,5 @@ +pr: 105779 +summary: "[Profiling] Speed up serialization of flamegraph" +area: Application +type: enhancement +issues: [] diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java index 1046a09f53197..f22176930da64 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java @@ -496,6 +496,20 @@ public void writeRawValue(InputStream stream, XContentType xContentType) throws } } + @Override + public void writeRawValue(String value) throws IOException { + try { + if (supportsRawWrites()) { + generator.writeRaw(value); + } else { + // fallback to a regular string for formats that don't allow writing the value as is + generator.writeString(value); + } + } catch (JsonGenerationException e) { + throw new XContentGenerationException(e); + } + } + private boolean mayWriteRawData(XContentType contentType) { // When the current generator is filtered (ie filter != null) // or the content is in a different format than the current generator, diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index 41512af0f79d4..2143814565a51 100644 --- 
a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -1212,6 +1212,14 @@ public XContentBuilder rawValue(InputStream stream, XContentType contentType) th return this; } + /** + * Writes a value with the source coming directly from a pre-rendered string representation + */ + public XContentBuilder rawValue(String value) throws IOException { + generator.writeRawValue(value); + return this; + } + public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException { generator.copyCurrentStructure(parser); return this; diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 97739635932a3..5037ed0b40664 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -105,6 +105,11 @@ public interface XContentGenerator extends Closeable, Flushable { */ void writeRawValue(InputStream value, XContentType xContentType) throws IOException; + /** + * Writes a raw value taken from a pre-rendered string representation + */ + void writeRawValue(String value) throws IOException; + void copyCurrentStructure(XContentParser parser) throws IOException; /** diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 468b74ed16000..65b342abddd9d 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -191,27 +191,64 @@ public Iterator toXContentChunked(ToXContent.Params params ChunkedToXContentHelper.array("ExeFilename", Iterators.map(fileNames.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("AddressOrLine", Iterators.map(addressOrLines.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("FunctionName", Iterators.map(functionNames.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("FunctionOffset", Iterators.map(functionOffsets.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("FunctionOffset"); + for (int functionOffset : functionOffsets) { + b.value(functionOffset); + } + return b.endArray(); + }), ChunkedToXContentHelper.array("SourceFilename", Iterators.map(sourceFileNames.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("SourceLine", Iterators.map(sourceLines.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("CountInclusive", Iterators.map(countInclusive.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array("CountExclusive", Iterators.map(countExclusive.iterator(), e -> (b, p) -> b.value(e))), - ChunkedToXContentHelper.array( - "AnnualCO2TonsInclusive", - Iterators.map(annualCO2TonsInclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - ChunkedToXContentHelper.array( - "AnnualCO2TonsExclusive", - Iterators.map(annualCO2TonsExclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - ChunkedToXContentHelper.array( - "AnnualCostsUSDInclusive", - Iterators.map(annualCostsUSDInclusive.iterator(), e -> (b, p) -> b.value(e)) - ), - 
ChunkedToXContentHelper.array( - "AnnualCostsUSDExclusive", - Iterators.map(annualCostsUSDExclusive.iterator(), e -> (b, p) -> b.value(e)) - ), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("SourceLine"); + for (int sourceLine : sourceLines) { + b.value(sourceLine); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("CountInclusive"); + for (long countInclusive : countInclusive) { + b.value(countInclusive); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("CountExclusive"); + for (long c : countExclusive) { + b.value(c); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCO2TonsInclusive"); + for (double co2Tons : annualCO2TonsInclusive) { + // write as raw value - we need direct control over the output representation (here: limit to 4 decimal places) + b.rawValue(NumberUtils.doubleToString(co2Tons)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCO2TonsExclusive"); + for (double co2Tons : annualCO2TonsExclusive) { + b.rawValue(NumberUtils.doubleToString(co2Tons)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCostsUSDInclusive"); + for (double costs : annualCostsUSDInclusive) { + b.rawValue(NumberUtils.doubleToString(costs)); + } + return b.endArray(); + }), + ChunkedToXContentHelper.singleChunk((b, p) -> { + b.startArray("AnnualCostsUSDExclusive"); + for (double costs : annualCostsUSDExclusive) { + b.rawValue(NumberUtils.doubleToString(costs)); + } + return b.endArray(); + }), Iterators.single((b, p) -> b.field("Size", size)), Iterators.single((b, p) -> b.field("SamplingRate", samplingRate)), Iterators.single((b, p) -> b.field("SelfCPU", selfCPU)), diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java new file mode 100644 index 0000000000000..d346dd279f250 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/NumberUtils.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +final class NumberUtils { + private NumberUtils() { + // no instances intended + } + + /** + * Converts a positive double number to a string. + * + * @param value The double value. + * @return The corresponding string representation rounded to four fractional digits. 
+ */
+ public static String doubleToString(double value) {
+ if (value < 0.0001d) {
+ return "0";
+ }
+ StringBuilder sb = new StringBuilder();
+ int i = (int) value;
+ int f = (int) ((value - i) * 10000.0d + 0.5d);
+ sb.append(i);
+ sb.append(".");
+ if (f < 10) {
+ sb.append("000");
+ } else if (f < 100) {
+ sb.append("00");
+ } else if (f < 1000) {
+ sb.append("0");
+ }
+ sb.append(f);
+ return sb.toString();
+ }
+}
diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java
new file mode 100644
index 0000000000000..0b8a410f9bb66
--- /dev/null
+++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/NumberUtilsTests.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+import org.elasticsearch.test.ESTestCase;
+
+public class NumberUtilsTests extends ESTestCase {
+ public void testConvertNumberToString() {
+ assertEquals("872.6182", NumberUtils.doubleToString(872.6181989583333d));
+ assertEquals("1222.1833", NumberUtils.doubleToString(1222.18325d));
+ assertEquals("1222.1832", NumberUtils.doubleToString(1222.18324d));
+ assertEquals("1.0013", NumberUtils.doubleToString(1.0013d));
+ assertEquals("10.0220", NumberUtils.doubleToString(10.022d));
+ assertEquals("222.0000", NumberUtils.doubleToString(222.0d));
+ assertEquals("0.0001", NumberUtils.doubleToString(0.0001d));
+ }
+
+ public void testConvertZeroToString() {
+ assertEquals("0", NumberUtils.doubleToString(0.0d));
+ assertEquals("0", NumberUtils.doubleToString(0.00009d));
+ }
+}
From 422952d708ca295b106be589f7de355a4d6fe9cd Mon Sep 17 00:00:00 2001
From: Pat Whelan
Date: Thu, 7 Mar 2024 10:17:05 -0500
Subject: [PATCH 047/248] [Transform] Make use of delegateFailureAndWrap (#106034)

* [Transform] Make use of delegateFailureAndWrap

Refactoring to a later pattern of ActionListener, reducing memory footprint and removing some redundant lines of code.
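For context on the pattern this commit adopts: `delegateFailureAndWrap` delegates the failure path straight to the outer listener and only wraps the response handling, instead of building a separate `ActionListener.wrap(..., listener::onFailure)`. Below is a minimal sketch of the before/after shape of such a refactor; `loadConfig` and `enrich` are invented placeholder steps (not transform code), while `ActionListener.wrap` and `delegateFailureAndWrap` are the real Elasticsearch APIs, used the same way the diff below uses them.

// Sketch only: the async steps here are hypothetical placeholders.
import org.elasticsearch.action.ActionListener;

class DelegateFailureAndWrapSketch {

    // Older pattern: wrap() needs an explicit failure branch that just forwards to the outer listener.
    void before(String id, ActionListener<String> listener) {
        loadConfig(id, ActionListener.wrap(config -> enrich(config, listener), listener::onFailure));
    }

    // Newer pattern: failures are delegated to the outer listener automatically; only the response path is wrapped.
    void after(String id, ActionListener<String> listener) {
        loadConfig(id, listener.delegateFailureAndWrap((l, config) -> enrich(config, l)));
    }

    void loadConfig(String id, ActionListener<String> listener) {
        listener.onResponse("config-for-" + id); // stand-in for an async lookup
    }

    void enrich(String config, ActionListener<String> listener) {
        listener.onResponse(config + "+enriched");
    }
}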
--------- Co-authored-by: Elastic Machine --- .../action/TransportPutTransformAction.java | 50 ++++++++----------- 1 file changed, 20 insertions(+), 30 deletions(-) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 2c9fc8ffce5bf..8a82880f4d9a3 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -42,13 +42,10 @@ import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.AuthorizationStatePersistenceUtils; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; -import org.elasticsearch.xpack.transform.transforms.Function; import org.elasticsearch.xpack.transform.transforms.FunctionFactory; import java.time.Instant; -import java.util.List; -import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.transform.utils.SecondaryAuthorizationUtils.getSecurityHeadersPreferringSecondary; public class TransportPutTransformAction extends AcknowledgedTransportMasterNodeAction { @@ -108,21 +105,19 @@ protected void masterOperation(Task task, Request request, ClusterState clusterS } // <3> Create the transform - ActionListener validateTransformListener = ActionListener.wrap( - unusedValidationResponse -> putTransform(request, listener), - listener::onFailure + ActionListener validateTransformListener = listener.delegateFailureAndWrap( + (l, unused) -> putTransform(request, l) ); // <2> Validate source and destination indices - ActionListener checkPrivilegesListener = ActionListener.wrap( - aVoid -> ClientHelper.executeAsyncWithOrigin( + ActionListener checkPrivilegesListener = validateTransformListener.delegateFailureAndWrap( + (l, aVoid) -> ClientHelper.executeAsyncWithOrigin( client, ClientHelper.TRANSFORM_ORIGIN, ValidateTransformAction.INSTANCE, new ValidateTransformAction.Request(config, request.isDeferValidation(), request.timeout()), - validateTransformListener - ), - listener::onFailure + l + ) ); // <1> Early check to verify that the user can create the destination index and can read from the source @@ -170,24 +165,19 @@ protected ClusterBlockException checkBlock(PutTransformAction.Request request, C } private void putTransform(Request request, ActionListener listener) { - - final TransformConfig config = request.getConfig(); - // create the function for validation - final Function function = FunctionFactory.create(config); - - // <2> Return to the listener - ActionListener putTransformConfigurationListener = ActionListener.wrap(putTransformConfigurationResult -> { - logger.debug("[{}] created transform", config.getId()); - auditor.info(config.getId(), "Created transform."); - List warnings = TransformConfigLinter.getWarnings(function, config.getSource(), config.getSyncConfig()); - for (String warning : warnings) { - logger.warn(() -> format("[%s] %s", config.getId(), warning)); - auditor.warning(config.getId(), warning); - } - listener.onResponse(AcknowledgedResponse.TRUE); - }, listener::onFailure); - - // <1> Put our transform - transformConfigManager.putTransformConfiguration(config, putTransformConfigurationListener); + var config = request.getConfig(); + 
transformConfigManager.putTransformConfiguration(config, listener.delegateFailureAndWrap((l, unused) -> { + var transformId = config.getId(); + logger.debug("[{}] created transform", transformId); + auditor.info(transformId, "Created transform."); + + var validationFunc = FunctionFactory.create(config); + TransformConfigLinter.getWarnings(validationFunc, config.getSource(), config.getSyncConfig()).forEach(warning -> { + logger.warn("[{}] {}", transformId, warning); + auditor.warning(transformId, warning); + }); + + l.onResponse(AcknowledgedResponse.TRUE); + })); } } From 393bdbc0f30d69daaef58523cdeb09ae9ce3c19d Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Thu, 7 Mar 2024 16:45:34 +0100 Subject: [PATCH 048/248] Consider ShardRouting roles when calculating shard copies in shutdown status (#106063) Not considering the roles can result in returning COMPLETE in stateless if the other copy is INDEX_ONLY. This can result in unavailability of search shards which is considered RED health in stateless. With this change, we'll return STALLED which prevents ES from exiting. Relates ES-7999 --- docs/changelog/106063.yaml | 5 +++ .../TransportGetShutdownStatusAction.java | 1 + ...TransportGetShutdownStatusActionTests.java | 45 +++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 docs/changelog/106063.yaml diff --git a/docs/changelog/106063.yaml b/docs/changelog/106063.yaml new file mode 100644 index 0000000000000..57c05370a943f --- /dev/null +++ b/docs/changelog/106063.yaml @@ -0,0 +1,5 @@ +pr: 106063 +summary: Consider `ShardRouting` roles when calculating shard copies in shutdown status +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 3832bbf488045..9e8c54ba594ea 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -376,6 +376,7 @@ private static boolean hasShardCopyOnAnotherNode(ClusterState clusterState, Shar .allShards(shardRouting.index().getName()) .stream() .filter(sr -> sr.id() == shardRouting.id()) + .filter(sr -> sr.role().equals(shardRouting.role())) // If any shards are both 1) `STARTED` and 2) are not on a node that's shutting down, we have at least one copy // of this shard safely on a node that's not shutting down, so we don't want to report `STALLED` because of this shard. 
.filter(ShardRouting::started)
diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java
index 2d4aaada484ad..9807fa72247a7 100644
--- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java
+++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java
@@ -468,6 +468,51 @@ public void testStalledUnassigned() {
 );
 }
+ public void testStalledIfShardCopyOnAnotherNodeHasDifferentRole() {
+ Index index = new Index(randomIdentifier(), randomUUID());
+ IndexMetadata imd = generateIndexMetadata(index, 3, 0);
+ IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index)
+ .addShard(
+ new TestShardRouting.Builder(new ShardId(index, 0), LIVE_NODE_ID, true, ShardRoutingState.STARTED).withRole(
+ ShardRouting.Role.INDEX_ONLY
+ ).build()
+ )
+ .addShard(
+ new TestShardRouting.Builder(new ShardId(index, 0), SHUTTING_DOWN_NODE_ID, false, ShardRoutingState.STARTED).withRole(
+ ShardRouting.Role.SEARCH_ONLY
+ ).build()
+ )
+ .build();
+
+ // Force a decision of NO for all moves and new allocations, simulating a decider that's stuck
+ canAllocate.set((r, n, a) -> Decision.NO);
+ // And the remain decider simulates NodeShutdownAllocationDecider
+ canRemain.set((r, n, a) -> n.nodeId().equals(SHUTTING_DOWN_NODE_ID) ? Decision.NO : Decision.YES);
+
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ routingTable.add(indexRoutingTable);
+ ClusterState state = createTestClusterState(routingTable.build(), List.of(imd), SingleNodeShutdownMetadata.Type.REMOVE);
+
+ ShutdownShardMigrationStatus status = TransportGetShutdownStatusAction.shardMigrationStatus(
+ new CancellableTask(1, "direct", GetShutdownStatusAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()),
+ state,
+ SHUTTING_DOWN_NODE_ID,
+ SingleNodeShutdownMetadata.Type.SIGTERM,
+ true,
+ clusterInfoService,
+ snapshotsInfoService,
+ allocationService,
+ allocationDeciders
+ );
+
+ assertShardMigration(
+ status,
+ SingleNodeShutdownMetadata.Status.STALLED,
+ 1,
+ allOf(containsString(index.getName()), containsString("[0] [replica]"))
+ );
+ }
+
 public void testNotStalledIfAllShardsHaveACopyOnAnotherNode() {
 Index index = new Index(randomAlphaOfLength(5), randomAlphaOfLengthBetween(1, 20));
 IndexMetadata imd = generateIndexMetadata(index, 3, 0);
From 1e445d9a4f1008ad7a98f5abd9ff787726df9c52 Mon Sep 17 00:00:00 2001
From: Ignacio Vera
Date: Thu, 7 Mar 2024 17:17:11 +0100
Subject: [PATCH 049/248] throw IllegalArgumentException instead of AggregationInitializationException when adding a sub-aggregation to a metric aggregation (#106074)

This is to avoid returning code 500 in such cases.
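Background on why the exception type matters for the HTTP status: an `ElasticsearchException` subclass such as `AggregationInitializationException` reports `INTERNAL_SERVER_ERROR` (500) when it carries no cause, whereas Elasticsearch maps `IllegalArgumentException` to `BAD_REQUEST` (400). The snippet below is an illustrative sketch of that mapping using the core helper classes; it is not part of this patch.

// Sketch of the status mapping the commit message refers to.
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.rest.RestStatus;

public class StatusMappingSketch {
    public static void main(String[] args) {
        // A plain ElasticsearchException without a cause resolves to 500.
        RestStatus oldStatus = new ElasticsearchException("cannot accept sub-aggregations").status();
        // ExceptionsHelper maps IllegalArgumentException to 400.
        RestStatus newStatus = ExceptionsHelper.status(new IllegalArgumentException("cannot accept sub-aggregations"));
        System.out.println(oldStatus + " -> " + newStatus); // INTERNAL_SERVER_ERROR -> BAD_REQUEST
    }
}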
--- .../metric/ArrayValuesSourceAggregationBuilder.java | 9 ++------- .../aggregations/AggregationInitializationException.java | 4 ---- .../bucket/terms/SignificantTextAggregationBuilder.java | 9 ++------- .../aggregations/metrics/TopHitsAggregationBuilder.java | 5 +---- .../support/MultiValuesSourceAggregationBuilder.java | 9 ++------- .../support/ValuesSourceAggregationBuilder.java | 9 ++------- .../search/aggregations/metrics/GeoBoundsTests.java | 3 +-- .../search/aggregations/metrics/TopHitsTests.java | 3 +-- .../topmetrics/TopMetricsAggregationBuilder.java | 5 +---- .../topmetrics/TopMetricsAggregationBuilderTests.java | 3 +-- .../aggregations/metrics/CartesianBoundsTests.java | 3 +-- 11 files changed, 14 insertions(+), 48 deletions(-) diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java index 0f277ecd6c478..4cfd55a240451 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -45,9 +44,7 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -60,9 +57,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java index f477cd884f79a..efe3af7ca27bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationInitializationException.java @@ -17,10 +17,6 @@ */ public class AggregationInitializationException extends ElasticsearchException { - public AggregationInitializationException(String msg) { - super(msg); - } - public AggregationInitializationException(String msg, Throwable cause) { super(msg, cause); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index 99dc93a175f7b..9b042ab4a6966 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; @@ -129,16 +128,12 @@ public TermsAggregator.BucketCountThresholds bucketCountThresholds() { @Override public SignificantTextAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override public SignificantTextAggregationBuilder subAggregation(AggregationBuilder aggregation) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index c5dca1271c891..c9ccc1c6936d5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -21,7 +21,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -490,9 +489,7 @@ public TopHitsAggregationBuilder trackScores(boolean trackScores) { @Override public TopHitsAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 7e0c235ee4fb3..ecacf8cca8d01 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.xcontent.XContentBuilder; @@ -44,9 +43,7 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -59,9 +56,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 48afb79b95e90..0c1de6006ffa5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.xcontent.AbstractObjectParser; @@ -119,9 +118,7 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } } @@ -134,9 +131,7 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public final AB subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java index 61428fdeb335a..f0345a70294e0 100644 --- 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -53,7 +52,7 @@ public void testFailWithSubAgg() throws Exception { """; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [viewport] of type [geo_bounds] cannot accept sub-aggregations")); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index 39f583fd2c56b..e5c5dbbe64696 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -170,7 +169,7 @@ public void testFailWithSubAgg() throws Exception { }"""; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java index ba9dc7ab7eed9..e7e06946a5289 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -173,9 +172,7 @@ 
protected void doWriteTo(StreamOutput out) throws IOException { @Override public TopMetricsAggregationBuilder subAggregations(Builder subFactories) { - throw new AggregationInitializationException( - "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" - ); + throw new IllegalArgumentException("Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"); } @Override diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java index b2986c3f1f170..b8086e038a626 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilderTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationBuilder; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; @@ -88,7 +87,7 @@ protected TopMetricsAggregationBuilder mutateInstance(TopMetricsAggregationBuild } public void testValidation() { - AggregationInitializationException e = expectThrows(AggregationInitializationException.class, () -> { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { List> sortBuilders = singletonList( new FieldSortBuilder(randomAlphaOfLength(5)).order(randomFrom(SortOrder.values())) ); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java index 89e186fafd994..244439889aa0b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.xcontent.XContentParseException; @@ -59,7 +58,7 @@ public void testFailWithSubAgg() throws Exception { """; XContentParser parser = createParser(JsonXContent.jsonXContent, source); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - Exception e = expectThrows(AggregationInitializationException.class, () -> AggregatorFactories.parseAggregators(parser)); + Exception e = expectThrows(IllegalArgumentException.class, () -> AggregatorFactories.parseAggregators(parser)); assertThat(e.toString(), containsString("Aggregator [viewport] of type [cartesian_bounds] cannot accept sub-aggregations")); } From e59b67a3df4059c9639eec068ef76ba9d5ce7bdd Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 7 Mar 2024 19:23:43 +0100 Subject: [PATCH 
050/248] Grow buckets on RangeAggregator eagerly when it is the top aggregator (#105940) --- .../bucket/range/BinaryRangeAggregator.java | 16 +++++++++++-- .../bucket/range/RangeAggregator.java | 24 +++++++++++++++++-- 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index be9fca9acdbb5..51901b422c861 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -92,17 +92,29 @@ public ScoreMode scoreMode() { return super.scoreMode(); } + @FunctionalInterface + private interface BucketCollector { + void accept(LeafBucketCollector sub, int doc, long subBucketOrdinal) throws IOException; + } + @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + BucketCollector collector; + if (parent() == null) { + grow(ranges.length); + collector = this::collectExistingBucket; + } else { + collector = this::collectBucket; + } if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) { SortedSetDocValues values = ((ValuesSource.Bytes.WithOrdinals) valuesSource).ordinalsValues(aggCtx.getLeafReaderContext()); return new SortedSetRangeLeafCollector(values, ranges, sub) { @Override protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException { - collectBucket(sub, doc, bucket); + collector.accept(sub, doc, bucket); } }; } else { @@ -110,7 +122,7 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I return new SortedBinaryRangeLeafCollector(values, ranges, sub) { @Override protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException { - collectBucket(sub, doc, bucket); + collector.accept(sub, doc, bucket); } }; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 7d7e1a1a03bc4..9640395712b23 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -715,8 +715,16 @@ static class NoOverlap extends NumericRangeAggregator { cardinality, metadata ); + if (parent == null) { + grow(ranges.length); + this.collector = this::collectExistingBucket; + } else { + this.collector = this::collectBucket; + } } + private final BucketCollector collector; + @Override protected int collect(LeafBucketCollector sub, int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { int lo = lowBound, hi = ranges.length - 1; @@ -727,7 +735,7 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin } else if (value >= ranges[mid].to) { lo = mid + 1; } else { - collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, mid)); + collector.accept(sub, doc, subBucketOrdinal(owningBucketOrdinal, mid)); // The next value must fall in the next bucket to be collected. 
return mid + 1; } @@ -736,6 +744,11 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin } } + @FunctionalInterface + private interface BucketCollector { + void accept(LeafBucketCollector sub, int doc, long subBucketOrdinal) throws IOException; + } + private static class Overlap extends NumericRangeAggregator { Overlap( String name, @@ -770,9 +783,16 @@ private static class Overlap extends NumericRangeAggregator { for (int i = 1; i < ranges.length; ++i) { maxTo[i] = Math.max(ranges[i].to, maxTo[i - 1]); } + if (parent == null) { + grow(ranges.length); + this.collector = this::collectExistingBucket; + } else { + this.collector = this::collectBucket; + } } private final double[] maxTo; + private final BucketCollector collector; @Override protected int collect(LeafBucketCollector sub, int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { @@ -817,7 +837,7 @@ protected int collect(LeafBucketCollector sub, int doc, double value, long ownin for (int i = startLo; i <= endHi; ++i) { if (ranges[i].matches(value)) { - collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); + collector.accept(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); } } From 6b430ae75013f2038f01ee34a2749841afc0fbd7 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 7 Mar 2024 10:30:18 -0800 Subject: [PATCH 051/248] Speed up serialization of BytesRefArray (#106053) Currently, we are reading and writing byte by byte during the serialization and deserialization of a BytesRefArray. We can improve the performance by reading/writing through the backing pages or the underlying array instead. I will open a follow-up PR to utilize this change in serializing BytesRefBlock in ESQL. --- docs/changelog/106053.yaml | 5 ++++ .../elasticsearch/common/util/BigArrays.java | 23 ++++++++++++++++ .../common/util/BigByteArray.java | 27 +++++++++++++++++++ .../elasticsearch/common/util/ByteArray.java | 12 +++++++++ .../common/util/BytesRefArray.java | 18 ++++++++----- .../common/util/ReleasableByteArray.java | 12 +++++++++ .../common/util/BigArraysTests.java | 23 ++++++++++++++++ .../common/util/MockBigArrays.java | 12 +++++++++ 8 files changed, 125 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/106053.yaml diff --git a/docs/changelog/106053.yaml b/docs/changelog/106053.yaml new file mode 100644 index 0000000000000..72cfe0207795d --- /dev/null +++ b/docs/changelog/106053.yaml @@ -0,0 +1,5 @@ +pr: 106053 +summary: Speed up serialization of `BytesRefArray` +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index edec336c2a028..d1367f41d9d87 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -10,10 +10,12 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.core.Nullable; @@ -143,6 +145,27 @@ public void fill(long 
fromIndex, long toIndex, byte value) { Arrays.fill(array, (int) fromIndex, (int) toIndex, value); } + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + boolean visited = false; + + @Override + public BytesRef next() { + if (visited) { + return null; + } + visited = true; + return new BytesRef(array, 0, Math.toIntExact(size())); + } + }; + } + + @Override + public void fillWith(StreamInput in) throws IOException { + in.readBytes(array, 0, Math.toIntExact(size())); + } + @Override public boolean hasArray() { return true; diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 2c623882afe14..c5a04e273e487 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -18,6 +20,7 @@ import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.BYTE_PAGE_SIZE; +import static org.elasticsearch.common.util.PageCacheRecycler.PAGE_SIZE_IN_BYTES; /** * Byte array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of @@ -139,6 +142,30 @@ public byte[] array() { throw new UnsupportedOperationException(); } + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + int i = 0; + + @Override + public BytesRef next() { + if (i >= pages.length) { + return null; + } + int len = i == pages.length - 1 ? Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES) : PAGE_SIZE_IN_BYTES; + return new BytesRef(pages[i++], 0, len); + } + }; + } + + @Override + public void fillWith(StreamInput in) throws IOException { + for (int i = 0; i < pages.length - 1; i++) { + in.readBytes(pages[i], 0, PAGE_SIZE_IN_BYTES); + } + in.readBytes(pages[pages.length - 1], 0, Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES)); + } + @Override protected int numBytesPerElement() { return 1; diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java index e3b51ee7d2e32..861aa4f9c7eea 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; @@ -51,6 +52,17 @@ static ByteArray readFrom(StreamInput in) throws IOException { */ void fill(long fromIndex, long toIndex, byte value); + /** + * Fills this ByteArray with bytes from the given input stream + */ + void fillWith(StreamInput in) throws IOException; + + /** + * Returns a BytesRefIterator for this ByteArray. This method allows + * access to the internal pages of this reference without copying them. 
+ */ + BytesRefIterator iterator(); + /** * Checks if this instance is backed by a single byte array analogous to {@link ByteBuffer#hasArray()}. */ diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java index c78db448380b3..d8675135a8cfe 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,10 +65,7 @@ public BytesRefArray(StreamInput in, BigArrays bigArrays) throws IOException { // bytes long sizeOfBytes = in.readVLong(); bytes = bigArrays.newByteArray(sizeOfBytes, false); - - for (long i = 0; i < sizeOfBytes; ++i) { - bytes.set(i, in.readByte()); - } + bytes.fillWith(in); success = true; } finally { @@ -149,11 +147,17 @@ public void writeTo(StreamOutput out) throws IOException { } // bytes might be overallocated, the last bucket of startOffsets contains the real size - long sizeOfBytes = startOffsets.get(size); + final long sizeOfBytes = startOffsets.get(size); out.writeVLong(sizeOfBytes); - for (long i = 0; i < sizeOfBytes; ++i) { - out.writeByte(bytes.get(i)); + final BytesRefIterator bytesIt = bytes.iterator(); + BytesRef bytesRef; + long remained = sizeOfBytes; + while (remained > 0 && (bytesRef = bytesIt.next()) != null) { + int length = Math.toIntExact(Math.min(remained, bytesRef.length)); + remained -= length; + out.writeBytes(bytesRef.bytes, bytesRef.offset, length); } + assert remained == 0 : remained; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java index 0102195f4e809..abb13b5395333 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; @@ -88,6 +89,17 @@ public byte[] array() { throw new UnsupportedOperationException(); } + @Override + public BytesRefIterator iterator() { + assert ref.hasReferences(); + return ref.iterator(); + } + + @Override + public void fillWith(StreamInput in) { + throw new UnsupportedOperationException("read-only ByteArray"); + } + @Override public long ramBytesUsed() { /* diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 3372aa9bc685b..946effda16a76 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -9,9 +9,11 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import 
org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -275,6 +277,27 @@ public void testByteArrayBulkSet() { array2.close(); } + public void testByteIterator() throws Exception { + final byte[] bytes = new byte[randomIntBetween(1, 4000000)]; + random().nextBytes(bytes); + ByteArray array = bigArrays.newByteArray(bytes.length, randomBoolean()); + array.fillWith(new ByteArrayStreamInput(bytes)); + for (int i = 0; i < bytes.length; i++) { + assertEquals(bytes[i], array.get(i)); + } + BytesRefIterator it = array.iterator(); + BytesRef ref; + int offset = 0; + while ((ref = it.next()) != null) { + for (int i = 0; i < ref.length; i++) { + assertEquals(bytes[offset], ref.bytes[ref.offset + i]); + offset++; + } + } + assertThat(offset, equalTo(bytes.length)); + array.close(); + } + public void testByteArrayEquals() { final ByteArray empty1 = byteArrayWithBytes(BytesRef.EMPTY_BYTES); final ByteArray empty2 = byteArrayWithBytes(BytesRef.EMPTY_BYTES); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 5f6e50a7c83e0..902e089679f49 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -17,9 +17,11 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -398,6 +400,16 @@ public void fill(long fromIndex, long toIndex, byte value) { in.fill(fromIndex, toIndex, value); } + @Override + public BytesRefIterator iterator() { + return in.iterator(); + } + + @Override + public void fillWith(StreamInput streamInput) throws IOException { + in.fillWith(streamInput); + } + @Override public boolean hasArray() { return in.hasArray(); From 9da3caa0b24c22e27c3a2e0c29a2a1548146e549 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 7 Mar 2024 10:36:35 -0800 Subject: [PATCH 052/248] Split out Java matrix BWC tasks (#106078) --- .buildkite/pipelines/periodic.template.yml | 36 ++++++++++++++++++++- .buildkite/pipelines/periodic.yml | 37 +++++++++++++++++++++- build.gradle | 5 +++ 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index 535305a7ebd33..42e922462c7ac 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -49,7 +49,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp @@ -59,6 +58,22 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-fips-matrix-bwc" + command: .ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true -Dtests.fips.enabled=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + BWC_VERSION: $BWC_LIST + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - group: java-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" @@ -85,6 +100,25 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk21 + - openjdk22 + BWC_VERSION: $BWC_LIST + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: release-tests command: .buildkite/scripts/release-tests.sh timeout_in_minutes: 360 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 13c4566301afc..4af58e23db2b9 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1,3 +1,4 @@ +# This file is auto-generated. See .buildkite/pipelines/periodic.yml # This file is auto-generated. See .buildkite/pipelines/periodic.template.yml steps: - group: bwc @@ -1230,7 +1231,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp @@ -1240,6 +1240,22 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-fips-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - group: java-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" @@ -1266,6 +1282,25 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.BWC_VERSION}} / java-matrix-bwc" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk21 + - openjdk22 + BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"] + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: release-tests command: .buildkite/scripts/release-tests.sh timeout_in_minutes: 360 diff --git a/build.gradle b/build.gradle index c0b613beefea4..b499d619f246e 100644 --- a/build.gradle +++ b/build.gradle @@ -108,6 +108,11 @@ tasks.register("updateCIBwcVersions") { 
".buildkite/pipelines/periodic.bwc.template.yml", BuildParams.bwcVersions.allIndexCompatible ) + writeBuildkiteList( + ".buildkite/pipelines/periodic.yml", + ".buildkite/pipelines/periodic.yml", + BuildParams.bwcVersions.unreleasedIndexCompatible + ) writeBuildkiteSteps( ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", From 2fd05c538828399d3008948b67c638ea0a67decc Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Thu, 7 Mar 2024 15:03:30 -0500 Subject: [PATCH 053/248] [ML] Preserving thread context in inference API request executor (#106040) * Adding test to ensure the context is preserved * Cleaning up * Spelling * moving strings to constants --- .../http/sender/RequestExecutorService.java | 11 +++- .../sender/ExecutableRequestCreatorTests.java | 15 +++-- .../sender/RequestExecutorServiceTests.java | 60 +++++++++++++++++++ 3 files changed, 80 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java index 00c28e8afc069..ecbaf26ea17f4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; @@ -269,7 +270,15 @@ public void execute( @Nullable TimeValue timeout, ActionListener listener ) { - var task = new RequestTask(requestCreator, input, timeout, threadPool, listener); + var task = new RequestTask( + requestCreator, + input, + timeout, + threadPool, + // TODO when multi-tenancy (as well as batching) is implemented we need to be very careful that we preserve + // the thread contexts correctly to avoid accidentally retrieving the credentials for the wrong user + ContextPreservingActionListener.wrapPreservingContext(listener, threadPool.getThreadContext()) + ); if (isShutdown()) { EsRejectedExecutionException rejected = new EsRejectedExecutionException( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java index 24f930a818134..b4e770141939b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableRequestCreatorTests.java @@ -9,13 +9,15 @@ import org.apache.http.client.protocol.HttpClientContext; import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import 
org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; import org.elasticsearch.xpack.inference.external.request.RequestTests; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -33,16 +35,19 @@ public static ExecutableRequestCreator createMock(RequestSender requestSender) { public static ExecutableRequestCreator createMock(RequestSender requestSender, String modelId) { var mockCreator = mock(ExecutableRequestCreator.class); - when(mockCreator.create(anyList(), any(), any(), any(), any())).thenReturn(() -> { - requestSender.send( + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[4]; + return (Runnable) () -> requestSender.send( mock(Logger.class), RequestTests.mockRequest(modelId), HttpClientContext.create(), () -> false, mock(ResponseHandler.class), - new PlainActionFuture<>() + listener ); - }); + }).when(mockCreator).create(anyList(), any(), any(), any(), any()); return mockCreator; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java index ebad28095294b..5e88c3f1bb8f5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java @@ -9,9 +9,11 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; @@ -215,6 +217,64 @@ public void testSend_CallsOnFailure_WhenRequestTimesOut() { ); } + public void testSend_PreservesThreadContext() throws InterruptedException, ExecutionException, TimeoutException { + var headerKey = "not empty"; + var headerValue = "value"; + + var service = createRequestExecutorServiceWithMocks(); + + // starting this on a separate thread to ensure we aren't using the same thread context that the rest of the test will execute with + threadPool.generic().execute(service::start); + + ThreadContext threadContext = threadPool.getThreadContext(); + threadContext.putHeader(headerKey, headerValue); + + var requestSender = mock(RetryingHttpSender.class); + + var waitToShutdown = new CountDownLatch(1); + var waitToReturnFromSend = new CountDownLatch(1); + + // this code will be executed by the queue's thread + doAnswer(invocation -> { + var serviceThreadContext = threadPool.getThreadContext(); + // ensure that the spawned thread didn't pick up the header that was set initially on a separate thread + assertNull(serviceThreadContext.getHeader(headerKey)); + + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[5]; + listener.onResponse(null); + + 
waitToShutdown.countDown(); + waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + return Void.TYPE; + }).when(requestSender).send(any(), any(), any(), any(), any(), any()); + + var finishedOnResponse = new CountDownLatch(1); + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(InferenceServiceResults ignore) { + // if we've preserved the thread context correctly then the header should still exist + ThreadContext listenerContext = threadPool.getThreadContext(); + assertThat(listenerContext.getHeader(headerKey), is(headerValue)); + finishedOnResponse.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException("onFailure shouldn't be called", e); + } + }; + + service.execute(ExecutableRequestCreatorTests.createMock(requestSender), List.of(), null, listener); + + Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service); + + executorTermination.get(TIMEOUT.millis(), TimeUnit.MILLISECONDS); + assertTrue(service.isTerminated()); + + finishedOnResponse.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + } + public void testSend_NotifiesTasksOfShutdown() { var service = createRequestExecutorServiceWithMocks(); From 81f033f6f493fa8b45c461884e6921b83ba14735 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 7 Mar 2024 15:52:18 -0500 Subject: [PATCH 054/248] ESQL: Fix order in block loading tests (#106087) The tests for loading `Block`s from scripted fields could fail randomly when the `RandomIndexWriter` shuffles the documents. This disables merging and adds the documents as a block so their order is consistent. Closes #106044 --- .../index/mapper/BooleanScriptFieldTypeTests.java | 15 +++++++++++---- .../index/mapper/DateScriptFieldTypeTests.java | 15 +++++++++++---- .../index/mapper/DoubleScriptFieldTypeTests.java | 15 +++++++++++---- .../index/mapper/IpScriptFieldTypeTests.java | 15 +++++++++++---- .../index/mapper/KeywordScriptFieldTypeTests.java | 15 +++++++++++---- .../index/mapper/LongScriptFieldTypeTests.java | 15 +++++++++++---- 6 files changed, 66 insertions(+), 24 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index 5040225373f54..d55eaf9df3452 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; @@ -411,11 +412,17 @@ public XContentParser parser() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, 
newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { BooleanScriptFieldType fieldType = build("xor_param", Map.of("param", false), OnScriptError.FAIL); List expected = List.of(false, true); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index c5ba39b972651..25a79022c245e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; @@ -471,11 +472,17 @@ public void testLegacyDateFormatName() throws IOException { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}"))), + List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { DateScriptFieldType fieldType = build("add_days", Map.of("days", 1), OnScriptError.FAIL); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index ea427bb28bb1d..ed365a2460203 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -230,11 +231,17 @@ public void testTermsQuery() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + Directory 
directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { DoubleScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2d, 3d))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 5a556d561f008..5eb66e631d86f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -246,11 +247,17 @@ public void testTermsQuery() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { IpScriptFieldType fieldType = build("append_param", Map.of("param", ".1"), OnScriptError.FAIL); List expected = List.of( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index 7d6ad4a61560a..d8903251e6c3b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -376,11 +377,17 @@ public void testMatchQuery() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new 
BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-Suffix"), OnScriptError.FAIL); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index ad6a6636a92fc..debcd3c5fa911 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; @@ -263,11 +264,17 @@ public void testTermsQuery() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106044") public void testBlockLoader() throws IOException { - try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + try ( + Directory directory = newDirectory(); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)) + ) { + iw.addDocuments( + List.of( + List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}"))), + List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}"))) + ) + ); try (DirectoryReader reader = iw.getReader()) { LongScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2L, 3L))); From 9b0f1cea8ceb603e1990bef99b5f7e895d7f617d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 7 Mar 2024 12:52:55 -0800 Subject: [PATCH 055/248] Re-enable the RemoteClusterSecurityBwcRestIT (#106052) Jdk 22 had test failures which caused this test to be muted. It looks like this bwc test may have failed because earlier ES versions had not yet had the necessary change to Terminal backported yet. That should be fixed now with #106049 and #106051. This commit re-enables the bwc test. 
relates #104858 --- .../xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java index 7c26b8e386cc5..fee5129f8c9b8 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.remotecluster; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -41,7 +40,6 @@ /** * BWC test which ensures that users and API keys with defined {@code remote_indices} privileges can be used to query legacy remote clusters */ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104858") public class RemoteClusterSecurityBwcRestIT extends AbstractRemoteClusterSecurityTestCase { private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); From b22d2af669ac82ea66dc1c4f1decbc5099b8af72 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 7 Mar 2024 14:59:02 -0600 Subject: [PATCH 056/248] OIDC doc - mention client secret needs a restart (#106088) --- docs/reference/security/authentication/oidc-guide.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/security/authentication/oidc-guide.asciidoc b/docs/reference/security/authentication/oidc-guide.asciidoc index 41cead20789b6..c2112b949c540 100644 --- a/docs/reference/security/authentication/oidc-guide.asciidoc +++ b/docs/reference/security/authentication/oidc-guide.asciidoc @@ -198,6 +198,7 @@ For instance bin/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret ---- +NOTE: Changes to the `client_secret` requires a restart of the {es} nodes to pick up the change. NOTE: According to the OpenID Connect specification, the OP should also make their configuration available at a well known URL, which is the concatenation of their `Issuer` value with the From 863cbf6bb437c8d39012f5897ca0ab07377a61d8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 7 Mar 2024 13:15:01 -0800 Subject: [PATCH 057/248] Add docs for cross cluster search in ES|QL(#105934) This change adds a documentation for cross cluster search in ES|QL. 
Relates #102954 Closes #105529 --- .../esql/esql-across-clusters.asciidoc | 224 ++++++++++++++++++ docs/reference/esql/esql-using.asciidoc | 4 + docs/reference/esql/index.asciidoc | 2 +- .../esql/processing-commands/enrich.asciidoc | 4 + .../esql/source-commands/from.asciidoc | 10 + .../search-across-clusters.asciidoc | 12 +- 6 files changed, 254 insertions(+), 2 deletions(-) create mode 100644 docs/reference/esql/esql-across-clusters.asciidoc diff --git a/docs/reference/esql/esql-across-clusters.asciidoc b/docs/reference/esql/esql-across-clusters.asciidoc new file mode 100644 index 0000000000000..f35a62c49aca3 --- /dev/null +++ b/docs/reference/esql/esql-across-clusters.asciidoc @@ -0,0 +1,224 @@ +[[esql-cross-clusters]] +=== Using {esql} across clusters + +++++ +Using {esql} across clusters +++++ + +[partintro] + +preview::["{ccs-cap} for {esql} is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +With {esql}, you can execute a single query across multiple clusters. + +==== Prerequisites + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-prereqs] + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-gateway-seed-nodes] + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-proxy-mode] + +[discrete] +[[ccq-remote-cluster-setup]] +==== Remote cluster setup +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-remote-cluster-setup] + +<1> Since `skip_unavailable` was not set on `cluster_three`, it uses +the default of `false`. See the <> +section for details. + +[discrete] +[[ccq-from]] +==== Query across multiple clusters + +In the `FROM` command, specify data streams and indices on remote clusters +using the format `:`. For instance, the following +{esql} request queries the `my-index-000001` index on a single remote cluster +named `cluster_one`: + +[source,esql] +---- +FROM cluster_one:my-index-000001 +| LIMIT 10 +---- + +Similarly, this {esql} request queries the `my-index-000001` index from +three clusters: + +* The local ("querying") cluster +* Two remote clusters, `cluster_one` and `cluster_two` + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| LIMIT 10 +---- + +Likewise, this {esql} request queries the `my-index-000001` index from all +remote clusters (`cluster_one`, `cluster_two`, and `cluster_three`): + +[source,esql] +---- +FROM *:my-index-000001 +| LIMIT 10 +---- + +[discrete] +[[ccq-enrich]] +==== Enrich across clusters + +Enrich in {esql} across clusters operates similarly to <>. +If the enrich policy and its enrich indices are consistent across all clusters, simply +write the enrich command as you would without remote clusters. In this default mode, +{esql} can execute the enrich command on either the querying cluster or the fulfilling +clusters, aiming to minimize computation or inter-cluster data transfer. Ensuring that +the policy exists with consistent data on both the querying cluster and the fulfilling +clusters is critical for ES|QL to produce a consistent query result. + +In the following example, the enrich with `hosts` policy can be executed on +either the querying cluster or the remote cluster `cluster_one`. 
+ +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001 +| ENRICH hosts ON ip +| LIMIT 10 +---- + +Enrich with an {esql} query against remote clusters only can also happen on +the querying cluster. This means the below query requires the `hosts` enrich +policy to exist on the querying cluster as well. + +[source,esql] +---- +FROM cluster_one:my-index-000001,cluster_two:my-index-000001 +| LIMIT 10 +| ENRICH hosts ON ip +---- + +[discrete] +[[esql-enrich-coordinator]] +==== Enrich with coordinator mode + +{esql} provides the enrich `_coordinator` mode to force {esql} to execute the enrich +command on the querying cluster. This mode should be used when the enrich policy is +not available on the remote clusters or maintaining consistency of enrich indices +across clusters is challenging. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001 +| ENRICH _coordinator:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +[discrete] +[IMPORTANT] +==== +Enrich with the `_coordinator` mode usually increases inter-cluster data transfer and +workload on the querying cluster. +==== + +[discrete] +[[esql-enrich-remote]] +==== Enrich with remote mode + +{esql} also provides the enrich `_remote` mode to force {esql} to execute the enrich +command independently on each fulfilling cluster where the target indices reside. +This mode is useful for managing different enrich data on each cluster, such as detailed +information of hosts for each region where the target (main) indices contain +log events from these hosts. + +In the below example, the `hosts` enrich policy is required to exist on all +fulfilling clusters: the `querying` cluster (as local indices are included), +the remote cluster `cluster_one`, and `cluster_two`. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH _remote:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +A `_remote` enrich cannot be executed after a <> +command. The following example would result in an error: + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| STATS COUNT(*) BY ip +| ENRICH _remote:hosts ON ip +| SORT host_name +| LIMIT 10 +---- + +[discrete] +[[esql-multi-enrich]] +==== Multiple enrich commands + +You can include multiple enrich commands in the same query with different +modes. {esql} will attempt to execute them accordingly. For example, this +query performs two enriches, first with the `hosts` policy on any cluster +and then with the `vendors` policy on the querying cluster. + +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH hosts ON ip +| ENRICH _coordinator:vendors ON os +| LIMIT 10 +---- + +A `_remote` enrich command can't be executed after a `_coordinator` enrich +command. The following example would result in an error. 
+ +[source,esql] +---- +FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001 +| ENRICH _coordinator:hosts ON ip +| ENRICH _remote:vendors ON os +| LIMIT 10 +---- + +[discrete] +[[ccq-exclude]] +==== Excluding clusters or indices from {esql} query + +To exclude an entire cluster, prefix the cluster alias with a minus sign in +the `FROM` command, for example: `-my_cluster:*`: + +[source,esql] +---- +FROM my-index-000001,cluster*:my-index-000001,-cluster_three:* +| LIMIT 10 +---- + +To exclude a specific remote index, prefix the index with a minus sign in +the `FROM` command, such as `my_cluster:-my_index`: + +[source,esql] +---- +FROM my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001 +| LIMIT 10 +---- + +[discrete] +[[ccq-skip-unavailable-clusters]] +==== Optional remote clusters + +{ccs-cap} for {esql} currently does not respect the `skip_unavailable` +setting. As a result, if a remote cluster specified in the request is +unavailable or failed, {ccs} for {esql} queries will fail regardless of the setting. + +We are actively working to align the behavior of {ccs} for {esql} with other +{ccs} APIs. This includes providing detailed execution information for each cluster +in the response, such as execution time, selected target indices, and shards. + +[discrete] +[[ccq-during-upgrade]] +==== Query across clusters during an upgrade + +include::{es-repo-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-during-upgrade] diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc index f11fdd2d058a5..3e045163069ec 100644 --- a/docs/reference/esql/esql-using.asciidoc +++ b/docs/reference/esql/esql-using.asciidoc @@ -12,10 +12,14 @@ and set up alerts. Using {esql} in {elastic-sec} to investigate events in Timeline, create detection rules, and build {esql} queries using Elastic AI Assistant. +<>:: +Using {esql} to query across multiple clusters. + <>:: Using the <> to list and cancel {esql} queries. include::esql-rest.asciidoc[] include::esql-kibana.asciidoc[] include::esql-security-solution.asciidoc[] +include::esql-across-clusters.asciidoc[] include::task-management.asciidoc[] diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index 8fb20b981b93e..531336277ba6b 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -56,7 +56,7 @@ GROK>> and <>. <>:: An overview of using the <>, <>, -<>, and <>. +<>, <>, and <>. <>:: The current limitations of {esql}. diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index 603683858b8c0..f73eea6018cbc 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -15,6 +15,10 @@ ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, The name of the enrich policy. You need to <> and <> the enrich policy first. +`mode`:: +The mode of the enrich command in cross cluster {esql}. +See <>. + `match_field`:: The match field. `ENRICH` uses its value to look for records in the enrich index. 
If not specified, the match will be performed on the column with the same diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index dbb5010060257..d81c46530e089 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -66,6 +66,16 @@ or aliases: FROM employees-00001,other-employees-* ---- +Use the format `:` to query data streams and indices +on remote clusters: + +[source,esql] +---- +FROM cluster_one:employees-00001,cluster_two:other-employees-* +---- + +See <>. + Use the optional `METADATA` directive to enable <>: [source,esql] diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 93955659a1b2a..ee1d9fcae18e8 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -22,10 +22,11 @@ The following APIs support {ccs}: * experimental:[] <> * experimental:[] <> * experimental:[] <> +* experimental:[] <> [discrete] -[[ccs-prereqs]] === Prerequisites +// tag::ccs-prereqs[] * {ccs-cap} requires remote clusters. To set up remote clusters on {ess}, see link:{cloud}/ec-enable-ccs.html[configure remote clusters on {ess}]. If you @@ -39,15 +40,19 @@ To ensure your remote cluster configuration supports {ccs}, see * The local coordinating node must have the <> node role. +// end::ccs-prereqs[] [[ccs-gateway-seed-nodes]] +// tag::ccs-gateway-seed-nodes[] * If you use <>, the local coordinating node must be able to connect to seed and gateway nodes on the remote cluster. + We recommend using gateway nodes capable of serving as coordinating nodes. The seed nodes can be a subset of these gateway nodes. +// end::ccs-gateway-seed-nodes[] [[ccs-proxy-mode]] +// tag::ccs-proxy-mode[] * If you use <>, the local coordinating node must be able to connect to the configured `proxy_address`. The proxy at this address must be able to route connections to gateway and coordinating nodes on the remote @@ -56,6 +61,7 @@ cluster. * {ccs-cap} requires different security privileges on the local cluster and remote cluster. See <> and <>. +// end::ccs-proxy-mode[] [discrete] [[ccs-example]] @@ -64,6 +70,7 @@ remote cluster. See <> and [discrete] [[ccs-remote-cluster-setup]] ==== Remote cluster setup +// tag::ccs-remote-cluster-setup[] The following <> API request adds three remote clusters: `cluster_one`, `cluster_two`, and `cluster_three`. @@ -99,6 +106,7 @@ PUT _cluster/settings -------------------------------- // TEST[setup:host] // TEST[s/35.238.149.\d+:930\d+/\${transport_host}/] +// end::ccs-remote-cluster-setup[] <1> Since `skip_unavailable` was not set on `cluster_three`, it uses the default of `false`. See the <> @@ -1393,6 +1401,7 @@ cluster as the local cluster when running a {ccs}. [[ccs-during-upgrade]] ==== {ccs-cap} during an upgrade +// tag::ccs-during-upgrade[] You can still search a remote cluster while performing a rolling upgrade on the local cluster. However, the local coordinating node's "upgrade from" and "upgrade to" version must be compatible @@ -1403,3 +1412,4 @@ duration of an upgrade is not supported. For more information about upgrades, see {stack-ref}/upgrading-elasticsearch.html[Upgrading {es}]. 
+// end::ccs-during-upgrade[] From 6f8280c12a3e2f122206d59853d6f185862f3779 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Thu, 7 Mar 2024 16:56:07 -0500 Subject: [PATCH 058/248] [ci] Refactor BWC templating in Buildkite pipelines to handle more scenarios (#106084) --- .buildkite/pipelines/periodic.yml | 1 - build.gradle | 68 +++++++++++++++++++++---------- 2 files changed, 46 insertions(+), 23 deletions(-) diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 4af58e23db2b9..80f38dc79eecc 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1,4 +1,3 @@ -# This file is auto-generated. See .buildkite/pipelines/periodic.yml # This file is auto-generated. See .buildkite/pipelines/periodic.template.yml steps: - group: bwc diff --git a/build.gradle b/build.gradle index b499d619f246e..0cb4133f2ed6e 100644 --- a/build.gradle +++ b/build.gradle @@ -64,6 +64,17 @@ ext.testArtifact = { p, String name = "test" -> }; } +class StepExpansion { + String templatePath + List versions + String variable +} + +class ListExpansion { + List versions + String variable +} + tasks.register("updateCIBwcVersions") { def writeVersions = { File file, List versions -> file.text = "" @@ -73,47 +84,60 @@ tasks.register("updateCIBwcVersions") { } } - def writeBuildkiteList = { String outputFilePath, String pipelineTemplatePath, List versions -> + def writeBuildkitePipeline = { String outputFilePath, String pipelineTemplatePath, List listExpansions, List stepExpansions = [] -> def outputFile = file(outputFilePath) def pipelineTemplate = file(pipelineTemplatePath) - def listString = "[" + versions.collect { "\"${it}\"" }.join(", ") + "]" - outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll('\\$BWC_LIST', listString) - } + def pipeline = pipelineTemplate.text - def writeBuildkiteSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> - def outputFile = file(outputFilePath) - def pipelineTemplate = file(pipelineTemplatePath) - def stepTemplate = file(stepTemplatePath) + listExpansions.each { expansion -> + def listString = "[" + expansion.versions.collect { "\"${it}\"" }.join(", ") + "]" + pipeline = pipeline.replaceAll('\\$' + expansion.variable, listString) + } - def steps = "" - versions.each { - steps += "\n" + stepTemplate.text.replaceAll('\\$BWC_VERSION', it.toString()) + stepExpansions.each { expansion -> + def steps = "" + expansion.versions.each { + steps += "\n" + file(expansion.templatePath).text.replaceAll('\\$BWC_VERSION', it.toString()) + } + pipeline = pipeline.replaceAll(' *\\$' + expansion.variable, steps) } - outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll(' *\\$BWC_STEPS', steps) + outputFile.text = "# This file is auto-generated. 
See ${pipelineTemplatePath}\n" + pipeline + } + + // Writes a Buildkite pipelime from a template, and replaces $BWC_LIST with an array of versions + // Useful for writing a list of versions in a matrix configuration + def expandBwcList = { String outputFilePath, String pipelineTemplatePath, List versions -> + writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [new ListExpansion(versions: versions, variable: "BWC_LIST")]) + } + + // Writes a Buildkite pipeline from a template, and replaces $BWC_STEPS with a list of steps, one for each version + // Useful when you need to configure more versions than are allowed in a matrix configuration + def expandBwcSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> + writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [], [new StepExpansion(templatePath: stepTemplatePath, versions: versions, variable: "BWC_STEPS")]) } doLast { writeVersions(file(".ci/bwcVersions"), BuildParams.bwcVersions.allIndexCompatible) writeVersions(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) - writeBuildkiteList( + expandBwcList( ".buildkite/pipelines/intake.yml", ".buildkite/pipelines/intake.template.yml", BuildParams.bwcVersions.unreleasedIndexCompatible ) - writeBuildkiteSteps( + writeBuildkitePipeline( ".buildkite/pipelines/periodic.yml", ".buildkite/pipelines/periodic.template.yml", - ".buildkite/pipelines/periodic.bwc.template.yml", - BuildParams.bwcVersions.allIndexCompatible + [ + new ListExpansion(versions: BuildParams.bwcVersions.unreleasedIndexCompatible, variable: "BWC_LIST"), + ], + [ + new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: BuildParams.bwcVersions.allIndexCompatible, variable: "BWC_STEPS"), + ] ) - writeBuildkiteList( - ".buildkite/pipelines/periodic.yml", - ".buildkite/pipelines/periodic.yml", - BuildParams.bwcVersions.unreleasedIndexCompatible - ) - writeBuildkiteSteps( + + expandBwcSteps( ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", ".buildkite/pipelines/periodic-packaging.bwc.template.yml", From 20f5bac87ebc1e926f3a8c2e2974537341709b74 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 7 Mar 2024 16:55:26 -0800 Subject: [PATCH 059/248] Specialize serialization for ArrayVectors (#105893) Currently, we serialize blocks and vectors value by value, employing a simple yet effective approach. However, there are specific cases where we can enhance performance by serializing the underlying structure instead: 1. Serializing BytesRefArray of a BytesRefArrayVector. 2. Serializing the firstValueIndexes, nullsMask, and the underlying vector of an ArrayBlock instead of rebuilding the block from values. 3. Serializing BigArrayBlock. This PR addresses the first bullet point and lays the groundwork for implementing the second. 
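For illustration only, a minimal round-trip sketch of the new serialization path (not part of this change; the wrapper class, the method name, and where the `BlockFactory` and the input vector come from are assumptions):

[source,java]
----
import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.BytesRefVector;

// Hypothetical helper, only to show how the serialization entry points are exercised.
class VectorSerializationSketch {
    static BytesRefVector roundTrip(BlockFactory blockFactory, BytesRefVector vector) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // For an array-backed vector on a recent transport version, writeTo emits the
            // SERIALIZE_VECTOR_ARRAY marker followed by the underlying BytesRefArray,
            // instead of writing one value per position.
            vector.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                // readFrom dispatches on the marker byte (values, constant, or array).
                return BytesRefVector.readFrom(blockFactory, in);
            }
        }
    }
}
----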
--- docs/changelog/105893.yaml | 5 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../compute/data/BooleanArrayVector.java | 30 ++++++++++ .../compute/data/BooleanVector.java | 42 ++++++++++---- .../compute/data/BytesRefArrayVector.java | 23 ++++++++ .../compute/data/BytesRefVector.java | 43 ++++++++++---- .../compute/data/DoubleArrayVector.java | 29 ++++++++++ .../compute/data/DoubleVector.java | 42 ++++++++++---- .../compute/data/IntArrayVector.java | 29 ++++++++++ .../elasticsearch/compute/data/IntVector.java | 42 ++++++++++---- .../compute/data/LongArrayVector.java | 29 ++++++++++ .../compute/data/LongVector.java | 42 ++++++++++---- .../elasticsearch/compute/data/Vector.java | 7 +++ .../compute/data/X-ArrayVector.java.st | 57 +++++++++++++++++++ .../compute/data/X-Vector.java.st | 49 +++++++++++----- 15 files changed, 396 insertions(+), 74 deletions(-) create mode 100644 docs/changelog/105893.yaml diff --git a/docs/changelog/105893.yaml b/docs/changelog/105893.yaml new file mode 100644 index 0000000000000..c88736f5dda3d --- /dev/null +++ b/docs/changelog/105893.yaml @@ -0,0 +1,5 @@ +pr: 105893 +summary: Specialize serialization for `ArrayVectors` +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 29dec80875787..bc27ab8265b26 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -138,6 +138,7 @@ static TransportVersion def(int id) { public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0); public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0); public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0); + public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 1599061d04ce8..63f02b14d9481 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,33 @@ final class BooleanArrayVector extends AbstractVector implements BooleanVector { this.values = values; } + static BooleanArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Byte.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + boolean[] values = new boolean[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readBoolean(); + } + final var block = new BooleanArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + // TODO: One bit for each boolean + for (int i = 0; i < positions; i++) { + out.writeBoolean(values[i]); + } + } + @Override public BooleanBlock asBlock() { return new BooleanVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index 7c86f40981ec7..2f50b45fbfc9d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,47 @@ static int hash(BooleanVector vector) { /** Deserializes a Vector from the given stream input. 
*/ static BooleanVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantBooleanVector(in.readBoolean(), positions); - } else { - try (var builder = blockFactory.newBooleanVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendBoolean(in.readBoolean()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBooleanVector(in.readBoolean(), positions); + case SERIALIZE_VECTOR_ARRAY -> BooleanArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeBoolean(getBoolean(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof BooleanArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static BooleanVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newBooleanVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeBoolean(getBoolean(i)); + builder.appendBoolean(in.readBoolean()); } + return builder.build(); + } + } + + private static void writeValues(BooleanVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeBoolean(v.getBoolean(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index 5d47802bebabe..d0b600d0f0be2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -9,9 +9,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; + /** * Vector implementation that stores an array of BytesRef values. * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. 
@@ -30,6 +34,25 @@ final class BytesRefArrayVector extends AbstractVector implements BytesRefVector this.values = values; } + static BytesRefArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final BytesRefArray values = new BytesRefArray(in, blockFactory.bigArrays()); + boolean success = false; + try { + final var block = new BytesRefArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - values.bigArraysRamBytesUsed()); + success = true; + return block; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public BytesRefBlock asBlock() { return new BytesRefVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index 5c56ece72c298..c0b107065f43c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,48 @@ static int hash(BytesRefVector vector) { /** Deserializes a Vector from the given stream input. */ static BytesRefVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantBytesRefVector(in.readBytesRef(), positions); - } else { - try (var builder = blockFactory.newBytesRefVectorBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendBytesRef(in.readBytesRef()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBytesRefVector(in.readBytesRef(), positions); + case SERIALIZE_VECTOR_ARRAY -> BytesRefArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. 
*/ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeBytesRef(getBytesRef(0, new BytesRef())); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof BytesRefArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static BytesRefVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newBytesRefVectorBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeBytesRef(getBytesRef(i, new BytesRef())); + builder.appendBytesRef(in.readBytesRef()); } + return builder.build(); + } + } + + private static void writeValues(BytesRefVector v, int positions, StreamOutput out) throws IOException { + var scratch = new BytesRef(); + for (int i = 0; i < positions; i++) { + out.writeBytesRef(v.getBytesRef(i, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 9a9fedb95a1b6..a7868beaf5db8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class DoubleArrayVector extends AbstractVector implements DoubleVector { this.values = values; } + static DoubleArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Double.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + double[] values = new double[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readDouble(); + } + final var block = new DoubleArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeDouble(values[i]); + } + } + @Override public DoubleBlock asBlock() { return new DoubleVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index f54044874acdd..c5553f6a102f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -7,6 
+7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -75,30 +76,47 @@ static int hash(DoubleVector vector) { /** Deserializes a Vector from the given stream input. */ static DoubleVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantDoubleVector(in.readDouble(), positions); - } else { - try (var builder = blockFactory.newDoubleVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendDouble(in.readDouble()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantDoubleVector(in.readDouble(), positions); + case SERIALIZE_VECTOR_ARRAY -> DoubleArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeDouble(getDouble(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof DoubleArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static DoubleVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newDoubleVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeDouble(getDouble(i)); + builder.appendDouble(in.readDouble()); } + return builder.build(); + } + } + + private static void writeValues(DoubleVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeDouble(v.getDouble(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index 9374a4db4b4c4..644af9ae512a8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class IntArrayVector extends AbstractVector implements IntVector { this.values = values; } + static IntArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws 
IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Integer.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + int[] values = new int[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readInt(); + } + final var block = new IntArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeInt(values[i]); + } + } + @Override public IntBlock asBlock() { return new IntVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index bc7e3c87ec33d..1d4fb0741cab0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,30 +75,47 @@ static int hash(IntVector vector) { /** Deserializes a Vector from the given stream input. */ static IntVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantIntVector(in.readInt(), positions); - } else { - try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendInt(in.readInt()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantIntVector(in.readInt(), positions); + case SERIALIZE_VECTOR_ARRAY -> IntArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. 
*/ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeInt(getInt(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof IntArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static IntVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeInt(getInt(i)); + builder.appendInt(in.readInt()); } + return builder.build(); + } + } + + private static void writeValues(IntVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeInt(v.getInt(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index a50987f1d6959..b3cee58356d70 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -8,7 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; /** @@ -28,6 +31,32 @@ final class LongArrayVector extends AbstractVector implements LongVector { this.values = values; } + static LongArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * Long.BYTES; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + long[] values = new long[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.readLong(); + } + final var block = new LongArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeLong(values[i]); + } + } + @Override public LongBlock asBlock() { return new LongVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index 358f5b32366cb..60592469f0ea1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; @@ -75,30 +76,47 @@ static int hash(LongVector vector) { /** Deserializes a Vector from the given stream input. */ static LongVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstantLongVector(in.readLong(), positions); - } else { - try (var builder = blockFactory.newLongVectorFixedBuilder(positions)) { - for (int i = 0; i < positions; i++) { - builder.appendLong(in.readLong()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantLongVector(in.readLong(), positions); + case SERIALIZE_VECTOR_ARRAY -> LongArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); out.writeLong(getLong(0)); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof LongArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static LongVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.newLongVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { - out.writeLong(getLong(i)); + builder.appendLong(in.readLong()); } + return builder.build(); + } + } + + private static void writeValues(LongVector v, int positions, StreamOutput out) throws IOException { + for (int i = 0; i < positions; i++) { + out.writeLong(v.getLong(i)); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index fc09f636ac700..c309a7a0b8827 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -75,4 +75,11 @@ interface Builder extends Releasable { * Whether this vector was released */ boolean isReleased(); + + /** + * The serialization type of vectors: 0 and 1 replaces the boolean false/true in pre-8.14. 
+ */ + byte SERIALIZE_VECTOR_VALUES = 0; + byte SERIALIZE_VECTOR_CONSTANT = 1; + byte SERIALIZE_VECTOR_ARRAY = 2; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index a02656f72e54c..b5ecb2cad4a56 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -10,12 +10,19 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; + $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; $endif$ @@ -44,6 +51,56 @@ $endif$ this.values = values; } + static $Type$ArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { +$if(BytesRef)$ + final BytesRefArray values = new BytesRefArray(in, blockFactory.bigArrays()); + boolean success = false; + try { + final var block = new BytesRefArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - values.bigArraysRamBytesUsed()); + success = true; + return block; + } finally { + if (success == false) { + values.close(); + } + } +$else$ + final long preAdjustedBytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) positions * $BYTES$; + blockFactory.adjustBreaker(preAdjustedBytes); + boolean success = false; + try { + $type$[] values = new $type$[positions]; + for (int i = 0; i < positions; i++) { + values[i] = in.read$Type$(); + } + final var block = new $Type$ArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(block.ramBytesUsed() - preAdjustedBytes); + success = true; + return block; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-preAdjustedBytes); + } + } +$endif$ + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { +$if(BytesRef)$ + values.writeTo(out); +$elseif(boolean)$ + // TODO: One bit for each boolean + for (int i = 0; i < positions; i++) { + out.writeBoolean(values[i]); + } +$else$ + for (int i = 0; i < positions; i++) { + out.write$Type$(values[i]); + } +$endif$ + } + @Override public $Type$Block asBlock() { return new $Type$VectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index c303a8391ad18..0796801c55d40 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -108,38 +109,58 @@ $endif$ /** Deserializes a Vector from the 
given stream input. */ static $Type$Vector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException { final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return blockFactory.newConstant$Type$Vector(in.read$Type$(), positions); - } else { - try (var builder = blockFactory.new$Type$Vector$if(BytesRef)$$else$Fixed$endif$Builder(positions)) { - for (int i = 0; i < positions; i++) { - builder.append$Type$(in.read$Type$()); - } - return builder.build(); + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); + case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstant$Type$Vector(in.read$Type$(), positions); + case SERIALIZE_VECTOR_ARRAY -> $Type$ArrayVector.readArrayVector(positions, in, blockFactory); + default -> { + assert false : "invalid vector serialization type [" + serializationType + "]"; + throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); } - } + }; } /** Serializes this Vector to the given stream output. */ default void writeTo(StreamOutput out) throws IOException { final int positions = getPositionCount(); + final var version = out.getTransportVersion(); out.writeVInt(positions); - out.writeBoolean(isConstant()); if (isConstant() && positions > 0) { + out.writeByte(SERIALIZE_VECTOR_CONSTANT); $if(BytesRef)$ out.write$Type$(get$Type$(0, new BytesRef())); $else$ out.write$Type$(get$Type$(0)); $endif$ + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof $Type$ArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_ARRAY); + v.writeArrayVector(positions, out); } else { + out.writeByte(SERIALIZE_VECTOR_VALUES); + writeValues(this, positions, out); + } + } + + private static $Type$Vector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + try (var builder = blockFactory.new$Type$Vector$if(BytesRef)$$else$Fixed$endif$Builder(positions)) { for (int i = 0; i < positions; i++) { + builder.append$Type$(in.read$Type$()); + } + return builder.build(); + } + } + + private static void writeValues($Type$Vector v, int positions, StreamOutput out) throws IOException { $if(BytesRef)$ - out.write$Type$(get$Type$(i, new BytesRef())); + var scratch = new BytesRef(); +$endif$ + for (int i = 0; i < positions; i++) { +$if(BytesRef)$ + out.write$Type$(v.get$Type$(i, scratch)); $else$ - out.write$Type$(get$Type$(i)); + out.write$Type$(v.get$Type$(i)); $endif$ - } } } From ef680c920031ed1ffee871a3459f5c968c02292b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 7 Mar 2024 17:22:39 -0800 Subject: [PATCH 060/248] Remove limitation on cross lib dependencies (#106099) Libs were meant to be a way to break up code from server without creating full fledged modules. They still exist on the system classpath, but we did not want to introduce a spaghetti of jars depending on each other. The check that ensures libs don't depend on each other was added before Elasticsearch was modularized. Since it now runs modular, the cross module dependencies are easy to visualize with module-info, and the module system protects us from circular deps. Additionally, the number of exceptions to the no-cross-lib-deps rule has grown considerably. Given all of the above, the check on cross lib dependencies no longer provides much benefit, and is more of a hinderance. This commit removes the check. 
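To make the visibility point concrete, a hedged sketch of a lib's module descriptor (module and package names below are illustrative, not taken from this change): a cross-lib dependency now simply appears as a `requires` clause, and the module system fails the build if such clauses ever form a cycle.

[source,java]
----
// module-info.java of a hypothetical lib; names are made up for illustration.
module org.example.lib.geo {
    // A cross-lib dependency is declared, and therefore visible, right here.
    requires org.example.lib.core;

    exports org.example.lib.geo;
}
----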
--- libs/build.gradle | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/libs/build.gradle b/libs/build.gradle index a88618aea2fcc..ee4ae3db66741 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -12,35 +12,4 @@ configure(subprojects - project('elasticsearch-log4j')) { * tools. */ apply plugin: 'elasticsearch.build' - - /* - * Subprojects may depend on the "core" lib but may not depend on any - * other libs. This keeps our dependencies simpler. - * With the exception that specialised plugin apis can depend on "core" plugin api project - */ - project.afterEvaluate { - configurations.all { Configuration conf -> - dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> - Project depProject = dep.dependencyProject - if (depProject != null - && false == isPluginApi(project, depProject) - && false == depProject.path.equals(':libs:elasticsearch-x-content') - && false == depProject.path.equals(':libs:elasticsearch-core') - && false == depProject.path.equals(':libs:elasticsearch-plugin-api') - && false == depProject.path.equals(':libs:elasticsearch-logging') - && false == depProject.path.equals(':libs:elasticsearch-native') - && depProject.path.startsWith(':libs') - && depProject.name.startsWith('elasticsearch-')) { - throw new InvalidUserDataException("projects in :libs " - + "may not depend on other projects libs except " - + ":libs:elasticsearch-core but " - + "${project.path} depends on ${depProject.path}") - } - } - } - } -} - -boolean isPluginApi(Project project, Project depProject) { - return project.path.matches(".*elasticsearch-plugin-.*api") } From a97552b6f883fccbbb73eb7e4d130d88b9e8465e Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 8 Mar 2024 13:42:17 +1100 Subject: [PATCH 061/248] Introduce ESTestCase#safeAcquire (#106100) Similar to safeAwait for Barrier and Latch, safeAcquire tries to acquire a permit from a Semaphore within a given timeout.
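A hypothetical usage sketch (not part of this change; the test class and the background work are made up) showing how a test waits on a permit with the new helper:

[source,java]
----
import java.util.concurrent.Semaphore;

import org.elasticsearch.test.ESTestCase;

public class SafeAcquireUsageTests extends ESTestCase {
    public void testWaitsForBackgroundPermit() {
        Semaphore permits = new Semaphore(0);
        // Stand-in for real async work that signals completion by releasing a permit.
        new Thread(permits::release).start();
        // Fails the test with a clear message if no permit arrives within the helper's 10-second timeout.
        safeAcquire(permits);
    }
}
----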
Relates: #94737 --- .../main/java/org/elasticsearch/test/ESTestCase.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 06d09f3942a1c..052b9a7165a6c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -169,6 +169,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; @@ -2083,6 +2084,15 @@ public static void safeAwait(CountDownLatch countDownLatch) { } } + public static void safeAcquire(Semaphore semaphore) { + try { + assertTrue("safeAcquire: Semaphore did not acquire permit within the timeout", semaphore.tryAcquire(10, TimeUnit.SECONDS)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e, "safeAcquire: interrupted waiting for Semaphore to acquire permit"); + } + } + public static T safeAwait(SubscribableListener listener) { final var future = new PlainActionFuture(); listener.addListener(future); From 7d2c5acabf0257504d3a3cb2076ba92821da6220 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:06:24 +0100 Subject: [PATCH 062/248] Mute failing Test flamegraph tests (#106106) --- .../resources/rest-api-spec/test/profiling/10_basic.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml index 367655ba89388..0948eb662f07f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -166,6 +166,8 @@ teardown: --- "Test flamegraph from profiling-events": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/106103" - do: profiling.flamegraph: body: > @@ -192,6 +194,8 @@ teardown: --- "Test flamegraph from test-events": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/106103" - do: profiling.flamegraph: body: > From 179739effdad01e0f9e35b09799cec07603e05bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:10:28 +0100 Subject: [PATCH 063/248] Mute testIndexDoesntExist and testSelectNode_GivenJobOpeningAndIndexDoesNotExist (#106109) Mute: https://github.com/elastic/elasticsearch/issues/106107 and https://github.com/elastic/elasticsearch/issues/106108 --- .../xpack/ml/datafeed/DatafeedNodeSelectorTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 5ddba7519eef2..517c851d43804 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -313,6 +313,7 @@ public void testShardNotAllActive() { 
.checkDatafeedTaskCanBeCreated(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106107") public void testIndexDoesntExist() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); @@ -497,6 +498,7 @@ public void testSelectNode_jobTaskStale() { .checkDatafeedTaskCanBeCreated(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106108") public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { // Here we test that when there are 2 problems, the most critical gets reported first. // In this case job is Opening (non-critical) and the index does not exist (critical) From 1530eb1bf782e56b1d1e9ddf30b2cf3098ae5d0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:41:55 +0100 Subject: [PATCH 064/248] Add version to skip block (#106111) Missed the version in the skip block --- .../resources/rest-api-spec/test/profiling/10_basic.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml index 0948eb662f07f..1e0c260a70e4f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -168,6 +168,7 @@ teardown: "Test flamegraph from profiling-events": - skip: reason: "https://github.com/elastic/elasticsearch/issues/106103" + version: "all" - do: profiling.flamegraph: body: > @@ -196,6 +197,7 @@ teardown: "Test flamegraph from test-events": - skip: reason: "https://github.com/elastic/elasticsearch/issues/106103" + version: "all" - do: profiling.flamegraph: body: > From d502ade41a329d9017a865cd6433f107cfbdf8f7 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 8 Mar 2024 12:24:11 +0200 Subject: [PATCH 065/248] [TEST] add allowed warnings in tsdb yaml (#106006) Fixes #105944 --- .../rest-api-spec/test/data_stream/150_tsdb.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 278c14c09a31a..20eb33ecefdee 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -196,9 +196,10 @@ index without timestamp with pipeline: dynamic templates: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -326,9 +327,10 @@ dynamic templates: dynamic templates - conflicting aliases: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching 
patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -422,9 +424,10 @@ dynamic templates - conflicting aliases: dynamic templates with nesting: - skip: version: " - 8.12.99" - features: "default_shards" reason: "Support for dynamic fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" indices.put_index_template: name: my-dynamic-template body: @@ -561,6 +564,8 @@ subobject in passthrough object auto flatten: version: " - 8.12.99" reason: "Support for passthrough fields was added in 8.13" - do: + allowed_warnings: + - "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation" indices.put_index_template: name: my-passthrough-template body: From 946bfb725d7a468e2d1943f5fd5e7500e852043a Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Fri, 8 Mar 2024 12:18:50 +0100 Subject: [PATCH 066/248] Remove ununsed JobUpdate.INTERNAL_PARSER (#106112) * Remove ununsed JobUpdate.INTERNAL_PARSER * Fix compile errors --- .../xpack/core/ml/action/UpdateJobAction.java | 2 +- .../xpack/core/ml/job/config/JobUpdate.java | 76 ++++++++----------- .../core/ml/job/config/JobUpdateTests.java | 26 +------ 3 files changed, 34 insertions(+), 70 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 4e80fcab05e2f..15cd272d12b8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -31,7 +31,7 @@ private UpdateJobAction() { public static class Request extends AcknowledgedRequest implements ToXContentObject { public static UpdateJobAction.Request parseRequest(String jobId, XContentParser parser) { - JobUpdate update = JobUpdate.EXTERNAL_PARSER.apply(parser, null).setJobId(jobId).build(); + JobUpdate update = JobUpdate.PARSER.apply(parser, null).setJobId(jobId).build(); return new UpdateJobAction.Request(jobId, update); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 3ba40c70d0701..4b11314b8bb43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -34,57 +34,43 @@ public class JobUpdate implements Writeable, ToXContentObject { public static final ParseField DETECTORS = new ParseField("detectors"); public static final ParseField CLEAR_JOB_FINISH_TIME = new ParseField("clear_job_finish_time"); - // For internal updates - static final ConstructingObjectParser INTERNAL_PARSER = new ConstructingObjectParser<>( - "job_update", - args -> new Builder((String) args[0]) - ); - // For 
parsing REST requests - public static final ConstructingObjectParser EXTERNAL_PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "job_update", args -> new Builder((String) args[0]) ); static { - for (ConstructingObjectParser parser : Arrays.asList(INTERNAL_PARSER, EXTERNAL_PARSER)) { - parser.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); - parser.declareStringArray(Builder::setGroups, Job.GROUPS); - parser.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); - parser.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); - parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.STRICT_PARSER, Job.MODEL_PLOT_CONFIG); - parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.STRICT_PARSER, Job.ANALYSIS_LIMITS); - parser.declareString( - (builder, val) -> builder.setBackgroundPersistInterval( - TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()) - ), - Job.BACKGROUND_PERSIST_INTERVAL - ); - parser.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); - parser.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); - parser.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); - parser.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, Job.DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); - parser.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); - parser.declareObject( - Builder::setPerPartitionCategorizationConfig, - PerPartitionCategorizationConfig.STRICT_PARSER, - AnalysisConfig.PER_PARTITION_CATEGORIZATION - ); - parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); - parser.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN); - parser.declareString( - (builder, val) -> builder.setModelPruneWindow( - TimeValue.parseTimeValue(val, AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName()) - ), - AnalysisConfig.MODEL_PRUNE_WINDOW - ); - } - // These fields should not be set by a REST request - INTERNAL_PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); - INTERNAL_PARSER.declareString(Builder::setModelSnapshotMinVersion, Job.MODEL_SNAPSHOT_MIN_VERSION); - INTERNAL_PARSER.declareString(Builder::setJobVersion, Job.JOB_VERSION); - INTERNAL_PARSER.declareBoolean(Builder::setClearFinishTime, CLEAR_JOB_FINISH_TIME); - INTERNAL_PARSER.declareObject(Builder::setBlocked, Blocked.STRICT_PARSER, Job.BLOCKED); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); + PARSER.declareStringArray(Builder::setGroups, Job.GROUPS); + PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); + PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); + PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.STRICT_PARSER, Job.MODEL_PLOT_CONFIG); + PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.STRICT_PARSER, Job.ANALYSIS_LIMITS); + PARSER.declareString( + (builder, val) -> builder.setBackgroundPersistInterval( + TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()) + ), + Job.BACKGROUND_PERSIST_INTERVAL + ); + PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); + PARSER.declareLong(Builder::setResultsRetentionDays, 
Job.RESULTS_RETENTION_DAYS); + PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); + PARSER.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, Job.DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); + PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); + PARSER.declareObject( + Builder::setPerPartitionCategorizationConfig, + PerPartitionCategorizationConfig.STRICT_PARSER, + AnalysisConfig.PER_PARTITION_CATEGORIZATION + ); + PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); + PARSER.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN); + PARSER.declareString( + (builder, val) -> builder.setModelPruneWindow( + TimeValue.parseTimeValue(val, AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName()) + ), + AnalysisConfig.MODEL_PRUNE_WINDOW + ); } private final String jobId; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 09ff29f768dce..24a3a097e9e2d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.utils.MlConfigVersionUtils; @@ -35,8 +34,6 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase { - private boolean useInternalParser = randomBoolean(); - @Override protected JobUpdate createTestInstance() { return createRandom(randomAlphaOfLength(4), null); @@ -49,7 +46,7 @@ protected JobUpdate mutateInstance(JobUpdate instance) { /** * Creates a completely random update when the job is null - * or a random update that is is valid for the given job + * or a random update that is valid for the given job */ public JobUpdate createRandom(String jobId, @Nullable Job job) { JobUpdate.Builder update = new JobUpdate.Builder(jobId); @@ -126,24 +123,9 @@ public JobUpdate createRandom(String jobId, @Nullable Job job) { if (randomBoolean()) { update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))); } - if (useInternalParser && randomBoolean()) { - update.setModelSnapshotId(randomAlphaOfLength(10)); - } - if (useInternalParser && randomBoolean()) { - update.setModelSnapshotMinVersion(MlConfigVersion.CURRENT); - } - if (useInternalParser && randomBoolean()) { - update.setJobVersion(MlConfigVersionUtils.randomCompatibleVersion(random())); - } - if (useInternalParser) { - update.setClearFinishTime(randomBoolean()); - } if (randomBoolean()) { update.setAllowLazyOpen(randomBoolean()); } - if (useInternalParser && randomBoolean() && (job == null || job.isDeleting() == false)) { - update.setBlocked(BlockedTests.createRandom()); - } if (randomBoolean() && job != null) { update.setModelPruneWindow( TimeValue.timeValueSeconds( @@ -251,11 +233,7 @@ protected Writeable.Reader instanceReader() { @Override protected JobUpdate doParseInstance(XContentParser parser) { - if (useInternalParser) { - return 
JobUpdate.INTERNAL_PARSER.apply(parser, null).build(); - } else { - return JobUpdate.EXTERNAL_PARSER.apply(parser, null).build(); - } + return JobUpdate.PARSER.apply(parser, null).build(); } public void testMergeWithJob() { From 8e6a2268cb0dca4f69656175de5db8f7d3fb1c83 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 8 Mar 2024 12:27:03 +0100 Subject: [PATCH 067/248] Very rarely (once so far in CI) the STATS order is reversed (#106110) --- .../esql/qa/testFixtures/src/main/resources/spatial.csv-spec | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 1eb4d82b5fcc2..5c789cee0492f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -115,6 +115,7 @@ stXFromAirportsSupportsNull#[skip:-8.13.99, reason:st_x and st_y added in 8.14] FROM airports | EVAL x = FLOOR(ABS(ST_X(city_location))/200), y = FLOOR(ABS(ST_Y(city_location))/100) | STATS c = count(*) BY x, y +| SORT c DESC ; c:long | x:double | y:double From 929e9821ef2bb0991c0d2cf9af6ea67366bd0f25 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 8 Mar 2024 11:43:00 +0000 Subject: [PATCH 068/248] Expose desired balance APIs internally in serverless (#106116) These APIs are useful for troubleshooting, we should make them available to internal users. --- .../action/admin/cluster/RestDeleteDesiredBalanceAction.java | 3 +++ .../rest/action/admin/cluster/RestGetDesiredBalanceAction.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java index 66382c20cae82..f0b516a876622 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredBalanceAction.java @@ -13,11 +13,14 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; +@ServerlessScope(Scope.INTERNAL) public class RestDeleteDesiredBalanceAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java index a93c1e3d04fd6..0bb7cc5ff7473 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java @@ -13,11 +13,14 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; +@ServerlessScope(Scope.INTERNAL) public class RestGetDesiredBalanceAction extends 
BaseRestHandler { @Override From 49e93335aade8de34d924518abc8c9801cfd97f8 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Fri, 8 Mar 2024 14:04:13 +0200 Subject: [PATCH 069/248] Ensure backing indices will not be empty (#106073) (#106104) In some data stream tests we are using a helper method to create the backing indices. This helper method could return an empty list. A data stream is not allowed to have an empty list of backing indices, in many test a write index was added to the list, this ensured that the list was not empty, while in other tests that was not the case which made these tests flaky. In this PR we evaluate all the usages of `org.elasticsearch.cluster.metadata.DataStreamTestHelper#randomIndexInstances()` and we introduce `org.elasticsearch.cluster.metadata.DataStreamTestHelper#randomNonEmptyIndexInstances` to use in its place, in the cases we need an non-empty list. Fixes: #106073 --- .../cluster/metadata/DataStreamTests.java | 31 +++++++------------ .../metadata/DataStreamTestHelper.java | 16 ++++++++-- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 8b6a0fcb55c5b..a07cd8e60411a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -51,6 +51,7 @@ import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomIndexInstances; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomNonEmptyIndexInstances; import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -97,7 +98,7 @@ protected DataStream mutateInstance(DataStream instance) { var autoShardingEvent = instance.getAutoShardingEvent(); switch (between(0, 11)) { case 0 -> name = randomAlphaOfLength(10); - case 1 -> indices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); + case 1 -> indices = randomNonEmptyIndexInstances(); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); case 3 -> metadata = randomBoolean() && metadata != null ? null : Map.of("key", randomAlphaOfLength(10)); case 4 -> { @@ -125,12 +126,8 @@ protected DataStream mutateInstance(DataStream instance) { ? 
null : DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); case 10 -> { - failureIndices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); - if (failureIndices.isEmpty()) { - failureStore = false; - } else { - failureStore = true; - } + failureIndices = randomValueOtherThan(failureIndices, DataStreamTestHelper::randomIndexInstances); + failureStore = failureIndices.isEmpty() == false; } case 11 -> { autoShardingEvent = randomBoolean() && autoShardingEvent != null @@ -631,11 +628,7 @@ public void testSnapshot() { public void testSnapshotWithAllBackingIndicesRemoved() { var preSnapshotDataStream = DataStreamTestHelper.randomInstance(); - var indicesToAdd = new ArrayList(); - while (indicesToAdd.isEmpty()) { - // ensure at least one index - indicesToAdd.addAll(randomIndexInstances()); - } + var indicesToAdd = randomNonEmptyIndexInstances(); var postSnapshotDataStream = new DataStream( preSnapshotDataStream.getName(), @@ -1652,7 +1645,7 @@ public void testXContentSerializationWithRollover() throws IOException { boolean failureStore = randomBoolean(); List failureIndices = List.of(); if (failureStore) { - failureIndices = randomIndexInstances(); + failureIndices = randomNonEmptyIndexInstances(); } DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); @@ -1786,7 +1779,7 @@ public void testWriteFailureIndex() { boolean system = hidden && randomBoolean(); DataStream noFailureStoreDataStream = new DataStream( randomAlphaOfLength(10), - randomIndexInstances(), + randomNonEmptyIndexInstances(), randomNonNegativeInt(), null, hidden, @@ -1805,7 +1798,7 @@ public void testWriteFailureIndex() { DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( randomAlphaOfLength(10), - randomIndexInstances(), + randomNonEmptyIndexInstances(), randomNonNegativeInt(), null, hidden, @@ -1831,7 +1824,7 @@ public void testWriteFailureIndex() { failureIndices.add(writeFailureIndex); DataStream failureStoreDataStream = new DataStream( dataStreamName, - randomIndexInstances(), + randomNonEmptyIndexInstances(), randomNonNegativeInt(), null, hidden, @@ -1852,7 +1845,7 @@ public void testWriteFailureIndex() { public void testIsFailureIndex() { boolean hidden = randomBoolean(); boolean system = hidden && randomBoolean(); - List backingIndices = randomIndexInstances(); + List backingIndices = randomNonEmptyIndexInstances(); DataStream noFailureStoreDataStream = new DataStream( randomAlphaOfLength(10), backingIndices, @@ -1875,7 +1868,7 @@ public void testIsFailureIndex() { is(false) ); - backingIndices = randomIndexInstances(); + backingIndices = randomNonEmptyIndexInstances(); DataStream failureStoreDataStreamWithEmptyFailureIndices = new DataStream( randomAlphaOfLength(10), backingIndices, @@ -1900,7 +1893,7 @@ public void testIsFailureIndex() { is(false) ); - backingIndices = randomIndexInstances(); + backingIndices = randomNonEmptyIndexInstances(); List failureIndices = randomIndexInstances(); String dataStreamName = randomAlphaOfLength(10); Index writeFailureIndex = new Index( diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 1cc5006fe0018..8402b5756e915 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ 
b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -263,8 +263,20 @@ public static String generateMapping(String timestampFieldName, String type) { + " }"; } + /** + * @return a list of random indices. NOTE: the list can be empty, if you do not want an empty list use + * {@link DataStreamTestHelper#randomNonEmptyIndexInstances()} + */ public static List randomIndexInstances() { - int numIndices = ESTestCase.randomIntBetween(0, 128); + return randomIndexInstances(0, 128); + } + + public static List randomNonEmptyIndexInstances() { + return randomIndexInstances(1, 128); + } + + public static List randomIndexInstances(int min, int max) { + int numIndices = ESTestCase.randomIntBetween(min, max); List indices = new ArrayList<>(numIndices); for (int i = 0; i < numIndices; i++) { indices.add(new Index(randomAlphaOfLength(10).toLowerCase(Locale.ROOT), UUIDs.randomBase64UUID(LuceneTestCase.random()))); @@ -296,7 +308,7 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time List failureIndices = List.of(); boolean failureStore = randomBoolean(); if (failureStore) { - failureIndices = randomIndexInstances(); + failureIndices = randomNonEmptyIndexInstances(); } return new DataStream( From 9ac281ed7e2f946f7bb370b1e50bb6f7a3537c8f Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Fri, 8 Mar 2024 13:20:23 +0100 Subject: [PATCH 070/248] [Connector API] Add native_connector_api_keys feature definition (#106118) --- .../connector/ConnectorFeatures.java | 46 +++++++++++++------ .../connector/ConnectorFeaturesTests.java | 24 ++++++++++ .../connector/ConnectorTestUtils.java | 1 + .../application/connector/ConnectorTests.java | 3 ++ 4 files changed, 61 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java index 51aa110342fe9..bbb8805de1f0f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java @@ -41,28 +41,33 @@ public class ConnectorFeatures implements Writeable, ToXContentObject { @Nullable private final FeatureEnabled incrementalSyncEnabled; @Nullable + private final FeatureEnabled nativeConnectorAPIKeysEnabled; + @Nullable private final SyncRulesFeatures syncRulesFeatures; /** * Constructs a new instance of ConnectorFeatures. * - * @param documentLevelSecurityEnabled A flag indicating whether document-level security is enabled. - * @param filteringAdvancedConfig A flag indicating whether advanced filtering configuration is enabled. - * @param filteringRules A flag indicating whether filtering rules are enabled. - * @param incrementalSyncEnabled A flag indicating whether incremental sync is enabled. - * @param syncRulesFeatures An {@link SyncRulesFeatures} object indicating whether basic and advanced sync rules are enabled. + * @param documentLevelSecurityEnabled A flag indicating whether document-level security is enabled. + * @param filteringAdvancedConfig A flag indicating whether advanced filtering configuration is enabled. + * @param filteringRules A flag indicating whether filtering rules are enabled. + * @param incrementalSyncEnabled A flag indicating whether incremental sync is enabled. 
+ * @param nativeConnectorAPIKeysEnabled A flag indicating whether support for api keys is enabled for native connectors. + * @param syncRulesFeatures An {@link SyncRulesFeatures} object indicating if basic and advanced sync rules are enabled. */ private ConnectorFeatures( FeatureEnabled documentLevelSecurityEnabled, Boolean filteringAdvancedConfig, Boolean filteringRules, FeatureEnabled incrementalSyncEnabled, + FeatureEnabled nativeConnectorAPIKeysEnabled, SyncRulesFeatures syncRulesFeatures ) { this.documentLevelSecurityEnabled = documentLevelSecurityEnabled; this.filteringAdvancedConfigEnabled = filteringAdvancedConfig; this.filteringRulesEnabled = filteringRules; this.incrementalSyncEnabled = incrementalSyncEnabled; + this.nativeConnectorAPIKeysEnabled = nativeConnectorAPIKeysEnabled; this.syncRulesFeatures = syncRulesFeatures; } @@ -71,6 +76,7 @@ public ConnectorFeatures(StreamInput in) throws IOException { this.filteringAdvancedConfigEnabled = in.readOptionalBoolean(); this.filteringRulesEnabled = in.readOptionalBoolean(); this.incrementalSyncEnabled = in.readOptionalWriteable(FeatureEnabled::new); + this.nativeConnectorAPIKeysEnabled = in.readOptionalWriteable(FeatureEnabled::new); this.syncRulesFeatures = in.readOptionalWriteable(SyncRulesFeatures::new); } @@ -78,19 +84,19 @@ public ConnectorFeatures(StreamInput in) throws IOException { private static final ParseField FILTERING_ADVANCED_CONFIG_ENABLED_FIELD = new ParseField("filtering_advanced_config"); private static final ParseField FILTERING_RULES_ENABLED_FIELD = new ParseField("filtering_rules"); private static final ParseField INCREMENTAL_SYNC_ENABLED_FIELD = new ParseField("incremental_sync"); + private static final ParseField NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD = new ParseField("native_connector_api_keys"); private static final ParseField SYNC_RULES_FIELD = new ParseField("sync_rules"); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "connector_features", true, - args -> { - return new Builder().setDocumentLevelSecurityEnabled((FeatureEnabled) args[0]) - .setFilteringAdvancedConfig((Boolean) args[1]) - .setFilteringRules((Boolean) args[2]) - .setIncrementalSyncEnabled((FeatureEnabled) args[3]) - .setSyncRulesFeatures((SyncRulesFeatures) args[4]) - .build(); - } + args -> new Builder().setDocumentLevelSecurityEnabled((FeatureEnabled) args[0]) + .setFilteringAdvancedConfig((Boolean) args[1]) + .setFilteringRules((Boolean) args[2]) + .setIncrementalSyncEnabled((FeatureEnabled) args[3]) + .setNativeConnectorAPIKeysEnabled((FeatureEnabled) args[4]) + .setSyncRulesFeatures((SyncRulesFeatures) args[5]) + .build() ); static { @@ -98,6 +104,7 @@ public ConnectorFeatures(StreamInput in) throws IOException { PARSER.declareBoolean(optionalConstructorArg(), FILTERING_ADVANCED_CONFIG_ENABLED_FIELD); PARSER.declareBoolean(optionalConstructorArg(), FILTERING_RULES_ENABLED_FIELD); PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), INCREMENTAL_SYNC_ENABLED_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD); PARSER.declareObject(optionalConstructorArg(), (p, c) -> SyncRulesFeatures.fromXContent(p), SYNC_RULES_FIELD); } @@ -129,6 +136,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (incrementalSyncEnabled != null) { builder.field(INCREMENTAL_SYNC_ENABLED_FIELD.getPreferredName(), incrementalSyncEnabled); } + if 
(nativeConnectorAPIKeysEnabled != null) { + builder.field(NATIVE_CONNECTOR_API_KEYS_ENABLED_FIELD.getPreferredName(), nativeConnectorAPIKeysEnabled); + } if (syncRulesFeatures != null) { builder.field(SYNC_RULES_FIELD.getPreferredName(), syncRulesFeatures); } @@ -143,6 +153,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(filteringAdvancedConfigEnabled); out.writeOptionalBoolean(filteringRulesEnabled); out.writeOptionalWriteable(incrementalSyncEnabled); + out.writeOptionalWriteable(nativeConnectorAPIKeysEnabled); out.writeOptionalWriteable(syncRulesFeatures); } @@ -155,6 +166,7 @@ public boolean equals(Object o) { && Objects.equals(filteringAdvancedConfigEnabled, features.filteringAdvancedConfigEnabled) && Objects.equals(filteringRulesEnabled, features.filteringRulesEnabled) && Objects.equals(incrementalSyncEnabled, features.incrementalSyncEnabled) + && Objects.equals(nativeConnectorAPIKeysEnabled, features.nativeConnectorAPIKeysEnabled) && Objects.equals(syncRulesFeatures, features.syncRulesFeatures); } @@ -165,6 +177,7 @@ public int hashCode() { filteringAdvancedConfigEnabled, filteringRulesEnabled, incrementalSyncEnabled, + nativeConnectorAPIKeysEnabled, syncRulesFeatures ); } @@ -175,6 +188,7 @@ public static class Builder { private Boolean filteringAdvancedConfig; private Boolean filteringRules; private FeatureEnabled incrementalSyncEnabled; + private FeatureEnabled nativeConnectorAPIKeysEnabled; private SyncRulesFeatures syncRulesFeatures; public Builder setDocumentLevelSecurityEnabled(FeatureEnabled documentLevelSecurityEnabled) { @@ -197,6 +211,11 @@ public Builder setIncrementalSyncEnabled(FeatureEnabled incrementalSyncEnabled) return this; } + public Builder setNativeConnectorAPIKeysEnabled(FeatureEnabled nativeConnectorAPIKeysEnabled) { + this.nativeConnectorAPIKeysEnabled = nativeConnectorAPIKeysEnabled; + return this; + } + public Builder setSyncRulesFeatures(SyncRulesFeatures syncRulesFeatures) { this.syncRulesFeatures = syncRulesFeatures; return this; @@ -208,6 +227,7 @@ public ConnectorFeatures build() { filteringAdvancedConfig, filteringRules, incrementalSyncEnabled, + nativeConnectorAPIKeysEnabled, syncRulesFeatures ); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java index 1563ff5fcf82c..941d0a9ed4594 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java @@ -110,6 +110,30 @@ public void testToXContentMissingSyncRulesAdvanced() throws IOException { testToXContentChecker(content); } + public void testToXContent_NativeConnectorAPIKeysEnabled() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "document_level_security": { + "enabled": true + }, + "filtering_advanced_config": true, + "sync_rules": { + "advanced": { + "enabled": false + }, + "basic": { + "enabled": true + } + }, + "native_connector_api_keys": { + "enabled": true + } + } + """); + + testToXContentChecker(content); + } + private void testToXContentChecker(String content) throws IOException { ConnectorFeatures features = ConnectorFeatures.fromXContentBytes(new BytesArray(content), XContentType.JSON); boolean humanReadable = true; diff --git 
a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index 6d94cdc3ebe35..e9053a0a64507 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -114,6 +114,7 @@ public static ConnectorFeatures getRandomConnectorFeatures() { .setFilteringRules(randomFrom(new Boolean[] { null, randomBoolean() })) .setFilteringAdvancedConfig(randomFrom(new Boolean[] { null, randomBoolean() })) .setIncrementalSyncEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) + .setNativeConnectorAPIKeysEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) .setSyncRulesFeatures(randomBoolean() ? randomSyncRulesFeatures() : null) .build(); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 5525b4694ef04..8ed18fc303498 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -139,6 +139,9 @@ public void testToXContent() throws IOException { "basic":{ "enabled":true } + }, + "native_connector_api_keys": { + "enabled": true } }, "filtering":[ From bb1eddada3678257838b0590090ff9eb68acaa1b Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 8 Mar 2024 23:29:15 +1100 Subject: [PATCH 071/248] Respect --pass option in certutil csr mode (#106105) elasticsearch-certutil csr generates a private key and a certificate signing request (CSR) file. It has always accepted the "--pass" command line option, but ignored it and always generated an unencrypted private key. This commit fixes the utility so the --pass option is respected and the private key is encrypted.
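
As an illustration only (not part of this patch), the round trip now exercised by the tests can be sketched with the same BouncyCastle classes the tool and its tests use. The password, output file name, and the explicit JcePEMEncryptorBuilder("AES-128-CBC") below are placeholder assumptions that merely match what the new test asserts about the DEK algorithm; the tool's own getEncrypter helper may construct its encryptor differently.

    import org.bouncycastle.openssl.PEMEncryptedKeyPair;
    import org.bouncycastle.openssl.PEMKeyPair;
    import org.bouncycastle.openssl.PEMParser;
    import org.bouncycastle.openssl.bc.BcPEMDecryptorProvider;
    import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
    import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder;

    import java.io.FileReader;
    import java.io.FileWriter;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;

    public class EncryptedPemKeyRoundTrip {
        public static void main(String[] args) throws Exception {
            char[] password = "placeholder-password".toCharArray(); // hypothetical password
            KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();

            // Write the private key as an encrypted PEM block, analogous to what
            // writePemPrivateKey does when --pass is supplied.
            try (JcaPEMWriter pemWriter = new JcaPEMWriter(new FileWriter("example.key"))) {
                pemWriter.writeObject(keyPair.getPrivate(), new JcePEMEncryptorBuilder("AES-128-CBC").build(password));
            }

            // Reading it back: an encrypted key parses as PEMEncryptedKeyPair and must be decrypted,
            // which is what readPrivateKey in the updated tests verifies.
            try (PEMParser parser = new PEMParser(new FileReader("example.key"))) {
                PEMEncryptedKeyPair encrypted = (PEMEncryptedKeyPair) parser.readObject();
                PEMKeyPair decrypted = encrypted.decryptKeyPair(new BcPEMDecryptorProvider(password));
                System.out.println("Decrypted key algorithm OID: " + decrypted.getPrivateKeyInfo().getPrivateKeyAlgorithm().getAlgorithm());
            }
        }
    }
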
--- docs/changelog/106105.yaml | 5 + .../xpack/security/cli/CertificateTool.java | 71 ++++++--- .../security/cli/CertificateToolTests.java | 142 ++++++++++++++++-- 3 files changed, 180 insertions(+), 38 deletions(-) create mode 100644 docs/changelog/106105.yaml diff --git a/docs/changelog/106105.yaml b/docs/changelog/106105.yaml new file mode 100644 index 0000000000000..09f80e9e71e6d --- /dev/null +++ b/docs/changelog/106105.yaml @@ -0,0 +1,5 @@ +pr: 106105 +summary: Respect --pass option in certutil csr mode +area: TLS +type: bug +issues: [] diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 24ece3ff99bc4..a9c0653716851 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -590,6 +590,29 @@ static void verifyIssuer(Certificate certificate, CAInfo caInfo, Terminal termin throw new UserException(ExitCodes.CONFIG, "Certificate verification failed"); } } + + protected void writePemPrivateKey( + Terminal terminal, + OptionSet options, + ZipOutputStream outputStream, + JcaPEMWriter pemWriter, + String keyFileName, + PrivateKey privateKey + ) throws IOException { + final boolean usePassword = useOutputPassword(options); + final char[] outputPassword = getOutputPassword(options); + outputStream.putNextEntry(new ZipEntry(keyFileName)); + if (usePassword) { + withPassword(keyFileName, outputPassword, terminal, true, password -> { + pemWriter.writeObject(privateKey, getEncrypter(password)); + return null; + }); + } else { + pemWriter.writeObject(privateKey); + } + pemWriter.flush(); + outputStream.closeEntry(); + } } static class SigningRequestCommand extends CertificateCommand { @@ -621,9 +644,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce terminal.println(""); final Path output = resolveOutputPath(terminal, options, DEFAULT_CSR_ZIP); - final int keySize = getKeySize(options); - Collection certificateInformations = getCertificateInformationList(terminal, options); - generateAndWriteCsrs(output, keySize, certificateInformations); + generateAndWriteCsrs(terminal, options, output); terminal.println(""); terminal.println("Certificate signing requests have been written to " + output); @@ -639,12 +660,25 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce terminal.println("follow the SSL configuration instructions in the product guide."); } + // For testing + void generateAndWriteCsrs(Terminal terminal, OptionSet options, Path output) throws Exception { + final int keySize = getKeySize(options); + Collection certificateInformations = getCertificateInformationList(terminal, options); + generateAndWriteCsrs(terminal, options, output, keySize, certificateInformations); + } + /** * Generates certificate signing requests and writes them out to the specified file in zip format * * @param certInfo the details to use in the certificate signing requests */ - void generateAndWriteCsrs(Path output, int keySize, Collection certInfo) throws Exception { + void generateAndWriteCsrs( + Terminal terminal, + OptionSet options, + Path output, + int keySize, + Collection certInfo + ) throws Exception { fullyWriteZipFile(output, (outputStream, pemWriter) -> { for (CertificateInformation certificateInformation : certInfo) { 
KeyPair keyPair = CertGenUtils.generateKeyPair(keySize); @@ -667,10 +701,14 @@ void generateAndWriteCsrs(Path output, int keySize, Collection { for (CertificateInformation certificateInformation : certs) { CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); @@ -825,20 +861,10 @@ void generateAndWriteSignedCertificates( outputStream.closeEntry(); // write private key - final String keyFileName = entryBase + ".key"; - outputStream.putNextEntry(new ZipEntry(keyFileName)); - if (usePassword) { - withPassword(keyFileName, outputPassword, terminal, true, password -> { - pemWriter.writeObject(pair.key, getEncrypter(password)); - return null; - }); - } else { - pemWriter.writeObject(pair.key); - } - pemWriter.flush(); - outputStream.closeEntry(); + writePemPrivateKey(terminal, options, outputStream, pemWriter, entryBase + ".key", pair.key); } else { final String fileName = entryBase + ".p12"; + final char[] outputPassword = super.getOutputPassword(options); outputStream.putNextEntry(new ZipEntry(fileName)); writePkcs12( fileName, @@ -855,6 +881,7 @@ void generateAndWriteSignedCertificates( }); } else { assert certs.size() == 1; + final char[] outputPassword = super.getOutputPassword(options); CertificateInformation certificateInformation = certs.iterator().next(); CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); fullyWriteFile( diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index 702bfac2a3ea5..1a11234c98e6e 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -25,7 +25,10 @@ import org.bouncycastle.asn1.x509.GeneralName; import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.openssl.bc.BcPEMDecryptorProvider; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; @@ -77,6 +80,7 @@ import java.security.cert.X509Certificate; import java.security.interfaces.RSAKey; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -88,6 +92,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; @@ -266,9 +271,12 @@ public void testParsingFileWithInvalidDetails() throws Exception { assertThat(terminal.getErrorOutput(), containsString("could not be converted to a valid DN")); } - public void testGeneratingCsr() throws Exception { + public void testGeneratingCsrFromInstancesFile() throws Exception { Path tempDir = initTempDir(); Path outputFile = tempDir.resolve("out.zip"); + MockTerminal terminal = MockTerminal.create(); + final List args = new ArrayList<>(); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); Collection certInfos = 
CertificateTool.parseFile(instanceFile); assertEquals(4, certInfos.size()); @@ -276,7 +284,22 @@ public void testGeneratingCsr() throws Exception { assertFalse(Files.exists(outputFile)); int keySize = randomFrom(1024, 2048); - new CertificateTool.SigningRequestCommand().generateAndWriteCsrs(outputFile, keySize, certInfos); + final boolean encrypt = randomBoolean(); + final String password = encrypt ? randomAlphaOfLengthBetween(8, 12) : null; + if (encrypt) { + args.add("--pass"); + if (randomBoolean()) { + args.add(password); + } else { + for (var ignore : certInfos) { + terminal.addSecretInput(password); + } + } + } + + final CertificateTool.SigningRequestCommand command = new CertificateTool.SigningRequestCommand(); + final OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + command.generateAndWriteCsrs(terminal, options, outputFile, keySize, certInfos); assertTrue(Files.exists(outputFile)); Set perms = Files.getPosixFilePermissions(outputFile); @@ -292,7 +315,6 @@ public void testGeneratingCsr() throws Exception { assertTrue(Files.exists(zipRoot.resolve(filename))); final Path csr = zipRoot.resolve(filename + "/" + filename + ".csr"); assertTrue(Files.exists(csr)); - assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); PKCS10CertificationRequest request = readCertificateRequest(csr); assertEquals(certInfo.name.x500Principal.getName(), request.getSubject().toString()); Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); @@ -304,9 +326,84 @@ public void testGeneratingCsr() throws Exception { } else { assertEquals(0, extensionsReq.length); } + + final Path keyPath = zipRoot.resolve(filename + "/" + filename + ".key"); + assertTrue(Files.exists(keyPath)); + PEMKeyPair key = readPrivateKey(keyPath, password); + assertNotNull(key); } } + public void testGeneratingCsrFromCommandLineParameters() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + MockTerminal terminal = MockTerminal.create(); + final List args = new ArrayList<>(); + + final int keySize = randomFrom(1024, 2048); + args.add("--keysize"); + args.add(String.valueOf(keySize)); + + final String name = randomAlphaOfLengthBetween(4, 16); + args.add("--name"); + args.add(name); + + final List dns = randomList(0, 4, () -> randomAlphaOfLengthBetween(4, 8) + "." + randomAlphaOfLengthBetween(2, 5)); + dns.stream().map(s -> "--dns=" + s).forEach(args::add); + final List ip = randomList( + 0, + 2, + () -> Stream.generate(() -> randomIntBetween(10, 250)).limit(4).map(String::valueOf).collect(Collectors.joining(".")) + ); + ip.stream().map(s -> "--ip=" + s).forEach(args::add); + + final boolean encrypt = randomBoolean(); + final String password = encrypt ? 
randomAlphaOfLengthBetween(8, 12) : null; + if (encrypt) { + args.add("--pass"); + if (randomBoolean()) { + args.add(password); + } else { + terminal.addSecretInput(password); + } + } + + final CertificateTool.SigningRequestCommand command = new CertificateTool.SigningRequestCommand(); + final OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + command.generateAndWriteCsrs(terminal, options, outputFile); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + final Path zipRoot = getRootPathOfZip(outputFile); + + assertFalse(Files.exists(zipRoot.resolve("ca"))); + assertTrue(Files.exists(zipRoot.resolve(name))); + final Path csr = zipRoot.resolve(name + "/" + name + ".csr"); + assertTrue(Files.exists(csr)); + + PKCS10CertificationRequest request = readCertificateRequest(csr); + assertEquals("CN=" + name, request.getSubject().toString()); + + Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); + if (dns.size() > 0 || ip.size() > 0) { + assertEquals(1, extensionsReq.length); + Extensions extensions = Extensions.getInstance(extensionsReq[0].getAttributeValues()[0]); + GeneralNames subjAltNames = GeneralNames.fromExtensions(extensions, Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, ip, dns); + } else { + assertEquals(0, extensionsReq.length); + } + + final Path keyPath = zipRoot.resolve(name + "/" + name + ".key"); + assertTrue(Files.exists(keyPath)); + PEMKeyPair key = readPrivateKey(keyPath, password); + assertNotNull(key); + } + public void testGeneratingSignedPemCertificates() throws Exception { Path tempDir = initTempDir(); Path outputFile = tempDir.resolve("out.zip"); @@ -939,19 +1036,6 @@ private int getDurationInDays(X509Certificate cert) { return (int) ChronoUnit.DAYS.between(cert.getNotBefore().toInstant(), cert.getNotAfter().toInstant()); } - private void assertSubjAltNames(Certificate certificate, String ip, String dns) throws Exception { - final X509CertificateHolder holder = new X509CertificateHolder(certificate.getEncoded()); - final GeneralNames names = GeneralNames.fromExtensions(holder.getExtensions(), Extension.subjectAlternativeName); - final CertificateInformation certInfo = new CertificateInformation( - "n", - "n", - Collections.singletonList(ip), - Collections.singletonList(dns), - Collections.emptyList() - ); - assertSubjAltNames(names, certInfo); - } - /** * Checks whether there are keys in {@code keyStore} that are trusted by {@code trustStore}. 
*/ @@ -981,6 +1065,21 @@ private PKCS10CertificationRequest readCertificateRequest(Path path) throws Exce } } + private PEMKeyPair readPrivateKey(Path path, String password) throws Exception { + try (Reader reader = Files.newBufferedReader(path); PEMParser pemParser = new PEMParser(reader)) { + Object object = pemParser.readObject(); + if (password == null) { + assertThat(object, instanceOf(PEMKeyPair.class)); + return (PEMKeyPair) object; + } else { + assertThat(object, instanceOf(PEMEncryptedKeyPair.class)); + final PEMEncryptedKeyPair encryptedKeyPair = (PEMEncryptedKeyPair) object; + assertThat(encryptedKeyPair.getDekAlgName(), is("AES-128-CBC")); + return encryptedKeyPair.decryptKeyPair(new BcPEMDecryptorProvider(password.toCharArray())); + } + } + } + private X509Certificate readX509Certificate(InputStream input) throws Exception { List list = CertParsingUtils.readCertificates(input); assertEquals(1, list.size()); @@ -988,6 +1087,17 @@ private X509Certificate readX509Certificate(InputStream input) throws Exception return (X509Certificate) list.get(0); } + private void assertSubjAltNames(Certificate certificate, String ip, String dns) throws Exception { + final X509CertificateHolder holder = new X509CertificateHolder(certificate.getEncoded()); + final GeneralNames names = GeneralNames.fromExtensions(holder.getExtensions(), Extension.subjectAlternativeName); + assertSubjAltNames(names, Collections.singletonList(ip), Collections.singletonList(dns)); + } + + private void assertSubjAltNames(GeneralNames generalNames, List ip, List dns) throws Exception { + final CertificateInformation certInfo = new CertificateInformation("n", "n", ip, dns, Collections.emptyList()); + assertSubjAltNames(generalNames, certInfo); + } + private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformation certInfo) throws Exception { final int expectedCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); assertEquals(expectedCount, subjAltNames.getNames().length); From 5b6c51af7122762cb892e9996a62ef312669d296 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Fri, 8 Mar 2024 13:43:29 +0100 Subject: [PATCH 072/248] Health monitor concurrency fixes (#105674) Update the implementation of the `LocalHealthMonitor` to ensure there is always only 1 `UpdateHealthInfoCacheAction` request in-flight, and that only 1 monitoring thread can update the "internal state" (i.e. the `lastReportedValue` in the `HealthTracker`s) at the same time. By enforcing this, we're avoiding potential concurrency issues. This PR also includes several tests that aim to validate the behaviour of the `LocalHealthMonitor` in specific multi-threaded situations. 
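
To make the intended invariant easier to follow, here is a minimal, self-contained sketch of the guard this change introduces. It is a simplified illustration only, not the actual `Monitoring` code; `sendUpdate` and `scheduleNextRun` are stand-ins for the transport call and the scheduler.

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Consumer;

    class SingleInFlightRequestGuard {
        // Shared across restarts of the monitoring loop, so at most one request is ever in flight.
        private final AtomicBoolean inFlightRequest = new AtomicBoolean(false);

        void runOnce(Consumer<Runnable> sendUpdate, Runnable scheduleNextRun) {
            // Take the "lock"; if another request is still in flight, skip this round and retry later.
            if (inFlightRequest.compareAndSet(false, true) == false) {
                scheduleNextRun.run();
                return;
            }
            AtomicBoolean released = new AtomicBoolean(false);
            // Release the flag and schedule the next run exactly once, regardless of how this round ends.
            Runnable releaseAndScheduleNextRun = () -> {
                if (released.compareAndSet(false, true)) {
                    inFlightRequest.set(false);
                    scheduleNextRun.run();
                }
            };
            boolean handedOff = false;
            try {
                // The listener handed to the transport action must invoke releaseAndScheduleNextRun
                // on both response and failure.
                sendUpdate.accept(releaseAndScheduleNextRun);
                handedOff = true;
            } finally {
                // If nothing was sent (or sending threw), release here so the loop keeps running.
                if (handedOff == false) {
                    releaseAndScheduleNextRun.run();
                }
            }
        }
    }

In the actual change, the release is combined with a `cancelled` check in the response listener so that a cancelled `Monitoring` instance never writes back into the `HealthTracker`s' last reported values.
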
Resolves #105065 --- docs/changelog/105674.yaml | 6 + .../health/node/LocalHealthMonitor.java | 102 +++++++----- .../health/node/LocalHealthMonitorTests.java | 145 +++++++++++++++++- 3 files changed, 209 insertions(+), 44 deletions(-) create mode 100644 docs/changelog/105674.yaml diff --git a/docs/changelog/105674.yaml b/docs/changelog/105674.yaml new file mode 100644 index 0000000000000..7b8d04f4687a3 --- /dev/null +++ b/docs/changelog/105674.yaml @@ -0,0 +1,6 @@ +pr: 105674 +summary: Health monitor concurrency fixes +area: Health +type: bug +issues: + - 105065 diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index d5d336b88b8ad..5ff147a11a06a 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -77,6 +78,10 @@ public class LocalHealthMonitor implements ClusterStateListener { // Using a volatile reference to ensure that there is a single instance of monitoring running at all times. // No need for extra synchronization because all the writes are executed on the cluster applier thread. private volatile Monitoring monitoring; + // This variable keeps track of whether there's an in-flight request. We keep this variable here rather than the Monitoring class, + // as we'll create new instances of that class when we're (re)starting this local health monitoring process. + // This variable allows us to ensure that there's always, at most, 1 request in-flight, at any given moment. + private final AtomicBoolean inFlightRequest = new AtomicBoolean(false); private LocalHealthMonitor( Settings settings, @@ -152,7 +157,9 @@ private void stopMonitoring() { private void startMonitoringIfNecessary() { if (prerequisitesFulfilled && enabled) { if (isMonitorRunning() == false) { - monitoring = Monitoring.start(monitorInterval, threadPool, lastSeenHealthNode, healthTrackers, clusterService, client); + // First create the Monitoring instance, so we always have something to cancel. + monitoring = new Monitoring(monitorInterval, threadPool, healthTrackers, clusterService, client, inFlightRequest); + monitoring.start(); logger.debug("Local health monitoring started {}", monitoring); } else { logger.trace("Local health monitoring already started {}, skipping", monitoring); @@ -175,8 +182,6 @@ public void clusterChanged(ClusterChangedEvent event) { // On health node or on master node changes, the health node might be reset so the reported // health info gets reset to null, to ensure it will be resent. lastSeenHealthNode.set(currentHealthNode == null ? null : currentHealthNode.getId()); - // Reset the reference of each HealthTracker. - healthTrackers.forEach(HealthTracker::reset); if (logger.isDebugEnabled()) { String reason; if (healthNodeChanged && masterNodeChanged) { @@ -200,6 +205,11 @@ public void clusterChanged(ClusterChangedEvent event) { && currentMasterNode != null; if (prerequisitesFulfilled == false || healthNodeChanged || masterNodeChanged) { stopMonitoring(); + // Reset the reference of each HealthTracker. 
+ // By doing this after `stopMonitoring()`, we're sure the `Monitoring` instance has been cancelled and therefore won't + // touch the `lastReportedValue` of the health trackers after we've reset them (only the new `Monitoring` instance will + // be able to update them). + healthTrackers.forEach(HealthTracker::reset); } if (prerequisitesFulfilled) { startMonitoringIfNecessary(); @@ -227,61 +237,49 @@ private boolean hasHealthNodeChanged(DiscoveryNode currentHealthNode, ClusterCha * This class is responsible for running the health monitoring. It evaluates and checks the health info of this node * in the configured intervals. The first run happens upon initialization. If there is an exception, it will log it * and continue to schedule the next run. + * Usually, there will only be one instance of this class alive. However, when we're restarting + * the monitoring process (e.g. due to a health node change, see {@link LocalHealthMonitor#clusterChanged}), there will likely (shortly) + * be two instances alive at the same time. To avoid any concurrency issues, we're ensuring that there's always only one in-flight + * request and if a {@link Monitoring} instance is cancelled while a request is in-flight, we'll prevent it from updating the state + * of the {@link HealthTracker}s (and it'll be up to the next/new {@link Monitoring} instance to send a new request and update the + * {@link HealthTracker}s' state). */ static class Monitoring implements Runnable, Scheduler.Cancellable { private final TimeValue interval; private final Executor executor; - private final Scheduler scheduler; + private final ThreadPool threadPool; private final ClusterService clusterService; private final Client client; - private final AtomicReference lastSeenHealthNode; private final List> healthTrackers; + private final AtomicBoolean inFlightRequest; private volatile boolean cancelled = false; private volatile Scheduler.ScheduledCancellable scheduledRun; private Monitoring( TimeValue interval, - Scheduler scheduler, - Executor executor, - AtomicReference lastSeenHealthNode, + ThreadPool threadPool, List> healthTrackers, ClusterService clusterService, - Client client + Client client, + AtomicBoolean inFlightRequest ) { this.interval = interval; - this.executor = executor; - this.scheduler = scheduler; - this.lastSeenHealthNode = lastSeenHealthNode; + this.threadPool = threadPool; + this.executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); this.clusterService = clusterService; this.healthTrackers = healthTrackers; this.client = client; + this.inFlightRequest = inFlightRequest; } /** - * Creates a monitoring instance and starts the schedules the first run. + * Schedule the first run of the monitor. 
*/ - static Monitoring start( - TimeValue interval, - ThreadPool threadPool, - AtomicReference lastSeenHealthNode, - List> healthTrackers, - ClusterService clusterService, - Client client - ) { - Monitoring monitoring = new Monitoring( - interval, - threadPool, - threadPool.executor(ThreadPool.Names.MANAGEMENT), - lastSeenHealthNode, - healthTrackers, - clusterService, - client - ); - monitoring.scheduledRun = threadPool.schedule(monitoring, TimeValue.ZERO, monitoring.executor); - return monitoring; + public void start() { + scheduledRun = threadPool.schedule(this, TimeValue.ZERO, executor); } /** @@ -301,7 +299,13 @@ public boolean cancel() { return false; } cancelled = true; - scheduledRun.cancel(); + var scheduledRun = this.scheduledRun; + // There is a chance this Monitoring instance gets cancelled before the `scheduledRun` field is assigned. + // However, this is not a problem as the most important thing is the `cancelled` field being set to false in this class, + // as that field actually prevents any updates to the HealthTrackers' states. + if (scheduledRun != null) { + scheduledRun.cancel(); + } return true; } @@ -318,8 +322,18 @@ public void run() { if (cancelled) { return; } + // Before we do anything, we're first going to make sure there is no in-flight request at this moment. + // If that's the case, we'll acquire the "lock", which prevents any other thread/instance from sending any requests. + if (inFlightRequest.compareAndSet(false, true) == false) { + logger.debug("Not allowed to send health info update request due to in-flight request, will try again."); + scheduleNextRunIfNecessary(); + return; + } boolean nextRunScheduled = false; - Runnable scheduleNextRun = new RunOnce(this::scheduleNextRunIfNecessary); + Runnable releaseAndScheduleNextRun = new RunOnce(() -> { + inFlightRequest.set(false); + scheduleNextRunIfNecessary(); + }); try { List> healthProgresses = getHealthProgresses(); if (healthProgresses.isEmpty()) { @@ -330,13 +344,13 @@ public void run() { var builder = new UpdateHealthInfoCacheAction.Request.Builder().nodeId(clusterService.localNode().getId()); healthProgresses.forEach(changedHealthInfo -> changedHealthInfo.updateRequestBuilder(builder)); - var healthNodeId = lastSeenHealthNode.get(); var listener = ActionListener.wrap(response -> { - // Don't update the latest health info if the health node has changed while this request was being processed. - if (Objects.equals(healthNodeId, lastSeenHealthNode.get()) == false) { - return; + // Only record health progress if this monitoring instance hasn't been cancelled in the meantime. + // This avoids any unwanted writes to the HealthTrackers' states after a new monitoring instance has possibly + // already started. 
+ if (cancelled == false) { + healthProgresses.forEach(HealthTracker.HealthProgress::recordProgressIfRelevant); } - healthProgresses.forEach(HealthTracker.HealthProgress::recordProgressIfRelevant); }, e -> { if (e.getCause() instanceof NodeNotConnectedException || e.getCause() instanceof HealthNodeNotDiscoveredException) { logger.debug("Failed to connect to the health node [{}], will try again.", e.getCause().getMessage()); @@ -344,14 +358,18 @@ public void run() { logger.debug(() -> format("Failed to send health info to health node, will try again."), e); } }); - client.execute(UpdateHealthInfoCacheAction.INSTANCE, builder.build(), ActionListener.runAfter(listener, scheduleNextRun)); + client.execute( + UpdateHealthInfoCacheAction.INSTANCE, + builder.build(), + ActionListener.runAfter(listener, releaseAndScheduleNextRun) + ); nextRunScheduled = true; } catch (Exception e) { logger.warn(() -> format("Failed to run scheduled health monitoring on thread pool [%s]", executor), e); } finally { // If the next run isn't scheduled because for example the health info hasn't changed, we schedule it here. if (nextRunScheduled == false) { - scheduleNextRun.run(); + releaseAndScheduleNextRun.run(); } } } @@ -379,7 +397,7 @@ private void scheduleNextRunIfNecessary() { return; } try { - scheduledRun = scheduler.schedule(this, interval, executor); + scheduledRun = threadPool.schedule(this, interval, executor); } catch (final EsRejectedExecutionException e) { logger.debug(() -> format("Scheduled health monitoring was rejected on thread pool [%s]", executor), e); } diff --git a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java index 768b646d84beb..a4436fd637c5a 100644 --- a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java @@ -27,14 +27,17 @@ import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.metadata.HealthMetadata; +import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.health.node.tracker.HealthTracker; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -44,12 +47,15 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LocalHealthMonitorTests extends ESTestCase { private static final DiskHealthInfo GREEN = new DiskHealthInfo(HealthStatus.GREEN, null); + private static final DiskHealthInfo YELLOW = new DiskHealthInfo(HealthStatus.YELLOW, null); + private static final DiskHealthInfo RED = new DiskHealthInfo(HealthStatus.RED, null); private static ThreadPool threadPool; private ClusterService clusterService; private DiscoveryNode node; @@ -71,7 +77,6 @@ public static void tearDownThreadPool() { } @Before - @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); // Set-up cluster state @@ -127,6 +132,14 @@ 
public void setUp() throws Exception { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + + // Kill monitoring process running in the background after each test. + localHealthMonitor.setEnabled(false); + } + @SuppressWarnings("unchecked") public void testUpdateHealthInfo() throws Exception { doAnswer(invocation -> { @@ -241,8 +254,136 @@ public void testEnablingAndDisabling() throws Exception { assertBusy(() -> assertThat(mockHealthTracker.getLastReportedValue(), equalTo(nextHealthStatus))); } + /** + * This test verifies that the local health monitor is able to deal with the more complex situation where it is forced to restart + * (due to a health node change) while there is an in-flight request to the previous health node. + */ + public void testResetDuringInFlightRequest() throws Exception { + ClusterState initialState = ClusterStateCreationUtils.state(node, node, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + ClusterState newState = ClusterStateCreationUtils.state(node, frozenNode, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + when(clusterService.state()).thenReturn(initialState); + + var requestCounter = new AtomicInteger(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + assertThat(diskHealthInfo, equalTo(GREEN)); + var currentValue = requestCounter.incrementAndGet(); + // We only want to switch the health node during the first request. Any following request(s) should simply succeed. + if (currentValue == 1) { + when(clusterService.state()).thenReturn(newState); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("health-node-switch", newState, initialState)); + } + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(10)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("start-up", initialState, ClusterState.EMPTY_STATE)); + // Assert that we've sent the update request twice, even though the health info itself hasn't changed (i.e. we send again due to + // the health node change). + assertBusy(() -> assertThat(requestCounter.get(), equalTo(2))); + } + + /** + * The aim of this test is to rapidly fire off a series of state changes and make sure that the health node in the last cluster + * state actually gets the health info. + */ + public void testRapidStateChanges() throws Exception { + ClusterState state = ClusterStateCreationUtils.state(node, node, node, new DiscoveryNode[] { node, frozenNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + doReturn(state).when(clusterService).state(); + + // Keep track of the "current" health node. + var currentHealthNode = new AtomicReference<>(node); + // Keep a list of all the health nodes that have received a request. 
+ var updatedHealthNodes = new ArrayList(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + assertThat(diskHealthInfo, equalTo(GREEN)); + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + updatedHealthNodes.add(currentHealthNode.get()); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(0)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("start-up", state, ClusterState.EMPTY_STATE)); + + int count = randomIntBetween(10, 20); + for (int i = 0; i < count; i++) { + var previous = state; + state = mutateState(previous); + currentHealthNode.set(HealthNode.findHealthNode(state)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("switch", state, previous)); + } + + var lastHealthNode = DiscoveryNodeUtils.create("health-node", "health-node"); + var previous = state; + state = ClusterStateCreationUtils.state( + node, + previous.nodes().getMasterNode(), + lastHealthNode, + new DiscoveryNode[] { node, frozenNode, lastHealthNode } + ).copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + currentHealthNode.set(lastHealthNode); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("switch", state, previous)); + + assertBusy(() -> assertTrue(updatedHealthNodes.contains(lastHealthNode))); + } + + private ClusterState mutateState(ClusterState previous) { + var masterNode = previous.nodes().getMasterNode(); + var healthNode = HealthNode.findHealthNode(previous); + var randomNode = DiscoveryNodeUtils.create(randomAlphaOfLength(10), randomAlphaOfLength(10)); + switch (randomInt(1)) { + case 0 -> masterNode = randomValueOtherThan(masterNode, () -> randomFrom(node, frozenNode, randomNode)); + case 1 -> healthNode = randomValueOtherThan(healthNode, () -> randomFrom(node, frozenNode, randomNode)); + } + return ClusterStateCreationUtils.state(node, masterNode, healthNode, new DiscoveryNode[] { node, frozenNode, randomNode }) + .copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + } + + /** + * The aim of this test is to change the health of the health tracker several times and make sure that every change is sent to the + * health node (especially the last change). + */ + public void testChangingHealth() throws Exception { + // Keep a list of disk health info's that we've seen. + var sentHealthInfos = new ArrayList(); + doAnswer(invocation -> { + var diskHealthInfo = ((UpdateHealthInfoCacheAction.Request) invocation.getArgument(1)).getDiskHealthInfo(); + ActionListener listener = invocation.getArgument(2); + listener.onResponse(null); + sentHealthInfos.add(diskHealthInfo); + return null; + }).when(client).execute(any(), any(), any()); + + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(0)); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("initialize", clusterState, ClusterState.EMPTY_STATE)); + // Make sure the initial health value has been registered. + assertBusy(() -> assertFalse(sentHealthInfos.isEmpty())); + + var previousHealthInfo = mockHealthTracker.healthInfo; + var healthChanges = new AtomicInteger(1); + int count = randomIntBetween(10, 20); + for (int i = 0; i < count; i++) { + var newHealthInfo = randomFrom(GREEN, YELLOW); + mockHealthTracker.setHealthInfo(newHealthInfo); + // Check whether the health node has changed. If so, we're going to wait for it to be sent to the health node. 
+ healthChanges.addAndGet(newHealthInfo.equals(previousHealthInfo) ? 0 : 1); + assertBusy(() -> assertEquals(healthChanges.get(), sentHealthInfos.size())); + previousHealthInfo = newHealthInfo; + } + + mockHealthTracker.setHealthInfo(RED); + assertBusy(() -> assertTrue(sentHealthInfos.contains(RED))); + } + private static class MockHealthTracker extends HealthTracker { - private DiskHealthInfo healthInfo = GREEN; + private volatile DiskHealthInfo healthInfo = GREEN; @Override public DiskHealthInfo checkCurrentHealth() { From 8a7dfdfe2482531db72c7a846d7878ec9530cb6f Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 8 Mar 2024 08:02:50 -0500 Subject: [PATCH 073/248] Deprecate allowing `fields` in scenarios where its ignored (#106031) closes: https://github.com/elastic/elasticsearch/issues/106026 --- docs/changelog/106031.yaml | 13 +++++++++++++ .../join/mapper/ParentJoinFieldMapper.java | 11 +++++++++++ .../rest-api-spec/test/20_parent_join.yml | 17 +++++++++++++++++ .../AggregateDoubleMetricFieldMapper.java | 11 +++++++++++ .../mapper/ConstantKeywordFieldMapper.java | 11 +++++++++++ .../test/aggregate-metrics/10_basic.yml | 17 +++++++++++++++++ .../test/constant_keyword/10_basic.yml | 17 +++++++++++++++++ 7 files changed, 97 insertions(+) create mode 100644 docs/changelog/106031.yaml diff --git a/docs/changelog/106031.yaml b/docs/changelog/106031.yaml new file mode 100644 index 0000000000000..d0a0303e74164 --- /dev/null +++ b/docs/changelog/106031.yaml @@ -0,0 +1,13 @@ +pr: 106031 +summary: Deprecate allowing `fields` in scenarios where it is ignored +area: Mapping +type: deprecation +issues: [] +deprecation: + title: Deprecate allowing `fields` in scenarios where it is ignored + area: Mapping + details: The following mapped types have always ignored `fields` when using multi-fields. + This deprecation makes this clearer and we will completely disallow `fields` for + these mapped types in the future. + impact: "In the future, `join`, `aggregate_metric_double`, and `constant_keyword`,\ + \ will all disallow supplying `fields` as a parameter in the mapping." 
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index d6b7ccad4f3c5..508e438932e68 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -12,6 +12,8 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -52,6 +54,8 @@ */ public final class ParentJoinFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ParentJoinFieldMapper.class); + public static final String NAME = "join"; public static final String CONTENT_TYPE = "join"; @@ -112,6 +116,13 @@ protected Parameter[] getParameters() { @Override public ParentJoinFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } checkObjectOrNested(context, name()); final Map parentIdFields = new HashMap<>(); relations.get() diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml index 4f80e4bcb3b38..2ac3a8dd8315a 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml @@ -144,3 +144,20 @@ teardown: parent_id: type: child id: "1" + +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [join] mappers has no effect and will be forbidden in future" + indices.create: + index: join-multi-field + body: + mappings: + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" }, "fields": {"keyword": {"type": "keyword"}} } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 1581803920cdc..03f1aaf8577cf 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -15,6 +15,8 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateMathParser; import 
org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexMode; @@ -74,6 +76,8 @@ /** A {@link FieldMapper} for a field containing aggregate metrics such as min/max/value_count etc. */ public class AggregateDoubleMetricFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(AggregateDoubleMetricFieldMapper.class); + public static final String CONTENT_TYPE = "aggregate_metric_double"; public static final String SUBFIELD_SEPARATOR = "."; @@ -187,6 +191,13 @@ public Builder metric(MetricType metric) { @Override public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } if (defaultMetric.isConfigured() == false) { // If a single metric is contained, this should be the default if (metrics.getValue().size() == 1) { diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index f2b1f013212db..ebf060f520c5a 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateMathParser; @@ -62,6 +64,8 @@ */ public class ConstantKeywordFieldMapper extends FieldMapper { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ConstantKeywordFieldMapper.class); + public static final String CONTENT_TYPE = "constant_keyword"; private static ConstantKeywordFieldMapper toType(FieldMapper in) { @@ -98,6 +102,13 @@ protected Parameter[] getParameters() { @Override public ConstantKeywordFieldMapper build(MapperBuilderContext context) { + if (multiFieldsBuilder.hasMultiFields()) { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + CONTENT_TYPE + "_multifields", + "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" + ); + } return new ConstantKeywordFieldMapper( name(), new ConstantKeywordFieldType(context.buildFullName(name()), value.getValue(), meta.getValue()) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml index eda47355af0cf..2aa78a91f4dbe 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/10_basic.yml @@ -350,3 +350,20 @@ - match: { hits.hits.1.fields.metric.0.max: 1000 } - match: { hits.hits.1.fields.metric.0.sum: 5000 } - match: { 
hits.hits.1.fields.metric.0.value_count: 10 } +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [aggregate_metric_double] mappers has no effect and will be forbidden in future" + indices.create: + index: aggregate_metric_double-multi-field + body: + mappings: + properties: + aggregated: { "type": "aggregate_metric_double", "metrics": ["max"], "default_metric": "max", "fields": {"keyword": {"type": "keyword"}} } + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml index a89b24ff45593..ee08fcc3693d4 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/constant_keyword/10_basic.yml @@ -450,3 +450,20 @@ Cardinality agg: field: test - match: { aggregations.card.value: 1 } +--- +"deprecated use of multi-fields": + - skip: + version: " - 8.13.99" + reason: "deprecation added in 8.14" + features: warnings + + - do: + warnings: + - "Adding multifields to [constant_keyword] mappers has no effect and will be forbidden in future" + indices.create: + index: constant_keyword-multi-field + body: + mappings: + properties: + keyword: { "type": "constant_keyword", "fields": {"keyword": {"type": "keyword"}} } + From 026b305cd2f591d894d5b8cb5f712276c6c2fc28 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Fri, 8 Mar 2024 14:23:25 +0100 Subject: [PATCH 074/248] Ensure auto-expand indices have at least 1 replica in stateless (#106067) Relates ES-7986 --- .../cluster/metadata/AutoExpandReplicas.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index fe6efda67df29..106f4c1e4e387 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.settings.Setting; @@ -18,6 +19,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed; @@ -98,6 +100,13 @@ public boolean expandToAllNodes() { public int getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) { assert enabled : "should only be called when enabled"; + // Make sure in stateless auto-expand indices always have 1 replica to ensure all shard roles are always present + if (Objects.equals( + indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()), + "stateless" + )) { + return 1; + } int numMatchingDataNodes = 0; for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) { Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, 
discoveryNode, allocation); From 24228cd6ea2f6966daab81b3fcec57c7ec8424cf Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Fri, 8 Mar 2024 14:49:45 +0100 Subject: [PATCH 075/248] During ML maintenance, reset jobs in the reset state without a corresponding task. (#106062) * During ML maintenance, reset jobs in the reset state without a corresponding task. * Update docs/changelog/106062.yaml * Fix race condition in MlDailyMaintenanceServiceTests * Fix log level --- docs/changelog/106062.yaml | 6 + .../xpack/core/ml/job/config/Job.java | 4 + .../xpack/ml/MlDailyMaintenanceService.java | 143 ++++++++++----- .../ml/MlDailyMaintenanceServiceTests.java | 170 +++++++++++------- 4 files changed, 218 insertions(+), 105 deletions(-) create mode 100644 docs/changelog/106062.yaml diff --git a/docs/changelog/106062.yaml b/docs/changelog/106062.yaml new file mode 100644 index 0000000000000..f4ff3df4045e6 --- /dev/null +++ b/docs/changelog/106062.yaml @@ -0,0 +1,6 @@ +pr: 106062 +summary: "During ML maintenance, reset jobs in the reset state without a corresponding\ + \ task" +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 1686cdea4340a..fbb1a137bdc13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -485,6 +485,10 @@ public boolean isDeleting() { return deleting; } + public boolean isResetting() { + return blocked != null && Blocked.Reason.RESET.equals(blocked.getReason()); + } + public boolean allowLazyOpen() { return allowLazyOpen; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 71469fccc0032..47f0fde838b8e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -10,9 +10,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -27,12 +29,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import 
org.elasticsearch.xpack.core.ml.action.ResetJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.ml.utils.TypedChainTaskExecutor; @@ -42,6 +47,8 @@ import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; import java.util.function.Supplier; import static java.util.stream.Collectors.toList; @@ -206,24 +213,34 @@ private void triggerTasks() { } private void triggerAnomalyDetectionMaintenance() { - // Step 3: Log any error that could have happened + // Step 4: Log any error that could have happened ActionListener finalListener = ActionListener.wrap( unused -> {}, - e -> logger.error("An error occurred during [ML] maintenance tasks execution", e) + e -> logger.warn("An error occurred during [ML] maintenance tasks execution", e) ); - // Step 2: Delete expired data + // Step 3: Delete expired data ActionListener deleteJobsListener = ActionListener.wrap( unused -> triggerDeleteExpiredDataTask(finalListener), e -> { - logger.info("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e); - // Note: Steps 1 and 2 are independent of each other and step 2 is executed even if step 1 failed. + logger.warn("[ML] maintenance task: triggerResetJobsInStateResetWithoutResetTask failed", e); + // Note: Steps 1-3 are independent, so continue upon errors. triggerDeleteExpiredDataTask(finalListener); } ); - // Step 1: Delete jobs that are in deleting state - triggerDeleteJobsInStateDeletingWithoutDeletionTask(deleteJobsListener); + // Step 2: Reset jobs that are in resetting state without task + ActionListener resetJobsListener = ActionListener.wrap( + unused -> triggerResetJobsInStateResetWithoutResetTask(deleteJobsListener), + e -> { + logger.warn("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e); + // Note: Steps 1-3 are independent, so continue upon errors. 
+ triggerResetJobsInStateResetWithoutResetTask(deleteJobsListener); + } + ); + + // Step 1: Delete jobs that are in deleting state without task + triggerDeleteJobsInStateDeletingWithoutDeletionTask(resetJobsListener); } private void triggerDataFrameAnalyticsMaintenance() { @@ -257,73 +274,111 @@ private void triggerDeleteExpiredDataTask(ActionListener f // Visible for testing public void triggerDeleteJobsInStateDeletingWithoutDeletionTask(ActionListener finalListener) { - SetOnce> jobsInStateDeletingHolder = new SetOnce<>(); - - ActionListener>> deleteJobsActionListener = finalListener - .delegateFailureAndWrap((delegate, deleteJobsResponses) -> { - List jobIds = deleteJobsResponses.stream() - .filter(t -> t.v2().isAcknowledged() == false) - .map(Tuple::v1) - .map(DeleteJobAction.Request::getJobId) - .collect(toList()); + triggerJobsInStateWithoutMatchingTask( + "triggerDeleteJobsInStateDeletingWithoutDeletionTask", + Job::isDeleting, + DeleteJobAction.NAME, + taskInfo -> stripPrefixOrNull(taskInfo.description(), DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX), + DeleteJobAction.INSTANCE, + DeleteJobAction.Request::new, + finalListener + ); + } + + public void triggerResetJobsInStateResetWithoutResetTask(ActionListener finalListener) { + triggerJobsInStateWithoutMatchingTask( + "triggerResetJobsInStateResetWithoutResetTask", + Job::isResetting, + ResetJobAction.NAME, + taskInfo -> stripPrefixOrNull(taskInfo.description(), MlTasks.JOB_TASK_ID_PREFIX), + ResetJobAction.INSTANCE, + ResetJobAction.Request::new, + finalListener + ); + } + + /** + * @return If the string starts with the prefix, this returns the string without the prefix. + * Otherwise, this return null. + */ + private static String stripPrefixOrNull(String str, String prefix) { + return str == null || str.startsWith(prefix) == false ? null : str.substring(prefix.length()); + } + + /** + * Executes a request for each job in a state, while missing the corresponding task. This + * usually indicates the node originally executing the task has died, so retry the request. + * + * @param maintenanceTaskName Name of ML maintenance task; used only for logging. + * @param jobFilter Predicate for filtering the jobs. + * @param taskActionName Action name of the tasks corresponding to the jobs. + * @param jobIdExtractor Function to extract the job ID from the task info (in order to match to the job). + * @param actionType Action type of the request that should be (re)executed. + * @param requestCreator Function to create the request from the job ID. + * @param finalListener Listener that captures the final response. 
+ */ + private void triggerJobsInStateWithoutMatchingTask( + String maintenanceTaskName, + Predicate jobFilter, + String taskActionName, + Function jobIdExtractor, + ActionType actionType, + Function> requestCreator, + ActionListener finalListener + ) { + SetOnce> jobsInStateHolder = new SetOnce<>(); + + ActionListener>> jobsActionListener = finalListener.delegateFailureAndWrap( + (delegate, jobsResponses) -> { + List jobIds = jobsResponses.stream().filter(t -> t.v2().isAcknowledged() == false).map(Tuple::v1).collect(toList()); if (jobIds.isEmpty()) { - logger.info("Successfully completed [ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask"); + logger.info("Successfully completed [ML] maintenance task: {}", maintenanceTaskName); } else { - logger.info("The following ML jobs could not be deleted: [" + String.join(",", jobIds) + "]"); + logger.info("[ML] maintenance task {} failed for jobs: {}", maintenanceTaskName, jobIds); } delegate.onResponse(AcknowledgedResponse.TRUE); - }); + } + ); ActionListener listTasksActionListener = ActionListener.wrap(listTasksResponse -> { - Set jobsInStateDeleting = jobsInStateDeletingHolder.get(); - Set jobsWithDeletionTask = listTasksResponse.getTasks() - .stream() - .filter(t -> t.description() != null) - .filter(t -> t.description().startsWith(DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX)) - .map(t -> t.description().substring(DeleteJobAction.DELETION_TASK_DESCRIPTION_PREFIX.length())) - .collect(toSet()); - Set jobsInStateDeletingWithoutDeletionTask = Sets.difference(jobsInStateDeleting, jobsWithDeletionTask); - if (jobsInStateDeletingWithoutDeletionTask.isEmpty()) { + Set jobsInState = jobsInStateHolder.get(); + Set jobsWithTask = listTasksResponse.getTasks().stream().map(jobIdExtractor).filter(Objects::nonNull).collect(toSet()); + Set jobsInStateWithoutTask = Sets.difference(jobsInState, jobsWithTask); + if (jobsInStateWithoutTask.isEmpty()) { finalListener.onResponse(AcknowledgedResponse.TRUE); return; } - TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>( + TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>( EsExecutors.DIRECT_EXECUTOR_SERVICE, Predicates.always(), Predicates.always() ); - for (String jobId : jobsInStateDeletingWithoutDeletionTask) { - DeleteJobAction.Request request = new DeleteJobAction.Request(jobId); + for (String jobId : jobsInStateWithoutTask) { chainTaskExecutor.add( listener -> executeAsyncWithOrigin( client, ML_ORIGIN, - DeleteJobAction.INSTANCE, - request, - listener.delegateFailureAndWrap((l, response) -> l.onResponse(Tuple.tuple(request, response))) + actionType, + requestCreator.apply(jobId), + listener.delegateFailureAndWrap((l, response) -> l.onResponse(Tuple.tuple(jobId, response))) ) ); } - chainTaskExecutor.execute(deleteJobsActionListener); + chainTaskExecutor.execute(jobsActionListener); }, finalListener::onFailure); ActionListener getJobsActionListener = ActionListener.wrap(getJobsResponse -> { - Set jobsInStateDeleting = getJobsResponse.getResponse() - .results() - .stream() - .filter(Job::isDeleting) - .map(Job::getId) - .collect(toSet()); - if (jobsInStateDeleting.isEmpty()) { + Set jobsInState = getJobsResponse.getResponse().results().stream().filter(jobFilter).map(Job::getId).collect(toSet()); + if (jobsInState.isEmpty()) { finalListener.onResponse(AcknowledgedResponse.TRUE); return; } - jobsInStateDeletingHolder.set(jobsInStateDeleting); + jobsInStateHolder.set(jobsInState); executeAsyncWithOrigin( client, ML_ORIGIN, 
TransportListTasksAction.TYPE, - new ListTasksRequest().setActions(DeleteJobAction.NAME), + new ListTasksRequest().setActions(taskActionName), listTasksActionListener ); }, finalListener::onFailure); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java index 618733ccccb06..f79dd645bfea5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.ResetJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.junit.After; import org.junit.Before; @@ -38,8 +39,10 @@ import org.mockito.stubbing.Answer; import java.util.Collections; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.same; @@ -79,29 +82,21 @@ public void testScheduledTriggering() throws InterruptedException { doAnswer(withResponse(new GetJobsAction.Response(new QueryPage<>(Collections.emptyList(), 0, new ParseField(""))))).when(client) .execute(same(GetJobsAction.INSTANCE), any(), any()); - int triggerCount = randomIntBetween(2, 4); - CountDownLatch latch = new CountDownLatch(triggerCount); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + int triggerCount = randomIntBetween(1, 3); + executeMaintenanceTriggers(triggerCount); - verify(client, times(triggerCount - 1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); - verify(client, times(triggerCount - 1)).execute(same(GetJobsAction.INSTANCE), any(), any()); - verify(mlAssignmentNotifier, times(triggerCount - 1)).auditUnassignedMlTasks(any(), any()); + verify(client, times(triggerCount)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(client, times(2 * triggerCount)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(mlAssignmentNotifier, times(triggerCount)).auditUnassignedMlTasks(any(), any()); } public void testScheduledTriggeringWhileUpgradeModeIsEnabled() throws InterruptedException { when(clusterService.state()).thenReturn(createClusterState(true)); - int triggerCount = randomIntBetween(2, 4); - CountDownLatch latch = new CountDownLatch(triggerCount); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + int triggerCount = randomIntBetween(1, 3); + executeMaintenanceTriggers(triggerCount); - verify(clusterService, times(triggerCount - 1)).state(); + verify(clusterService, times(triggerCount)).state(); verifyNoMoreInteractions(client, clusterService, mlAssignmentNotifier); } @@ -143,11 +138,7 @@ public void testBothTasksAreTriggered_BothTasksFail() throws InterruptedExceptio public void testNoAnomalyDetectionTasksWhenDisabled() throws InterruptedException { when(clusterService.state()).thenReturn(createClusterState(false)); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = 
createService(latch, client, false, randomBoolean(), randomBoolean())) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1, false, randomBoolean(), randomBoolean()); verify(client, never()).threadPool(); verify(client, never()).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); @@ -160,15 +151,11 @@ private void assertThatBothTasksAreTriggered(Answer deleteExpiredDataAnswer, doAnswer(deleteExpiredDataAnswer).when(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); doAnswer(getJobsAnswer).when(client).execute(same(GetJobsAction.INSTANCE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, Mockito.atLeast(2)).threadPool(); - verify(client, Mockito.atLeast(1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); - verify(client, Mockito.atLeast(1)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(3)).threadPool(); + verify(client, times(1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(mlAssignmentNotifier, Mockito.atLeast(1)).auditUnassignedMlTasks(any(), any()); } @@ -202,14 +189,10 @@ public void testJobInDeletingStateAlreadyHasDeletionTask() throws InterruptedExc .when(client) .execute(same(TransportListTasksAction.TYPE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, times(3)).threadPool(); - verify(client).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(4)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); verify(mlAssignmentNotifier).auditUnassignedMlTasks(any(), any()); @@ -240,14 +223,10 @@ private void testJobInDeletingStateDoesNotHaveDeletionTask(boolean deleted) thro ).execute(same(TransportListTasksAction.TYPE), any(), any()); doAnswer(withResponse(AcknowledgedResponse.of(deleted))).when(client).execute(same(DeleteJobAction.INSTANCE), any(), any()); - CountDownLatch latch = new CountDownLatch(2); - try (MlDailyMaintenanceService service = createService(latch, client)) { - service.start(); - latch.await(5, TimeUnit.SECONDS); - } + executeMaintenanceTriggers(1); - verify(client, times(4)).threadPool(); - verify(client).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client, times(5)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); verify(client).execute(same(DeleteJobAction.INSTANCE), any(), any()); verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); @@ -255,29 +234,98 @@ private void testJobInDeletingStateDoesNotHaveDeletionTask(boolean deleted) thro verifyNoMoreInteractions(client, mlAssignmentNotifier); } - private MlDailyMaintenanceService createService(CountDownLatch latch, Client client) { - return createService(latch, client, true, true, true); + public void 
testJobInResettingState_doesNotHaveResetTask() throws InterruptedException { + testJobInResettingState(false); + } + + public void testJobInResettingState_hasResetTask() throws InterruptedException { + testJobInResettingState(true); + } + + private void testJobInResettingState(boolean hasResetTask) throws InterruptedException { + String jobId = "job-in-state-resetting"; + when(clusterService.state()).thenReturn(createClusterState(false)); + doAnswer(withResponse(new DeleteExpiredDataAction.Response(true))).when(client) + .execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + Job job = mock(Job.class); + when(job.getId()).thenReturn(jobId); + when(job.isDeleting()).thenReturn(false); + when(job.isResetting()).thenReturn(true); + doAnswer(withResponse(new GetJobsAction.Response(new QueryPage<>(List.of(job), 1, new ParseField(""))))).when(client) + .execute(same(GetJobsAction.INSTANCE), any(), any()); + List tasks = hasResetTask + ? List.of( + new TaskInfo( + new TaskId("test", 123), + "test", + "test", + ResetJobAction.NAME, + "job-" + jobId, + null, + 0, + 0, + true, + false, + new TaskId("test", 456), + Collections.emptyMap() + ) + ) + : List.of(); + doAnswer(withResponse(new ListTasksResponse(tasks, List.of(), List.of()))).when(client) + .execute(same(TransportListTasksAction.TYPE), any(), any()); + doAnswer(withResponse(AcknowledgedResponse.of(true))).when(client).execute(same(ResetJobAction.INSTANCE), any(), any()); + + executeMaintenanceTriggers(1); + + verify(client, times(hasResetTask ? 4 : 5)).threadPool(); + verify(client, times(2)).execute(same(GetJobsAction.INSTANCE), any(), any()); + verify(client).execute(same(TransportListTasksAction.TYPE), any(), any()); + if (hasResetTask == false) { + verify(client).execute(same(ResetJobAction.INSTANCE), any(), any()); + } + verify(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any()); + verify(mlAssignmentNotifier).auditUnassignedMlTasks(any(), any()); + verifyNoMoreInteractions(client, mlAssignmentNotifier); + } + + private void executeMaintenanceTriggers(int triggerCount) throws InterruptedException { + executeMaintenanceTriggers(triggerCount, true, true, true); } - private MlDailyMaintenanceService createService( - CountDownLatch latch, - Client client, + private void executeMaintenanceTriggers( + int triggerCount, boolean isAnomalyDetectionEnabled, boolean isDataFrameAnalyticsEnabled, boolean isNlpEnabled - ) { - return new MlDailyMaintenanceService(Settings.EMPTY, threadPool, client, clusterService, mlAssignmentNotifier, () -> { - // We need to be careful that an unexpected iteration doesn't get squeezed in by the maintenance threadpool in - // between the latch getting counted down to zero and the main test thread stopping the maintenance service. - // This could happen if the main test thread happens to be waiting for a CPU for the whole 100ms after the - // latch counts down to zero. - if (latch.getCount() > 0) { - latch.countDown(); - return TimeValue.timeValueMillis(100); - } else { - return TimeValue.timeValueHours(1); - } - }, isAnomalyDetectionEnabled, isDataFrameAnalyticsEnabled, isNlpEnabled); + ) throws InterruptedException { + // The scheduleProvider is called upon scheduling. The latch waits for (triggerCount + 1) + // schedules to happen, which means that the maintenance task is executed triggerCount + // times. The first triggerCount invocations of the scheduleProvider return 100ms, which + // is the time between the executed maintenance tasks. 
+ // After that, maintenance task (triggerCount + 1) is scheduled after 100sec, the latch is + // released, the service is closed, and the method returns. Task (triggerCount + 1) is + // therefore never executed. + CountDownLatch latch = new CountDownLatch(triggerCount + 1); + Supplier scheduleProvider = () -> { + latch.countDown(); + return TimeValue.timeValueMillis(latch.getCount() > 0 ? 100 : 100_000); + }; + try ( + MlDailyMaintenanceService service = new MlDailyMaintenanceService( + Settings.EMPTY, + threadPool, + client, + clusterService, + mlAssignmentNotifier, + scheduleProvider, + isAnomalyDetectionEnabled, + isDataFrameAnalyticsEnabled, + isNlpEnabled + ) + ) { + service.start(); + latch.await(5, TimeUnit.SECONDS); + } } private static ClusterState createClusterState(boolean isUpgradeMode) { From e4749105145545177b24b5949f57fffa5495798b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 8 Mar 2024 15:14:48 +0100 Subject: [PATCH 076/248] Mute testIsFailureIndex (#106124) Mute: https://github.com/elastic/elasticsearch/issues/106123 --- .../java/org/elasticsearch/cluster/metadata/DataStreamTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index a07cd8e60411a..141434842a4bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1842,6 +1842,7 @@ public void testWriteFailureIndex() { assertThat(failureStoreDataStream.getFailureStoreWriteIndex(), is(writeFailureIndex)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106123") public void testIsFailureIndex() { boolean hidden = randomBoolean(); boolean system = hidden && randomBoolean(); From 18a509a18f9eadf7cee5593f38d18e9ad1854177 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Fri, 8 Mar 2024 08:19:27 -0700 Subject: [PATCH 077/248] (DOC+) Node Stats fs.available reflects XFS quotas (#106085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving https://github.com/elastic/elasticsearch/pull/103472 here. --- 👋 howdy, team! Could we include "XFS quotas" as an example for "depending on OS or process level restrictions" for this doc's searchability for users to better understand how to investigate this potential lever's impact? TIA! --- docs/reference/cluster/nodes-stats.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index eacbabb99f045..e2848f9a8e70f 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -1792,14 +1792,14 @@ Total number of unallocated bytes in all file stores. `available`:: (<>) Total disk space available to this Java virtual machine on all file -stores. Depending on OS or process level restrictions, this might appear +stores. Depending on OS or process level restrictions (e.g. XFS quotas), this might appear less than `free`. This is the actual amount of free disk space the {es} node can utilise. `available_in_bytes`:: (integer) Total number of bytes available to this Java virtual machine on all file -stores. 
Depending on OS or process level restrictions, this might appear +stores. Depending on OS or process level restrictions (e.g. XFS quotas), this might appear less than `free_in_bytes`. This is the actual amount of free disk space the {es} node can utilise. ======= From 6892f1c8370cdd41132f7c6411e18c219e8f50b8 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 8 Mar 2024 15:26:15 +0000 Subject: [PATCH 078/248] [ML] Make two tests work regardless of failure store feature flag (#106122) Two ML unit tests make assertions that are sensitive to whether the new failure store functionality is available or not. Since this functionality is still in a state of flux, accept error messages either with or without the new parts for now. (Even if the old text was left as an option forever it wouldn't really harm the ability of the tests to detect regressions as it's virtually impossible that some other bug would cause the text to go back to exactly what it was before.) Fixes #106107 Fixes #106108 --- .../datafeed/DatafeedNodeSelectorTests.java | 77 +++++++++++++------ 1 file changed, 54 insertions(+), 23 deletions(-) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 517c851d43804..4bb612921876e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -56,6 +56,7 @@ import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutorTests.addJobTask; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -313,7 +314,6 @@ public void testShardNotAllActive() { .checkDatafeedTaskCanBeCreated(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106107") public void testIndexDoesntExist() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); @@ -335,12 +335,22 @@ public void testIndexDoesntExist() { assertNull(result.getExecutorNode()); assertThat( result.getExplanation(), - equalTo( - "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " - + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " - + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " - + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + equalTo( + "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + 
"forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " + + "with exception [no such index [not_foo]]" + ), + equalTo( + "cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]" + ) ) ); @@ -357,14 +367,25 @@ public void testIndexDoesntExist() { ); assertThat( e.getMessage(), - containsString( - "No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id] because it failed resolving " - + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " - + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " - + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, " - + "include_regular_indices=true, include_failure_indices=false, allow_failure_indices=true]] " - + "with exception [no such index [not_foo]]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving " + + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " + + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " + + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true" + + "]] with exception [no such index [not_foo]]]" + ), + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving " + + "indices given [not_foo] and indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, " + + "expand_wildcards_open=true, expand_wildcards_closed=false, expand_wildcards_hidden=false, " + + "allow_aliases_to_multiple_indices=true, forbid_closed_indices=true, ignore_aliases=false, " + + "ignore_throttled=true, include_regular_indices=true, include_failure_indices=false, " + + "allow_failure_indices=true]] with exception [no such index [not_foo]]]" + ) ) ); } @@ -498,7 +519,6 @@ public void testSelectNode_jobTaskStale() { .checkDatafeedTaskCanBeCreated(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106108") public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { // Here we test that when there are 2 problems, the most critical gets reported first. 
// In this case job is Opening (non-critical) and the index does not exist (critical) @@ -525,13 +545,24 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { ); assertThat( e.getMessage(), - containsString( - "No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " - + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " - + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " - + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " - + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]]" + anyOf( + // TODO remove this first option and only allow the second once the failure store functionality is permanently switched on + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true]] " + + "with exception [no such index [not_foo]]]" + ), + containsString( + "No node found to start datafeed [datafeed_id], allocation explanation " + + "[cannot start datafeed [datafeed_id] because it failed resolving indices given [not_foo] and " + + "indices_options [IndicesOptions[ignore_unavailable=false, allow_no_indices=true, expand_wildcards_open=true, " + + "expand_wildcards_closed=false, expand_wildcards_hidden=false, allow_aliases_to_multiple_indices=true, " + + "forbid_closed_indices=true, ignore_aliases=false, ignore_throttled=true, include_regular_indices=true, " + + "include_failure_indices=false, allow_failure_indices=true]] with exception [no such index [not_foo]]]" + ) ) ); } From d5368ed816c96d115a9f7b63e49ac0f202547726 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 8 Mar 2024 17:20:18 +0100 Subject: [PATCH 079/248] Has Privileges API customization (#105981) This PR makes request building customizable for the Has Privileges API. 
Relates: ES-7829 --- .../user/HasPrivilegesRequestBuilder.java | 6 ----- .../HasPrivilegesRequestBuilderFactory.java | 23 +++++++++++++++++++ .../xpack/security/Security.java | 8 ++++++- .../action/user/RestHasPrivilegesAction.java | 13 +++++++++-- .../user/RestHasPrivilegesActionTests.java | 11 +++++++-- 5 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java index d32a911a440d6..52946c05cf87b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java @@ -24,17 +24,11 @@ public HasPrivilegesRequestBuilder(ElasticsearchClient client) { super(client, HasPrivilegesAction.INSTANCE, new HasPrivilegesRequest()); } - /** - * Set the username of the user that should enabled or disabled. Must not be {@code null} - */ public HasPrivilegesRequestBuilder username(String username) { request.username(username); return this; } - /** - * Set whether the user should be enabled or not - */ public HasPrivilegesRequestBuilder source(String username, BytesReference source, XContentType xContentType) throws IOException { final AuthorizationEngine.PrivilegesToCheck privilegesToCheck = RoleDescriptor.parsePrivilegesToCheck( username + "/has_privileges", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java new file mode 100644 index 0000000000000..e610e40333da8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilderFactory.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.client.internal.Client; + +public interface HasPrivilegesRequestBuilderFactory { + HasPrivilegesRequestBuilder create(Client client, boolean restrictRequest); + + class Default implements HasPrivilegesRequestBuilderFactory { + + @Override + public HasPrivilegesRequestBuilder create(Client client, boolean restrictRequest) { + assert false == restrictRequest; + return new HasPrivilegesRequestBuilder(client); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 219f645a92bbe..c6089df37bca2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -175,6 +175,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; @@ -568,6 +569,7 @@ public class Security extends Plugin private final SetOnce updateApiKeyRequestTranslator = new SetOnce<>(); private final SetOnce bulkUpdateApiKeyRequestTranslator = new SetOnce<>(); private final SetOnce getBuiltinPrivilegesResponseTranslator = new SetOnce<>(); + private final SetOnce hasPrivilegesRequestBuilderFactory = new SetOnce<>(); private final SetOnce fileRolesStore = new SetOnce<>(); private final SetOnce operatorPrivilegesService = new SetOnce<>(); private final SetOnce reservedRoleMappingAction = new SetOnce<>(); @@ -839,6 +841,9 @@ Collection createComponents( if (bulkUpdateApiKeyRequestTranslator.get() == null) { bulkUpdateApiKeyRequestTranslator.set(new BulkUpdateApiKeyRequestTranslator.Default()); } + if (hasPrivilegesRequestBuilderFactory.get() == null) { + hasPrivilegesRequestBuilderFactory.trySet(new HasPrivilegesRequestBuilderFactory.Default()); + } final Map, ActionListener>>> customRoleProviders = new LinkedHashMap<>(); for (SecurityExtension extension : securityExtensions) { @@ -1449,7 +1454,7 @@ public List getRestHandlers( new RestDeleteRoleAction(settings, getLicenseState()), new RestChangePasswordAction(settings, securityContext.get(), getLicenseState()), new RestSetEnabledAction(settings, getLicenseState()), - new RestHasPrivilegesAction(settings, securityContext.get(), getLicenseState()), + new RestHasPrivilegesAction(settings, securityContext.get(), getLicenseState(), hasPrivilegesRequestBuilderFactory.get()), new RestGetUserPrivilegesAction(settings, securityContext.get(), getLicenseState()), new RestGetRoleMappingsAction(settings, getLicenseState()), new RestPutRoleMappingAction(settings, getLicenseState()), @@ -2056,6 +2061,7 @@ public void loadExtensions(ExtensionLoader loader) { loadSingletonExtensionAndSetOnce(loader, updateApiKeyRequestTranslator, UpdateApiKeyRequestTranslator.class); loadSingletonExtensionAndSetOnce(loader, bulkUpdateApiKeyRequestTranslator, BulkUpdateApiKeyRequestTranslator.class); loadSingletonExtensionAndSetOnce(loader, 
createApiKeyRequestBuilderFactory, CreateApiKeyRequestBuilderFactory.class); + loadSingletonExtensionAndSetOnce(loader, hasPrivilegesRequestBuilderFactory, HasPrivilegesRequestBuilderFactory.class); } private void loadSingletonExtensionAndSetOnce(ExtensionLoader loader, SetOnce setOnce, Class clazz) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java index d3a969fce8841..5c9d68d3c8b66 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.user.User; @@ -43,10 +44,17 @@ public class RestHasPrivilegesAction extends SecurityBaseRestHandler { private final SecurityContext securityContext; + private final HasPrivilegesRequestBuilderFactory builderFactory; - public RestHasPrivilegesAction(Settings settings, SecurityContext securityContext, XPackLicenseState licenseState) { + public RestHasPrivilegesAction( + Settings settings, + SecurityContext securityContext, + XPackLicenseState licenseState, + HasPrivilegesRequestBuilderFactory builderFactory + ) { super(settings, licenseState); this.securityContext = securityContext; + this.builderFactory = builderFactory; } @Override @@ -83,7 +91,8 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c if (username == null) { return restChannel -> { throw new ElasticsearchSecurityException("there is no authenticated user"); }; } - HasPrivilegesRequestBuilder requestBuilder = new HasPrivilegesRequestBuilder(client).source(username, content.v2(), content.v1()); + HasPrivilegesRequestBuilder requestBuilder = builderFactory.create(client, request.hasParam(RestRequest.PATH_RESTRICTED)) + .source(username, content.v2(), content.v1()); return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(HasPrivilegesResponse response, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java index 56eeb3405875c..02b7b88c29d0d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -41,7 +42,12 @@ public class RestHasPrivilegesActionTests extends ESTestCase { */ public void testBodyConsumed() throws Exception { final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final RestHasPrivilegesAction action = new RestHasPrivilegesAction(Settings.EMPTY, mock(SecurityContext.class), licenseState); + final RestHasPrivilegesAction action = new RestHasPrivilegesAction( + Settings.EMPTY, + mock(SecurityContext.class), + licenseState, + new HasPrivilegesRequestBuilderFactory.Default() + ); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_security/user/_has_privileges/") @@ -63,7 +69,8 @@ public void testSecurityDisabled() throws Exception { final RestHasPrivilegesAction action = new RestHasPrivilegesAction( securityDisabledSettings, mock(SecurityContext.class), - licenseState + licenseState, + new HasPrivilegesRequestBuilderFactory.Default() ); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); From 2f0f917b38805adf54154ea09d57def793a8be33 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Fri, 8 Mar 2024 17:38:33 +0100 Subject: [PATCH 080/248] ES|QL: Disable optimizations that rely on Expression.nullable() (#105691) --- docs/changelog/105691.yaml | 5 + .../src/main/resources/conditional.csv-spec | 41 ++ .../src/main/resources/eval.csv-spec | 6 +- .../esql/optimizer/LogicalPlanOptimizer.java | 29 +- .../xpack/esql/optimizer/FoldNull.java | 3 +- .../LocalLogicalPlanOptimizerTests.java | 77 ++++ .../optimizer/LogicalPlanOptimizerTests.java | 368 +++++++++++++++++- .../esql/optimizer/PropagateNullable.java | 18 + .../xpack/ql/optimizer/OptimizerRules.java | 28 +- 9 files changed, 543 insertions(+), 32 deletions(-) create mode 100644 docs/changelog/105691.yaml create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java diff --git a/docs/changelog/105691.yaml b/docs/changelog/105691.yaml new file mode 100644 index 0000000000000..89797782b06ee --- /dev/null +++ b/docs/changelog/105691.yaml @@ -0,0 +1,5 @@ +pr: 105691 +summary: "ES|QL: Disable optimizations that rely on Expression.nullable()" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index 177e169387642..64c5a7358ce22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -127,3 +127,44 @@ error_rate:double | hour:date 0.6 |2023-10-23T13:00:00.000Z // end::docsCaseHourlyErrorRate-result[] ; + + +nullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NULL; +warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value + +a:integer | b:integer | sum:integer +5 | [1, 2] | null +; + + +notNullOnMultivaluesMathOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 | WHERE sum IS NOT NULL; +warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:37: java.lang.IllegalArgumentException: single-value function encountered multi-value + +a:integer | b:integer | sum:integer +; + + +nullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NULL; + +a:integer | b:integer | same:boolean +5 | [1, 2] | null +; + + +notNullOnMultivaluesComparisonOperation#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; + +a:integer | b:integer | same:boolean +; + + +notNullOnMultivaluesComparisonOperationWithPartialMatch#[skip:-8.13.99,reason:fixed in 8.14+] +ROW a = 5, b = [ 5, 2 ]| EVAL same = a == b| LIMIT 1 | WHERE same IS NOT NULL; + +a:integer | b:integer | same:boolean +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index a8e5a5930a06b..7d18d2616e376 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -200,14 +200,14 @@ Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. |Kyoichi ; -roundArrays#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +roundArrays#[skip:-8.13.99, reason:Alert order changed in 8.14] row a = [1.2], b = [2.4, 7.9] | eval c = round(a), d = round(b), e = round([1.2]), f = round([1.2, 4.6]), g = round([1.14], 1), h = round([1.14], [1, 2]); +warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:56: java.lang.IllegalArgumentException: single-value function encountered multi-value warning:Line 1:88: evaluation of [round([1.2, 4.6])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:88: java.lang.IllegalArgumentException: single-value function encountered multi-value warning:Line 1:133: evaluation of [round([1.14], [1, 2])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:133: java.lang.IllegalArgumentException: single-value function encountered multi-value -warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 1:56: java.lang.IllegalArgumentException: single-value function encountered multi-value a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double 1.2 | [2.4, 7.9] | 1.0 | null | 1.0 | null | 1.1 | null diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index db5751245c40a..7a5e39fea8f95 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -44,7 +45,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules; -import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BinaryComparisonSimplification; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BooleanFunctionEqualsElimination; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.ConstantFolding; import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.LiteralsOnTheRight; @@ -82,9 +82,7 @@ import static java.util.Collections.singleton; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; import static org.elasticsearch.xpack.ql.expression.Expressions.asAttributes; -import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.FoldNull; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateEquals; -import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateNullable; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection; public class LogicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -120,12 +118,11 @@ protected static Batch operators() { new ConvertStringToByteRef(), new FoldNull(), new SplitInWithFoldableValue(), - new ConstantFolding(), new PropagateEvalFoldables(), + new ConstantFolding(), // boolean new BooleanSimplification(), new LiteralsOnTheRight(), - new BinaryComparisonSimplification(), // needs to occur before BinaryComparison combinations (see class) new PropagateEquals(), new PropagateNullable(), @@ -1587,4 +1584,26 @@ private static LogicalPlan normalize(Aggregate aggregate, AttributeMap newChildren = new ArrayList<>(exp.children()); + newChildren.removeIf(e -> e.semanticEquals(nullExp)); + if (newChildren.size() != exp.children().size() && newChildren.size() > 0) { // coalesce needs at least one input + return exp.replaceChildren(newChildren); + } + } + return Literal.of(exp, null); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java index 40b03be668606..5fa3dae744251 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/FoldNull.java @@ -8,9 +8,8 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.optimizer.OptimizerRules; -public class FoldNull extends OptimizerRules.FoldNull { +public class FoldNull extends LogicalPlanOptimizer.FoldNull { @Override public Expression rule(Expression e) { return super.rule(e); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 1b2210dbd5f4e..6370b0198ae88 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -14,18 +14,22 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; +import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; +import org.elasticsearch.xpack.ql.optimizer.OptimizerRulesTests; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; @@ -50,6 +54,10 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForExistingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForMissingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizerTests.getFieldAttribute; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizerTests.greaterThanOf; +import static org.elasticsearch.xpack.ql.TestUtils.relation; +import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -64,6 +72,8 @@ public class LocalLogicalPlanOptimizerTests extends ESTestCase { private static Map mapping; private static final Literal ONE = L(1); + private static final Literal TWO = L(2); + private static final Literal THREE = L(3); @BeforeClass public static void init() { @@ -348,6 +358,73 @@ public void testSparseDocument() throws Exception { assertThat(Alias.unwrap(field).fold(), 
Matchers.nullValue()); } + // InferIsNotNull + + public void testIsNotNullOnIsNullField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + Expression inn = isNotNull(fieldA); + Filter f = new Filter(EMPTY, relation, inn); + + assertEquals(f, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnOperatorWithOneField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + Expression inn = isNotNull(new Add(EMPTY, fieldA, ONE)); + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnOperatorWithTwoFields() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var fieldB = getFieldAttribute("b"); + Expression inn = isNotNull(new Add(EMPTY, fieldA, fieldB)); + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnFunctionWithOneField() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var pattern = L("abc"); + Expression inn = isNotNull( + new And( + EMPTY, + new OptimizerRulesTests.TestStartsWith(EMPTY, fieldA, pattern, false), + greaterThanOf(new Add(EMPTY, ONE, TWO), THREE) + ) + ); + + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, isNotNull(fieldA), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + public void testIsNotNullOnFunctionWithTwoFields() { + EsRelation relation = relation(); + var fieldA = getFieldAttribute("a"); + var fieldB = getFieldAttribute("b"); + var pattern = L("abc"); + Expression inn = isNotNull(new OptimizerRulesTests.TestStartsWith(EMPTY, fieldA, fieldB, false)); + + Filter f = new Filter(EMPTY, relation, inn); + Filter expected = new Filter(EMPTY, relation, new And(EMPTY, new And(EMPTY, isNotNull(fieldA), isNotNull(fieldB)), inn)); + + assertEquals(expected, new LocalLogicalPlanOptimizer.InferIsNotNull().apply(f)); + } + + private IsNotNull isNotNull(Expression field) { + return new IsNotNull(EMPTY, field); + } + private LocalRelation asEmptyRelation(Object o) { var empty = as(o, LocalRelation.class); assertThat(empty.supplier(), is(LocalSupplier.EMPTY)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 943d60a3882b7..adcb1f611a343 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -22,22 +22,43 @@ import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; +import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cos; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFirst; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvLast; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; @@ -68,9 +89,11 @@ import org.elasticsearch.xpack.ql.expression.Nullability; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import 
org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.ql.index.EsIndex; @@ -88,6 +111,7 @@ import org.elasticsearch.xpack.ql.type.EsField; import org.junit.BeforeClass; +import java.lang.reflect.Constructor; import java.util.List; import java.util.Map; import java.util.Set; @@ -100,15 +124,28 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.L; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptySource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.TestUtils.relation; +import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.ql.expression.Literal.NULL; +import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; +import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; +import static org.elasticsearch.xpack.ql.type.DataTypes.IP; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; +import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -829,17 +866,6 @@ public void testDoNotEliminateHigherLimitDueToDescendantLimit() throws Exception as(filter.child(), Limit.class); } - public void testBasicNullFolding() { - FoldNull rule = new FoldNull(); - assertNullLiteral(rule.rule(new Add(EMPTY, L(randomInt()), Literal.NULL))); - assertNullLiteral(rule.rule(new Round(EMPTY, Literal.NULL, null))); - assertNullLiteral(rule.rule(new Pow(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new DateFormat(EMPTY, Literal.NULL, Literal.NULL, null))); - assertNullLiteral(rule.rule(new DateParse(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new DateTrunc(EMPTY, Literal.NULL, Literal.NULL))); - assertNullLiteral(rule.rule(new Substring(EMPTY, Literal.NULL, Literal.NULL, Literal.NULL))); - } - public void testPruneSortBeforeStats() { LogicalPlan plan = optimizedPlan(""" from test @@ -3269,6 +3295,23 @@ public void testPlanSanityCheck() throws Exception { assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [salary")); } + // https://github.com/elastic/elasticsearch/issues/104995 + public void 
testNoWrongIsNotNullPruning() { + var plan = optimizedPlan(""" + ROW a = 5, b = [ 1, 2 ] + | EVAL sum = a + b + | LIMIT 1 + | WHERE sum IS NOT NULL + """); + + var local = as(plan, LocalRelation.class); + assertThat(local.supplier(), equalTo(LocalSupplier.EMPTY)); + assertWarnings( + "Line 2:16: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded.", + "Line 2:16: java.lang.IllegalArgumentException: single-value function encountered multi-value" + ); + } + /** * Pushing down EVAL/GROK/DISSECT/ENRICH must not accidentally shadow attributes required by SORT. * @@ -3388,7 +3431,7 @@ private void assertNullLiteral(Expression expression) { } // TODO: move these from org.elasticsearch.xpack.ql.optimizer.OptimizerRulesTests to org.elasticsearch.xpack.ql.TestUtils - private static FieldAttribute getFieldAttribute(String name) { + public static FieldAttribute getFieldAttribute(String name) { return getFieldAttribute(name, INTEGER); } @@ -3408,4 +3451,305 @@ public static RLike rlike(Expression left, String exp) { protected List filteredWarnings() { return withDefaultLimitWarning(super.filteredWarnings()); } + + // Null folding + + public void testBasicNullFolding() { + FoldNull rule = new FoldNull(); + assertNullLiteral(rule.rule(new Add(EMPTY, L(randomInt()), Literal.NULL))); + assertNullLiteral(rule.rule(new Round(EMPTY, Literal.NULL, null))); + assertNullLiteral(rule.rule(new Pow(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new DateFormat(EMPTY, Literal.NULL, Literal.NULL, null))); + assertNullLiteral(rule.rule(new DateParse(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new DateTrunc(EMPTY, Literal.NULL, Literal.NULL))); + assertNullLiteral(rule.rule(new Substring(EMPTY, Literal.NULL, Literal.NULL, Literal.NULL))); + } + + public void testNullFoldingIsNull() { + FoldNull foldNull = new FoldNull(); + assertEquals(true, foldNull.rule(new IsNull(EMPTY, NULL)).fold()); + assertEquals(false, foldNull.rule(new IsNull(EMPTY, TRUE)).fold()); + } + + public void testNullFoldingIsNotNull() { + FoldNull foldNull = new FoldNull(); + assertEquals(true, foldNull.rule(new IsNotNull(EMPTY, TRUE)).fold()); + assertEquals(false, foldNull.rule(new IsNotNull(EMPTY, NULL)).fold()); + } + + public void testGenericNullableExpression() { + FoldNull rule = new FoldNull(); + // arithmetic + assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute("a"), NULL))); + // comparison + assertNullLiteral(rule.rule(greaterThanOf(getFieldAttribute("a"), NULL))); + // regex + assertNullLiteral(rule.rule(new RLike(EMPTY, NULL, new RLikePattern("123")))); + // date functions + assertNullLiteral(rule.rule(new DateExtract(EMPTY, NULL, NULL, configuration("")))); + // math functions + assertNullLiteral(rule.rule(new Cos(EMPTY, NULL))); + // string functions + assertNullLiteral(rule.rule(new LTrim(EMPTY, NULL))); + // spatial + assertNullLiteral(rule.rule(new SpatialCentroid(EMPTY, NULL))); + // ip + assertNullLiteral(rule.rule(new CIDRMatch(EMPTY, NULL, List.of(NULL)))); + // conversion + assertNullLiteral(rule.rule(new ToString(EMPTY, NULL))); + } + + public void testNullFoldingDoesNotApplyOnLogicalExpressions() { + FoldNull rule = new FoldNull(); + + Or or = new Or(EMPTY, NULL, TRUE); + assertEquals(or, rule.rule(or)); + or = new Or(EMPTY, NULL, NULL); + assertEquals(or, rule.rule(or)); + + And and = new And(EMPTY, NULL, TRUE); + assertEquals(and, rule.rule(and)); + and = new And(EMPTY, NULL, NULL); + assertEquals(and, rule.rule(and)); + } + + 
@SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnAbstractMultivalueFunction() throws Exception { + FoldNull rule = new FoldNull(); + + List> items = List.of( + MvDedupe.class, + MvFirst.class, + MvLast.class, + MvMax.class, + MvMedian.class, + MvMin.class, + MvSum.class + ); + for (Class clazz : items) { + Constructor ctor = clazz.getConstructor(Source.class, Expression.class); + AbstractMultivalueFunction conditionalFunction = ctor.newInstance(EMPTY, getFieldAttribute("a")); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + + conditionalFunction = ctor.newInstance(EMPTY, NULL); + assertEquals(NULL, rule.rule(conditionalFunction)); + } + + // avg and count ar different just because they know the return type in advance (all the others infer the type from the input) + MvAvg avg = new MvAvg(EMPTY, getFieldAttribute("a")); + assertEquals(avg, rule.rule(avg)); + avg = new MvAvg(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(avg)); + + MvCount count = new MvCount(EMPTY, getFieldAttribute("a")); + assertEquals(count, rule.rule(count)); + count = new MvCount(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, INTEGER), rule.rule(count)); + } + + @SuppressWarnings("unchecked") + public void testNullFoldingDoesNotApplyOnAggregate() throws Exception { + FoldNull rule = new FoldNull(); + + List> items = List.of(Max.class, Min.class); + for (Class clazz : items) { + Constructor ctor = clazz.getConstructor(Source.class, Expression.class); + AggregateFunction conditionalFunction = ctor.newInstance(EMPTY, getFieldAttribute("a")); + assertEquals(conditionalFunction, rule.rule(conditionalFunction)); + + conditionalFunction = ctor.newInstance(EMPTY, NULL); + assertEquals(NULL, rule.rule(conditionalFunction)); + } + + Avg avg = new Avg(EMPTY, getFieldAttribute("a")); + assertEquals(avg, rule.rule(avg)); + avg = new Avg(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(avg)); + + Count count = new Count(EMPTY, getFieldAttribute("a")); + assertEquals(count, rule.rule(count)); + count = new Count(EMPTY, NULL); + assertEquals(count, rule.rule(count)); + + CountDistinct countd = new CountDistinct(EMPTY, getFieldAttribute("a"), getFieldAttribute("a")); + assertEquals(countd, rule.rule(countd)); + countd = new CountDistinct(EMPTY, NULL, NULL); + assertEquals(new Literal(EMPTY, null, LONG), rule.rule(countd)); + + Median median = new Median(EMPTY, getFieldAttribute("a")); + assertEquals(median, rule.rule(median)); + median = new Median(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(median)); + + MedianAbsoluteDeviation medianad = new MedianAbsoluteDeviation(EMPTY, getFieldAttribute("a")); + assertEquals(medianad, rule.rule(medianad)); + medianad = new MedianAbsoluteDeviation(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(medianad)); + + Percentile percentile = new Percentile(EMPTY, getFieldAttribute("a"), getFieldAttribute("a")); + assertEquals(percentile, rule.rule(percentile)); + percentile = new Percentile(EMPTY, NULL, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(percentile)); + + Sum sum = new Sum(EMPTY, getFieldAttribute("a")); + assertEquals(sum, rule.rule(sum)); + sum = new Sum(EMPTY, NULL); + assertEquals(new Literal(EMPTY, null, DOUBLE), rule.rule(sum)); + + } + + public void testNullFoldableDoesNotApplyToIsNullAndNotNull() { + FoldNull rule = new FoldNull(); + + DataType numericType = randomFrom(INTEGER, LONG, DOUBLE); + DataType 
genericType = randomFrom(INTEGER, LONG, DOUBLE, UNSIGNED_LONG, KEYWORD, TEXT, GEO_POINT, GEO_SHAPE, VERSION, IP); + List items = List.of( + new Add(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Add(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Sub(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Sub(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Mul(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Mul(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + new Div(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType)), + new Div(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)), + + new GreaterThan(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new GreaterThan(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new GreaterThanOrEqual(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new GreaterThanOrEqual(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new LessThan(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new LessThan(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new LessThanOrEqual(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new LessThanOrEqual(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + new NotEquals(EMPTY, getFieldAttribute("a", numericType), getFieldAttribute("b", numericType), randomZone()), + new NotEquals(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER), randomZone()), + + new Equals(EMPTY, getFieldAttribute("a", genericType), getFieldAttribute("b", genericType)), + new Equals(EMPTY, new Literal(EMPTY, 1, INTEGER), new Literal(EMPTY, List.of(1, 2, 3), INTEGER)) + ); + for (Expression item : items) { + Expression isNull = new IsNull(EMPTY, item); + Expression transformed = rule.rule(isNull); + assertEquals(isNull, transformed); + + IsNotNull isNotNull = new IsNotNull(EMPTY, item); + transformed = rule.rule(isNotNull); + assertEquals(isNotNull, transformed); + } + + } + + // + // Propagate nullability (IS NULL / IS NOT NULL) + // + + // a IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNull() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + And and = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + assertEquals(FALSE, new PropagateNullable().rule(and)); + } + + // a IS NULL AND b IS NOT NULL AND c IS NULL AND d IS NOT NULL AND e IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNullMultiField() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + And andOne = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, getFieldAttribute("b"))); + And andTwo = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute("c")), new IsNotNull(EMPTY, getFieldAttribute("d"))); + And andThree = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute("e")), new IsNotNull(EMPTY, fa)); + + And and = new And(EMPTY, andOne, new And(EMPTY, andTwo, andThree)); + + 
assertEquals(FALSE, new PropagateNullable().rule(and)); + } + + // a IS NULL AND a > 1 => a IS NULL AND NULL + public void testIsNullAndComparison() { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + And and = new And(EMPTY, isNull, greaterThanOf(fa, ONE)); + assertEquals(new And(EMPTY, isNull, nullOf(BOOLEAN)), new PropagateNullable().rule(and)); + } + + // a IS NULL AND b < 1 AND c < 1 AND a < 1 => a IS NULL AND b < 1 AND c < 1 AND NULL + public void testIsNullAndMultipleComparison() { + FieldAttribute fa = getFieldAttribute("a"); + IsNull aIsNull = new IsNull(EMPTY, fa); + + And bLT1_AND_cLT1 = new And(EMPTY, lessThanOf(getFieldAttribute("b"), ONE), lessThanOf(getFieldAttribute("c"), ONE)); + And aIsNull_AND_bLT1_AND_cLT1 = new And(EMPTY, aIsNull, bLT1_AND_cLT1); + And aIsNull_AND_bLT1_AND_cLT1_AND_aLT1 = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, lessThanOf(fa, ONE)); + + Expression optimized = new PropagateNullable().rule(aIsNull_AND_bLT1_AND_cLT1_AND_aLT1); + Expression aIsNull_AND_bLT1_AND_cLT1_AND_NULL = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, nullOf(BOOLEAN)); + assertEquals(Predicates.splitAnd(aIsNull_AND_bLT1_AND_cLT1_AND_NULL), Predicates.splitAnd(optimized)); + } + + public void testDoNotOptimizeIsNullAndMultipleComparisonWithConstants() { + Literal a = ONE; + Literal b = ONE; + IsNull aIsNull = new IsNull(EMPTY, a); + + And bLT1_AND_cLT1 = new And(EMPTY, lessThanOf(b, ONE), lessThanOf(getFieldAttribute("c"), ONE)); + And aIsNull_AND_bLT1_AND_cLT1 = new And(EMPTY, aIsNull, bLT1_AND_cLT1); + And aIsNull_AND_bLT1_AND_cLT1_AND_aLT1 = new And(EMPTY, aIsNull_AND_bLT1_AND_cLT1, lessThanOf(a, ONE)); + + Expression optimized = new PropagateNullable().rule(aIsNull_AND_bLT1_AND_cLT1_AND_aLT1); + Literal nullLiteral = new Literal(EMPTY, null, BOOLEAN); + assertEquals(asList(aIsNull, nullLiteral, nullLiteral, nullLiteral), Predicates.splitAnd(optimized)); + } + + // ((a+1)/2) > 1 AND a + 2 AND a IS NULL AND b < 3 => NULL AND NULL AND a IS NULL AND b < 3 + public void testIsNullAndDeeplyNestedExpression() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + Expression nullified = new And( + EMPTY, + greaterThanOf(new Div(EMPTY, new Add(EMPTY, fa, ONE), TWO), ONE), + greaterThanOf(new Add(EMPTY, fa, TWO), ONE) + ); + Expression kept = new And(EMPTY, isNull, lessThanOf(getFieldAttribute("b"), THREE)); + And and = new And(EMPTY, nullified, kept); + + Expression optimized = new PropagateNullable().rule(and); + Expression expected = new And(EMPTY, new And(EMPTY, nullOf(BOOLEAN), nullOf(BOOLEAN)), kept); + + assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); + } + + // a IS NULL OR a IS NOT NULL => no change + // a IS NULL OR a > 1 => no change + public void testIsNullInDisjunction() throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + + Or or = new Or(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + Filter dummy = new Filter(EMPTY, relation(), or); + LogicalPlan transformed = new PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + + or = new Or(EMPTY, new IsNull(EMPTY, fa), greaterThanOf(fa, ONE)); + dummy = new Filter(EMPTY, relation(), or); + transformed = new PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + } + + // a + 1 AND (a IS NULL OR a > 3) => no change + public void testIsNullDisjunction() 
throws Exception { + FieldAttribute fa = getFieldAttribute("a"); + IsNull isNull = new IsNull(EMPTY, fa); + + Or or = new Or(EMPTY, isNull, greaterThanOf(fa, THREE)); + And and = new And(EMPTY, new Add(EMPTY, fa, ONE), or); + + assertEquals(and, new PropagateNullable().rule(and)); + } + + private Literal nullOf(DataType dataType) { + return new Literal(Source.EMPTY, null, dataType); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java new file mode 100644 index 0000000000000..eee5d9b4c49dc --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PropagateNullable.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer; + +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.predicate.logical.And; + +public class PropagateNullable extends LogicalPlanOptimizer.PropagateNullable { + @Override + public Expression rule(And and) { + return super.rule(and); + } +} diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java index f084b5cda4abe..7625cbf3a56e5 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java @@ -1678,14 +1678,9 @@ public FoldNull() { @Override protected Expression rule(Expression e) { - if (e instanceof IsNotNull isnn) { - if (isnn.field().nullable() == Nullability.FALSE) { - return new Literal(e.source(), Boolean.TRUE, DataTypes.BOOLEAN); - } - } else if (e instanceof IsNull isn) { - if (isn.field().nullable() == Nullability.FALSE) { - return new Literal(e.source(), Boolean.FALSE, DataTypes.BOOLEAN); - } + Expression result = tryReplaceIsNullIsNotNull(e); + if (result != e) { + return result; } else if (e instanceof In in) { if (Expressions.isNull(in.value())) { return Literal.of(in, null); @@ -1697,6 +1692,19 @@ protected Expression rule(Expression e) { } return e; } + + protected Expression tryReplaceIsNullIsNotNull(Expression e) { + if (e instanceof IsNotNull isnn) { + if (isnn.field().nullable() == Nullability.FALSE) { + return new Literal(e.source(), Boolean.TRUE, DataTypes.BOOLEAN); + } + } else if (e instanceof IsNull isn) { + if (isn.field().nullable() == Nullability.FALSE) { + return new Literal(e.source(), Boolean.FALSE, DataTypes.BOOLEAN); + } + } + return e; + } } // a IS NULL AND a IS NOT NULL -> FALSE @@ -1851,7 +1859,7 @@ protected Set resolveExpressionAsRootAttributes(Expression exp, Attr private boolean doResolve(Expression exp, AttributeMap aliases, Set resolvedExpressions) { boolean changed = false; // check if the expression can be skipped or is not nullabe - if (skipExpression(exp) || exp.nullable() == Nullability.FALSE) { + if (skipExpression(exp)) { resolvedExpressions.add(exp); } else { for (Expression e : exp.references()) { @@ -1871,7 +1879,7 @@ private boolean doResolve(Expression exp, AttributeMap aliases, Set< } protected boolean skipExpression(Expression e) { - return 
false; + return e.nullable() == Nullability.FALSE; } } From b39e7e1f3a9d1f04d186fc894907d173b8811912 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 8 Mar 2024 12:04:55 -0600 Subject: [PATCH 081/248] Fixing RejectedExecutionTests (#106134) --- .../integration/RejectedExecutionTests.java | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index ccd9023f745bb..23e60af94ac36 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import java.util.Arrays; + import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @@ -24,6 +26,7 @@ import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RejectedExecutionTests extends AbstractWatcherIntegrationTestCase { @@ -39,6 +42,7 @@ public void testHistoryOnRejection() throws Exception { prepareIndex("idx").setSource("field", "a").get(); refresh(); WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "a")), "idx"); + // The following watch will get rejected because we have configured the watcher thread pool queue size to be 0: new PutWatchRequestBuilder(client()).setId(randomAlphaOfLength(5)) .setSource( watchBuilder().trigger(schedule(interval(1, IntervalSchedule.Interval.Unit.SECONDS))) @@ -47,13 +51,22 @@ public void testHistoryOnRejection() throws Exception { .addAction("_logger", loggingAction("_logging").setCategory("_category")) ) .get(); - + // Now we make sure that we get a watcher history record for the failed watch (it is written on a different thread pool) assertBusy(() -> { flushAndRefresh(".watcher-history-*"); - assertResponse( - prepareSearch(".watcher-history-*"), - searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)) - ); + assertResponse(prepareSearch(".watcher-history-*"), searchResponse -> { + assertThat("Watcher history not found", searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)); + assertThat( + "Did not find watcher history for rejected watch", + Arrays.stream(searchResponse.getHits().getHits()) + .anyMatch( + hit -> hit.getSourceAsMap() != null + && hit.getSourceAsMap().get("messages") != null + && hit.getSourceAsMap().get("messages").toString().contains("due to thread pool capacity") + ), + equalTo(true) + ); + }); }); } @@ -64,9 +77,6 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .put(super.nodeSettings(nodeOrdinal, 
otherSettings)) .put(XPackSettings.SECURITY_ENABLED.getKey(), false) .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") - .put("thread_pool.write.size", 1) - .put("thread_pool.write.queue_size", 1) - .put("xpack.watcher.thread_pool.size", 1) .put("xpack.watcher.thread_pool.queue_size", 0) .build(); } From df90fe258f29a73d95f123cf9e368bb42a7b4820 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 8 Mar 2024 10:26:09 -0800 Subject: [PATCH 082/248] AwaitsFix #105951 --- .../xpack/watcher/test/integration/RejectedExecutionTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index 23e60af94ac36..379df1e28e7bb 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -37,6 +37,7 @@ protected boolean timeWarped() { return false; } + @AwaitsFix("https://github.com/elastic/elasticsearch/issues/105951") public void testHistoryOnRejection() throws Exception { createIndex("idx"); prepareIndex("idx").setSource("field", "a").get(); From 46e6021e8aac4cea17bb816115b74cc3011d3b72 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 8 Mar 2024 10:26:29 -0800 Subject: [PATCH 083/248] AwaitsFix #105951 --- .../xpack/watcher/test/integration/RejectedExecutionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index 379df1e28e7bb..4a3bcca3acb85 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -37,7 +37,7 @@ protected boolean timeWarped() { return false; } - @AwaitsFix("https://github.com/elastic/elasticsearch/issues/105951") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105951") public void testHistoryOnRejection() throws Exception { createIndex("idx"); prepareIndex("idx").setSource("field", "a").get(); From 80da0a7591184146db8c1d2240415beb10613074 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 8 Mar 2024 11:33:31 -0800 Subject: [PATCH 084/248] Fix test in release builds (#106138) I believe we used an `assert` here when we wanted an `assume` to skip these tests in release builds. 
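As an illustration of the distinction (hypothetical test method; only the two calls matter), a
sketch of why assumeTrue is the right choice when a precondition should skip the test rather than
fail it:

    // In an ESTestCase subclass:
    public void testSnapshotOnlyFeature() {
        // assertTrue would FAIL the test on release builds, where isSnapshot() is false:
        // assertTrue("time series querying relies on query pragma", Build.current().isSnapshot());

        // assumeTrue SKIPS the test instead, which is the intended behaviour here:
        assumeTrue("time series querying relies on query pragma", Build.current().isSnapshot());

        // ... the rest of the test only runs on snapshot builds ...
    }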
Closes https://github.com/elastic/elasticsearch/issues/106128 --- .../elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java index bd0154176df88..b7ab7b623d460 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/TSDBRestEsqlIT.java @@ -42,7 +42,7 @@ protected String getTestRestCluster() { } public void testTimeSeriesQuerying() throws IOException { - assertTrue("time series querying relies on query pragma", Build.current().isSnapshot()); + assumeTrue("time series querying relies on query pragma", Build.current().isSnapshot()); var settings = Settings.builder() .loadFromStream("tsdb-settings.json", TSDBRestEsqlIT.class.getResourceAsStream("/tsdb-settings.json"), false) .build(); From 009eb5fd67f217b4d592d9556dca9c2642ecb365 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 8 Mar 2024 11:45:35 -0800 Subject: [PATCH 085/248] AwaitsFix #105577 --- .../downsample/DataStreamLifecycleDownsampleDisruptionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 76cc8308a4703..65a4d84e921a2 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -57,6 +57,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return settings.build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105577") @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { final InternalTestCluster cluster = internalCluster(); From 265c3eee67cfc0374848cfdb3441eaa838ee0608 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Fri, 8 Mar 2024 15:35:58 -0500 Subject: [PATCH 086/248] Revert "[DOCS] Mute snippet tests for #75069 (#75237)" (#106140) This reverts commit 75c585c0f19731cad5a5ec90c46b6a70f345f705. 
--- docs/reference/sql/endpoints/rest.asciidoc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 6c83b67a94385..8168a1c14e1a3 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -579,7 +579,6 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TEST[setup:library] // TEST[s/"wait_for_completion_timeout": "2s"/"wait_for_completion_timeout": "0"/] @@ -603,7 +602,6 @@ For CSV, TSV, and TXT responses, the API returns these values in the respective "rows": [ ] } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"is_partial": true/"is_partial": $body.is_partial/] // TESTRESPONSE[s/"is_running": true/"is_running": $body.is_running/] @@ -630,7 +628,6 @@ complete results. "completion_status": 200 } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"expiration_time_in_millis": 1611690295000/"expiration_time_in_millis": $body.expiration_time_in_millis/] @@ -663,7 +660,6 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TEST[setup:library] You can use the get async SQL search API's `keep_alive` parameter to later @@ -702,7 +698,6 @@ POST _sql?format=json "fetch_size": 5 } ---- -// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TEST[setup:library] If `is_partial` and `is_running` are `false`, the search was synchronous and @@ -719,7 +714,6 @@ returned complete results. "cursor": ... } ---- -// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] // TESTRESPONSE[s/Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=/$body.id/] // TESTRESPONSE[s/"rows": \.\.\./"rows": $body.rows/] // TESTRESPONSE[s/"columns": \.\.\./"columns": $body.columns/] From 83585315fe85eb57b074df12f926051e443ad111 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 8 Mar 2024 13:48:26 -0800 Subject: [PATCH 087/248] Only apply build to direct libs (#106101) Sometimes libs have subprojects that may not be java projects. This commit adjusts the shared configuration for libs to only affect direct subprojects of :lib. --- libs/build.gradle | 2 +- libs/native/jna/build.gradle | 2 +- libs/x-content/impl/build.gradle | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/build.gradle b/libs/build.gradle index ee4ae3db66741..afb82b5e63864 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -configure(subprojects - project('elasticsearch-log4j')) { +configure(childProjects.values() - project('elasticsearch-log4j')) { /* * All subprojects are java projects using Elasticsearch's standard build * tools. diff --git a/libs/native/jna/build.gradle b/libs/native/jna/build.gradle index 555f17152c418..e34f35318126a 100644 --- a/libs/native/jna/build.gradle +++ b/libs/native/jna/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.java' +apply plugin: 'elasticsearch.build' base { archivesName = "native-access-jna" diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 4bf498b1b392e..41b65044735ca 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -apply plugin: 'elasticsearch.java' +apply plugin: 'elasticsearch.build' base { archivesName = "x-content-impl" From 2bb5bb9f87b173c43c02ada52d3d16ece5b3f98d Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Fri, 8 Mar 2024 17:17:52 -0500 Subject: [PATCH 088/248] Tighten up preconditions and test conditions in watcher yaml rest tests (#106141) --- .../xpack/watcher/WatcherRestTestCase.java | 43 +++++++++++++------ .../test/watcher/usage/10_basic.yml | 18 +++----- 2 files changed, 38 insertions(+), 23 deletions(-) diff --git a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java index 341e92641f641..1779fa4345a85 100644 --- a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java +++ b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java @@ -21,6 +21,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.equalTo; + /** * Parent test class for Watcher (not-YAML) based REST tests */ @@ -78,20 +80,37 @@ public final void stopWatcher() throws Exception { } public static void deleteAllWatcherData() throws IOException { - var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); + { + var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); + var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); + + int totalCount = response.evaluate("count"); + List> watches = response.evaluate("watches"); + assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; + for (Map watch : watches) { + String id = (String) watch.get("_id"); + var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); + assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); + } + } - int totalCount = response.evaluate("count"); - List> watches = response.evaluate("watches"); - assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; - for (Map watch : watches) { - String id = (String) watch.get("_id"); - var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); - assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); + { + var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); + var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); + assertThat(response.evaluate("count"), equalTo(0)); } - var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); - deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); - ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); + { + var xpackUsageRequest = new Request("GET", "/_xpack/usage"); + var response = 
ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(xpackUsageRequest)); + assertThat(response.evaluate("watcher.count.active"), equalTo(0)); + assertThat(response.evaluate("watcher.count.total"), equalTo(0)); + } + + { + var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); + deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); + ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); + } } } diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml index 17031abf39e02..b3682b05d7e68 100644 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,21 +1,18 @@ --- "Test watcher usage stats output": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/65547" - do: catch: missing watcher.delete_watch: id: "usage_stats_watch" - - do: {xpack.usage: {}} - - set: { "watcher.count.active": watch_count_active } - - set: { "watcher.count.total": watch_count_total } + - do: { xpack.usage: {} } + - match: { "watcher.count.active": 0 } + - match: { "watcher.count.total": 0 } - do: watcher.put_watch: id: "usage_stats_watch" - body: > + body: > { "trigger": { "schedule" : { "cron" : "0 0 0 1 * ? 2099" } @@ -47,9 +44,9 @@ } - match: { _id: "usage_stats_watch" } - - do: {xpack.usage: {}} - - gt: { "watcher.count.active": $watch_count_active } - - gt: { "watcher.count.total": $watch_count_total } + - do: { xpack.usage: {} } + - match: { "watcher.count.active": 1 } + - match: { "watcher.count.total": 1 } - gte: { "watcher.watch.action._all.active": 1 } - gte: { "watcher.watch.action.logging.active": 1 } - gte: { "watcher.watch.condition._all.active": 1 } @@ -60,4 +57,3 @@ - gte: { "watcher.watch.trigger.schedule.active": 1 } - gte: { "watcher.watch.trigger.schedule.cron.active": 1 } - gte: { "watcher.watch.trigger.schedule._all.active": 1 } - From 9953b12eb7a3ab19d614a3ea240bb5a6d0719d99 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Fri, 8 Mar 2024 17:31:18 -0500 Subject: [PATCH 089/248] Add pluggable BuildVersion in NodeMetadata (#105757) Here we introduce a BuildVersion interface that can be created with a version ID. That Version can be checked to see if it's before the minimum compatibility version of the currently running node, or if it comes after the current node's version. We will use the existing BuildExtension to retrieve implementations of BuildVersion provided by downstream projects. 
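As a rough sketch of the intended call pattern (illustrative only: the helper method and the version-id variable below are invented for this example; the real checks live in NodeMetadata#verifyUpgradeToCurrentVersion in the diff that follows):

```java
// Hypothetical caller-side sketch of the new org.elasticsearch.env.BuildVersion API added by this commit.
static void verifyCompatibility(int versionIdReadFromDisk) {
    BuildVersion persisted = BuildVersion.fromVersionId(versionIdReadFromDisk);
    if (persisted.onOrAfterMinimumCompatible() == false) {
        // the on-disk version is older than the minimum version this node can upgrade from
        throw new IllegalStateException("cannot upgrade a node from version [" + persisted + "]");
    }
    if (persisted.isFutureVersion()) {
        // the on-disk version comes from a newer release, i.e. starting this node would be a downgrade
        throw new IllegalStateException("cannot downgrade a node to version [" + BuildVersion.current() + "]");
    }
}
```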
* Add interface and implementation for pluggable build version * Use BuildVersion internally in NodeMetadata * Use static holder class to only load BuildExtension once --- docs/changelog/105757.yaml | 5 + .../gateway/GatewayIndexStateIT.java | 4 +- .../org/elasticsearch/env/BuildVersion.java | 125 ++++++++++++++++++ .../env/DefaultBuildVersion.java | 77 +++++++++++ .../elasticsearch/env/NodeEnvironment.java | 5 +- .../org/elasticsearch/env/NodeMetadata.java | 53 ++++---- .../env/OverrideNodeVersionCommand.java | 2 +- .../gateway/GatewayMetaState.java | 14 +- .../gateway/PersistedClusterStateService.java | 4 +- .../internal/BuildExtension.java | 12 ++ .../java/org/elasticsearch/node/Node.java | 4 +- .../elasticsearch/env/BuildVersionTests.java | 44 ++++++ .../elasticsearch/env/NodeMetadataTests.java | 38 ++++-- .../env/OverrideNodeVersionCommandTests.java | 8 +- .../PersistedClusterStateServiceTests.java | 7 +- ...ecurityImplicitBehaviorBootstrapCheck.java | 3 +- ...tyImplicitBehaviorBootstrapCheckTests.java | 33 +++-- .../xpack/security/SecurityTests.java | 4 +- 18 files changed, 369 insertions(+), 73 deletions(-) create mode 100644 docs/changelog/105757.yaml create mode 100644 server/src/main/java/org/elasticsearch/env/BuildVersion.java create mode 100644 server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java create mode 100644 server/src/test/java/org/elasticsearch/env/BuildVersionTests.java diff --git a/docs/changelog/105757.yaml b/docs/changelog/105757.yaml new file mode 100644 index 0000000000000..f11aed2b2d96b --- /dev/null +++ b/docs/changelog/105757.yaml @@ -0,0 +1,5 @@ +pr: 105757 +summary: Add pluggable `BuildVersion` in `NodeMetadata` +area: Infra/Core +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 5f3b854b74fb4..d1827bf49410f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -32,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; @@ -564,7 +564,7 @@ public void testHalfDeletedIndexImport() throws Exception { .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) .build() ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, Version.CURRENT, metadata.oldestIndexVersion()), paths); + NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); }); ensureGreen(); diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java new file mode 100644 index 0000000000000..e1f5879ae9569 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.internal.BuildExtension; +import org.elasticsearch.plugins.ExtensionLoader; + +import java.util.ServiceLoader; + +/** + * A version representing the code of Elasticsearch + * + *
<p>This class allows us to check whether an Elasticsearch release + * is "too old" or "too new," using an intentionally minimal API for + * comparisons. The static {@link #current()} method returns the current + * release version, and {@link #fromVersionId(int)} returns a version + * based on some identifier. By default, this identifier matches what the + * {@link Version} class uses, but the implementation is pluggable. + * If a module provides a {@link BuildExtension} service via Java SPI, this + * class's static methods will return a different implementation of {@link BuildVersion}, + * potentially with different behavior. This allows downstream projects to + * provide versions that accommodate different release models or versioning + * schemes.</p>
    + */ +public abstract class BuildVersion { + + /** + * Check whether this version is on or after a minimum threshold. + * + *
<p>In some cases, the only thing we need to know about a version is whether + * it's compatible with the currently-running Elasticsearch. This method checks + * the lower bound, and returns false if the version is "too old."</p> + * + * <p>By default, the minimum compatible version is derived from {@code Version.CURRENT.minimumCompatibilityVersion()}, + * but this behavior is pluggable.</p>
    + * @return True if this version is on or after the minimum compatible version + * for the currently running Elasticsearch, false otherwise. + */ + public abstract boolean onOrAfterMinimumCompatible(); + + /** + * Check whether this version comes from a release later than the + * currently running Elasticsearch. + * + *
<p>This is useful for checking whether a node would be downgraded.</p>
    + * + * @return True if this version represents a release of Elasticsearch later + * than the one that's running. + */ + public abstract boolean isFutureVersion(); + + // temporary + // TODO[wrb]: remove from PersistedClusterStateService + // TODO[wrb]: remove from security bootstrap checks + @Deprecated + public Version toVersion() { + return null; + } + + /** + * Create a {@link BuildVersion} from a version ID number. + * + *
<p>By default, this identifier should match the integer ID of a {@link Version}; + * see that class for details on the default semantic versioning scheme. This behavior + * is, of course, pluggable.</p>
    + * + * @param versionId An integer identifier for a version + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromVersionId(int versionId) { + return CurrentExtensionHolder.BUILD_EXTENSION.fromVersionId(versionId); + } + + /** + * Get the current build version. + * + *
<p>By default, this value will be different for every public release of Elasticsearch, + * but downstream implementations aren't restricted by this condition.</p>
    + * + * @return The BuildVersion for Elasticsearch + */ + public static BuildVersion current() { + return CurrentExtensionHolder.BUILD_EXTENSION.currentBuildVersion(); + } + + // only exists for NodeMetadata#toXContent + // TODO[wrb]: make this abstract once all downstream classes override it + protected int id() { + return -1; + } + + private static class CurrentExtensionHolder { + private static final BuildExtension BUILD_EXTENSION = findExtension(); + + private static BuildExtension findExtension() { + return ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class)).orElse(new DefaultBuildExtension()); + } + } + + private static class DefaultBuildExtension implements BuildExtension { + @Override + public Build getCurrentBuild() { + return Build.current(); + } + + @Override + public BuildVersion currentBuildVersion() { + return DefaultBuildVersion.CURRENT; + } + + @Override + public BuildVersion fromVersionId(int versionId) { + return new DefaultBuildVersion(versionId); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java new file mode 100644 index 0000000000000..6cec751a1cad1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.Version; + +import java.util.Objects; + +/** + * A {@link BuildVersion} that uses the same identifiers and compatibility constraints + * as {@link Version}. + * + *
<p>This default implementation of BuildVersion delegates to the {@link Version} class. + * It's intended to let us check whether a version identifier is "too old" or "too new." + * "Too old" is determined by {@code Version.CURRENT.minimumCompatibilityVersion()}, + * and "too new" is anything that comes after {@code Version.CURRENT}. This lets us + * give users simple rules in terms of public-facing release versions for Elasticsearch + * compatibility when upgrading nodes and prevents downgrades in place.</p>
    + */ +// TODO[wrb]: make package-private once default implementations are removed in BuildExtension +public final class DefaultBuildVersion extends BuildVersion { + + public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); + + private final int versionId; + private final Version version; + + public DefaultBuildVersion(int versionId) { + assert versionId >= 0 : "Release version IDs must be non-negative integers"; + this.versionId = versionId; + this.version = Version.fromId(versionId); + } + + @Override + public boolean onOrAfterMinimumCompatible() { + return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); + } + + @Override + public boolean isFutureVersion() { + return Version.CURRENT.before(version); + } + + @Override + public int id() { + return versionId; + } + + @Override + public Version toVersion() { + return version; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DefaultBuildVersion that = (DefaultBuildVersion) o; + return versionId == that.versionId; + } + + @Override + public int hashCode() { + return Objects.hash(versionId); + } + + @Override + public String toString() { + return Version.fromId(versionId).toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index b246802d06fee..291e9697def4a 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -22,7 +22,6 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -628,7 +627,7 @@ private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, D assert nodeIds.isEmpty() : nodeIds; // If we couldn't find legacy metadata, we set the latest index version to this version. This happens // when we are starting a new node and there are no indices to worry about. 
- metadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT, IndexVersion.current()); + metadata = new NodeMetadata(generateNodeId(settings), BuildVersion.current(), IndexVersion.current()); } else { assert nodeIds.equals(Collections.singleton(legacyMetadata.nodeId())) : nodeIds + " doesn't match " + legacyMetadata; metadata = legacyMetadata; @@ -636,7 +635,7 @@ private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, D } metadata = metadata.upgradeToCurrentVersion(); - assert metadata.nodeVersion().equals(Version.CURRENT) : metadata.nodeVersion() + " != " + Version.CURRENT; + assert metadata.nodeVersion().equals(BuildVersion.current()) : metadata.nodeVersion() + " != " + Build.current(); return metadata; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 2122e5fcc8b6c..8d8505f0147bc 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -9,7 +9,6 @@ package org.elasticsearch.env; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; @@ -36,26 +35,27 @@ public final class NodeMetadata { private final String nodeId; - private final Version nodeVersion; + private final BuildVersion nodeVersion; - private final Version previousNodeVersion; + private final BuildVersion previousNodeVersion; private final IndexVersion oldestIndexVersion; + @UpdateForV9 // version should be non-null in the node metadata from v9 onwards private NodeMetadata( final String nodeId, - final Version nodeVersion, - final Version previousNodeVersion, + final BuildVersion buildVersion, + final BuildVersion previousBuildVersion, final IndexVersion oldestIndexVersion ) { this.nodeId = Objects.requireNonNull(nodeId); - this.nodeVersion = Objects.requireNonNull(nodeVersion); - this.previousNodeVersion = Objects.requireNonNull(previousNodeVersion); + this.nodeVersion = Objects.requireNonNull(buildVersion); + this.previousNodeVersion = Objects.requireNonNull(previousBuildVersion); this.oldestIndexVersion = Objects.requireNonNull(oldestIndexVersion); } - public NodeMetadata(final String nodeId, final Version nodeVersion, final IndexVersion oldestIndexVersion) { - this(nodeId, nodeVersion, nodeVersion, oldestIndexVersion); + public NodeMetadata(final String nodeId, final BuildVersion buildVersion, final IndexVersion oldestIndexVersion) { + this(nodeId, buildVersion, buildVersion, oldestIndexVersion); } @Override @@ -93,7 +93,7 @@ public String nodeId() { return nodeId; } - public Version nodeVersion() { + public BuildVersion nodeVersion() { return nodeVersion; } @@ -103,7 +103,7 @@ public Version nodeVersion() { * the current version of the node ({@link NodeMetadata#upgradeToCurrentVersion()} before storing the node metadata again on disk. * In doing so, {@code previousNodeVersion} refers to the previously last known version that this node was started on. 
*/ - public Version previousNodeVersion() { + public BuildVersion previousNodeVersion() { return previousNodeVersion; } @@ -111,11 +111,12 @@ public IndexVersion oldestIndexVersion() { return oldestIndexVersion; } + @UpdateForV9 public void verifyUpgradeToCurrentVersion() { - assert (nodeVersion.equals(Version.V_EMPTY) == false) || (Version.CURRENT.major <= Version.V_7_0_0.major + 1) - : "version is required in the node metadata from v9 onwards"; + // Enable the following assertion for V9: + // assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards"; - if (nodeVersion.before(Version.CURRENT.minimumCompatibilityVersion())) { + if (nodeVersion.onOrAfterMinimumCompatible() == false) { throw new IllegalStateException( "cannot upgrade a node from version [" + nodeVersion @@ -128,7 +129,7 @@ public void verifyUpgradeToCurrentVersion() { ); } - if (nodeVersion.after(Version.CURRENT)) { + if (nodeVersion.isFutureVersion()) { throw new IllegalStateException( "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Build.current().version() + "]" ); @@ -138,13 +139,15 @@ public void verifyUpgradeToCurrentVersion() { public NodeMetadata upgradeToCurrentVersion() { verifyUpgradeToCurrentVersion(); - return nodeVersion.equals(Version.CURRENT) ? this : new NodeMetadata(nodeId, Version.CURRENT, nodeVersion, oldestIndexVersion); + return nodeVersion.equals(BuildVersion.current()) + ? this + : new NodeMetadata(nodeId, BuildVersion.current(), nodeVersion, oldestIndexVersion); } private static class Builder { String nodeId; - Version nodeVersion; - Version previousNodeVersion; + BuildVersion nodeVersion; + BuildVersion previousNodeVersion; IndexVersion oldestIndexVersion; public void setNodeId(String nodeId) { @@ -152,22 +155,20 @@ public void setNodeId(String nodeId) { } public void setNodeVersionId(int nodeVersionId) { - this.nodeVersion = Version.fromId(nodeVersionId); + this.nodeVersion = BuildVersion.fromVersionId(nodeVersionId); } public void setOldestIndexVersion(int oldestIndexVersion) { this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion); } - private Version getVersionOrFallbackToEmpty() { - return Objects.requireNonNullElse(this.nodeVersion, Version.V_EMPTY); - } - + @UpdateForV9 // version is required in the node metadata from v9 onwards public NodeMetadata build() { - @UpdateForV9 // version is required in the node metadata from v9 onwards - final Version nodeVersion = getVersionOrFallbackToEmpty(); final IndexVersion oldestIndexVersion; + if (this.nodeVersion == null) { + nodeVersion = BuildVersion.fromVersionId(0); + } if (this.previousNodeVersion == null) { previousNodeVersion = nodeVersion; } @@ -207,7 +208,7 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str @Override public void toXContent(XContentBuilder builder, NodeMetadata nodeMetadata) throws IOException { builder.field(NODE_ID_KEY, nodeMetadata.nodeId); - builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id); + builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id()); builder.field(OLDEST_INDEX_VERSION_KEY, nodeMetadata.oldestIndexVersion.id()); } diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index 14ae6cd5e26a0..c7dd913174782 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ 
b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -82,7 +82,7 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio confirm( terminal, - (nodeMetadata.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( + (nodeMetadata.nodeVersion().onOrAfterMinimumCompatible() == false ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( "V_OLD", nodeMetadata.nodeVersion().toString() ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString()) diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index e7b8eadb3f771..50fae2a1dda03 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -14,7 +14,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetadata; @@ -35,6 +34,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.node.Node; @@ -222,7 +222,11 @@ private PersistedState createOnDiskPersistedState( } // write legacy node metadata to prevent accidental downgrades from spawning empty cluster state NodeMetadata.FORMAT.writeAndCleanup( - new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT, clusterState.metadata().oldestIndexVersion()), + new NodeMetadata( + persistedClusterStateService.getNodeId(), + BuildVersion.current(), + clusterState.metadata().oldestIndexVersion() + ), persistedClusterStateService.getDataPaths() ); success = true; @@ -260,7 +264,11 @@ private PersistedState createInMemoryPersistedState( metaStateService.deleteAll(); // write legacy node metadata to prevent downgrades from spawning empty cluster state NodeMetadata.FORMAT.writeAndCleanup( - new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT, clusterState.metadata().oldestIndexVersion()), + new NodeMetadata( + persistedClusterStateService.getNodeId(), + BuildVersion.current(), + clusterState.metadata().oldestIndexVersion() + ), persistedClusterStateService.getDataPaths() ); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index b86cfa6fdb7af..49ac38d656278 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -69,6 +69,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersion; @@ -377,7 +378,8 @@ public static NodeMetadata nodeMetadata(Path... 
dataPaths) throws IOException { if (nodeId == null) { return null; } - return new NodeMetadata(nodeId, version, oldestIndexVersion); + // TODO: remove use of Version here (ES-7343) + return new NodeMetadata(nodeId, BuildVersion.fromVersionId(version.id()), oldestIndexVersion); } /** diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index 921577317604a..cc02495b39520 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -9,6 +9,8 @@ package org.elasticsearch.internal; import org.elasticsearch.Build; +import org.elasticsearch.env.BuildVersion; +import org.elasticsearch.env.DefaultBuildVersion; /** * Allows plugging in current build info. @@ -26,4 +28,14 @@ public interface BuildExtension { default boolean hasReleaseVersioning() { return true; } + + // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated + default BuildVersion currentBuildVersion() { + return DefaultBuildVersion.CURRENT; + } + + // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated + default BuildVersion fromVersionId(int versionId) { + return new DefaultBuildVersion(versionId); + } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 165c5f6524104..8ff2ac5e5fca0 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; @@ -47,6 +46,7 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; @@ -340,7 +340,7 @@ public Node start() throws NodeValidationException { nodeEnvironment.nodeDataPaths() ); assert nodeMetadata != null; - assert nodeMetadata.nodeVersion().equals(Version.CURRENT); + assert nodeMetadata.nodeVersion().equals(BuildVersion.current()); assert nodeMetadata.nodeId().equals(localNodeFactory.getNode().getId()); } catch (IOException e) { assert false : e; diff --git a/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java new file mode 100644 index 0000000000000..a6bc5495be877 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/BuildVersionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.env; + +import org.elasticsearch.Version; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class BuildVersionTests extends ESTestCase { + public void testBuildVersionCurrent() { + assertThat(BuildVersion.current(), equalTo(BuildVersion.fromVersionId(Version.CURRENT.id()))); + } + + public void testBeforeMinimumCompatibleVersion() { + BuildVersion beforeMinCompat = BuildVersion.fromVersionId(between(0, Version.CURRENT.minimumCompatibilityVersion().id() - 1)); + BuildVersion afterMinCompat = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id(), Version.CURRENT.id()) + ); + BuildVersion futureVersion = BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, Version.CURRENT.id() + 1_000_000)); + + assertFalse(beforeMinCompat.onOrAfterMinimumCompatible()); + assertTrue(afterMinCompat.onOrAfterMinimumCompatible()); + assertTrue(futureVersion.onOrAfterMinimumCompatible()); + } + + public void testIsFutureVersion() { + BuildVersion beforeMinCompat = BuildVersion.fromVersionId(between(0, Version.CURRENT.minimumCompatibilityVersion().id() - 1)); + BuildVersion afterMinCompat = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id(), Version.CURRENT.id()) + ); + BuildVersion futureVersion = BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, Version.CURRENT.id() + 1_000_000)); + + assertFalse(beforeMinCompat.isFutureVersion()); + assertFalse(afterMinCompat.isFutureVersion()); + assertTrue(futureVersion.isFutureVersion()); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index b7001943073bc..46d6beb56138b 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -36,6 +36,10 @@ private Version randomVersion() { return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); } + private BuildVersion randomBuildVersion() { + return BuildVersion.fromVersionId(randomVersion().id()); + } + private IndexVersion randomIndexVersion() { return rarely() ? 
IndexVersion.fromId(randomInt()) : IndexVersionUtils.randomVersion(random()); } @@ -43,7 +47,7 @@ private IndexVersion randomIndexVersion() { public void testEqualsHashcodeSerialization() { final Path tempDir = createTempDir(); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new NodeMetadata(randomAlphaOfLength(10), randomVersion(), randomIndexVersion()), + new NodeMetadata(randomAlphaOfLength(10), randomBuildVersion(), randomIndexVersion()), nodeMetadata -> { final long generation = NodeMetadata.FORMAT.writeAndCleanup(nodeMetadata, tempDir); final Tuple nodeMetadataLongTuple = NodeMetadata.FORMAT.loadLatestStateWithGeneration( @@ -62,7 +66,7 @@ public void testEqualsHashcodeSerialization() { ); case 1 -> new NodeMetadata( nodeMetadata.nodeId(), - randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomVersion), + randomValueOtherThan(nodeMetadata.nodeVersion(), this::randomBuildVersion), nodeMetadata.oldestIndexVersion() ); default -> new NodeMetadata( @@ -87,20 +91,17 @@ public void testReadsFormatWithoutVersion() throws IOException { Files.copy(resource, stateDir.resolve(NodeMetadata.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); assertThat(nodeMetadata.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.V_EMPTY)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.fromVersionId(0))); } public void testUpgradesLegitimateVersions() { final String nodeId = randomAlphaOfLength(10); final NodeMetadata nodeMetadata = new NodeMetadata( nodeId, - randomValueOtherThanMany( - v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumCompatibilityVersion()), - this::randomVersion - ), + randomValueOtherThanMany(v -> v.isFutureVersion() || v.onOrAfterMinimumCompatible() == false, this::randomBuildVersion), IndexVersion.current() ).upgradeToCurrentVersion(); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); assertThat(nodeMetadata.nodeId(), equalTo(nodeId)); } @@ -109,7 +110,7 @@ public void testUpgradesMissingVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(nodeId, Version.V_EMPTY, IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(nodeId, BuildVersion.fromVersionId(0), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -122,7 +123,7 @@ public void testUpgradesMissingVersion() { public void testDoesNotUpgradeFutureVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooNewVersion(), IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooNewBuildVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -133,7 +134,7 @@ public void testDoesNotUpgradeFutureVersion() { public void testDoesNotUpgradeAncientVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooOldVersion(), IndexVersion.current()).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooOldBuildVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); 
assertThat( illegalStateException.getMessage(), @@ -153,10 +154,11 @@ public void testDoesNotUpgradeAncientVersion() { public void testUpgradeMarksPreviousVersion() { final String nodeId = randomAlphaOfLength(10); final Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0); + final BuildVersion buildVersion = BuildVersion.fromVersionId(version.id()); - final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, version, IndexVersion.current()).upgradeToCurrentVersion(); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); - assertThat(nodeMetadata.previousNodeVersion(), equalTo(version)); + final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, buildVersion, IndexVersion.current()).upgradeToCurrentVersion(); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); + assertThat(nodeMetadata.previousNodeVersion(), equalTo(buildVersion)); } public static Version tooNewVersion() { @@ -167,7 +169,15 @@ public static IndexVersion tooNewIndexVersion() { return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } + public static BuildVersion tooNewBuildVersion() { + return BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, 99999999)); + } + public static Version tooOldVersion() { return Version.fromId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); } + + public static BuildVersion tooOldBuildVersion() { + return BuildVersion.fromVersionId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); + } } diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index 8a4da8e8cee94..39872df80236e 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -136,7 +136,7 @@ public void testWarnsIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { @@ -161,7 +161,7 @@ public void testWarnsIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { @@ -184,7 +184,7 @@ public void testOverwritesIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); } public void testOverwritesIfTooNew() throws Exception { @@ -206,6 +206,6 @@ public void testOverwritesIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - 
assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetadata.nodeVersion(), equalTo(BuildVersion.current())); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index bad6702e8ad83..7951c23ae815a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; @@ -1439,13 +1440,13 @@ public void testOverrideLuceneVersion() throws IOException { } NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(Version.CURRENT, prevMetadata.nodeVersion()); + assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths()); NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(Version.V_8_0_0, metadata.nodeVersion()); + assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), metadata.nodeVersion()); for (Path p : persistedClusterStateService.getDataPaths()) { NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p); - assertEquals(Version.V_8_0_0, individualMetadata.nodeVersion()); + assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), individualMetadata.nodeVersion()); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java index c6396f886b4bc..2d535100d468d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java @@ -34,7 +34,8 @@ public BootstrapCheckResult check(BootstrapContext context) { } if (licenseService instanceof ClusterStateLicenseService clusterStateLicenseService) { final License license = clusterStateLicenseService.getLicense(context.metadata()); - final Version lastKnownVersion = nodeMetadata.previousNodeVersion(); + // TODO[wrb]: Add an "isCurrentMajor" method to BuildVersion? 
+ final Version lastKnownVersion = nodeMetadata.previousNodeVersion().toVersion(); // pre v7.2.0 nodes have Version.EMPTY and its id is 0, so Version#before handles this successfully if (lastKnownVersion.before(Version.V_8_0_0) && XPackSettings.SECURITY_ENABLED.exists(context.settings()) == false diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 413358f784dea..6777c38b809e0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.license.ClusterStateLicenseService; @@ -32,9 +33,11 @@ public class SecurityImplicitBehaviorBootstrapCheckTests extends AbstractBootstrapCheckTestCase { public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -67,9 +70,11 @@ public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Except } public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -88,9 +93,11 @@ public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exc } public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { - final Version previousVersion = randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + final BuildVersion previousVersion = toBuildVersion( + randomValueOtherThan( + Version.V_8_0_0, + () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) + ) ); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); @@ -105,7 +112,7 @@ public void 
testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { } public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { - final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); @@ -119,7 +126,7 @@ public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { } public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { - final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); @@ -136,4 +143,8 @@ private Metadata createLicensesMetadata(TrialLicenseVersion era, String licenseM License license = TestUtils.generateSignedLicense(licenseMode, TimeValue.timeValueHours(2)); return Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); } + + private static BuildVersion toBuildVersion(Version version) { + return BuildVersion.fromVersionId(version.id()); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 4aefc436d82f5..4a5412ad9c5bb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.env.TestEnvironment; @@ -202,7 +202,7 @@ protected SSLService getSslService() { private Collection createComponentsUtil(Settings settings) throws Exception { Environment env = TestEnvironment.newEnvironment(settings); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), Version.CURRENT, IndexVersion.current()); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), BuildVersion.current(), IndexVersion.current()); ThreadPool threadPool = mock(ThreadPool.class); ClusterService clusterService = mock(ClusterService.class); settings = Security.additionalSettings(settings, true); From 5db2b064a611de7c96b73775df82009642982a3c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 8 Mar 2024 18:10:12 -0800 Subject: 
[PATCH 090/248] Reduce internal states of Block (#106145) Currently, all Blocks inherit from AbstractBlock, resulting in every Block having fields such as positionCount, firstValueIndexes, and nullsMask, which are specific to array blocks.. This change moves these fields to AbstractArrayBlock and removes AbstractBlock. --- .../compute/data/BooleanArrayBlock.java | 19 +-- .../compute/data/BooleanBigArrayBlock.java | 19 +-- .../compute/data/BooleanVectorBlock.java | 3 +- .../compute/data/BytesRefArrayBlock.java | 19 +-- .../compute/data/BytesRefVectorBlock.java | 3 +- .../compute/data/DoubleArrayBlock.java | 19 +-- .../compute/data/DoubleBigArrayBlock.java | 19 +-- .../compute/data/DoubleVectorBlock.java | 3 +- .../compute/data/IntArrayBlock.java | 19 +-- .../compute/data/IntBigArrayBlock.java | 19 +-- .../compute/data/IntVectorBlock.java | 3 +- .../compute/data/LongArrayBlock.java | 19 +-- .../compute/data/LongBigArrayBlock.java | 19 +-- .../compute/data/LongVectorBlock.java | 3 +- .../compute/data/AbstractArrayBlock.java | 97 ++++++++++--- .../compute/data/AbstractBlock.java | 127 ------------------ .../data/AbstractNonThreadSafeRefCounted.java | 4 + .../compute/data/AbstractVector.java | 5 - .../compute/data/AbstractVectorBlock.java | 23 ++-- .../compute/data/ConstantNullBlock.java | 43 +++++- .../elasticsearch/compute/data/DocBlock.java | 11 +- .../compute/data/X-ArrayBlock.java.st | 19 +-- .../compute/data/X-BigArrayBlock.java.st | 19 +-- .../compute/data/X-VectorBlock.java.st | 3 +- 24 files changed, 261 insertions(+), 276 deletions(-) delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 666f1ad926eeb..45b5c09fdc01e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -35,20 +35,18 @@ final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BooleanArrayBlock( - BooleanArrayVector vector, + BooleanArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -115,8 +113,7 @@ public BooleanBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
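import java.util.BitSet;

// An illustrative sketch (hypothetical Simple* names, not the real compute.data classes) of
// the shape this patch arrives at: array-backed blocks own the positional bookkeeping that
// previously lived in the shared AbstractBlock, while vector-backed blocks keep no state of
// their own and delegate to the wrapped vector.
interface SimpleBlock {
    int getPositionCount();
    boolean isNull(int position);
}

abstract class SimpleArrayBlock implements SimpleBlock {
    protected final int positionCount;  // moved down from the removed base class
    protected final BitSet nullsMask;   // null means "no null values"

    SimpleArrayBlock(int positionCount, BitSet nullsMask) {
        this.positionCount = positionCount;
        this.nullsMask = nullsMask;
    }

    @Override
    public final int getPositionCount() {
        return positionCount;
    }

    @Override
    public final boolean isNull(int position) {
        return nullsMask != null && nullsMask.get(position);
    }
}

final class SimpleVectorBlock implements SimpleBlock {
    private final long[] vector;        // stands in for LongVector and friends

    SimpleVectorBlock(long[] vector) {
        this.vector = vector;
    }

    @Override
    public int getPositionCount() {
        return vector.length;           // no duplicated positionCount field: ask the vector
    }

    @Override
    public boolean isNull(int position) {
        return false;                   // vector-backed blocks never contain nulls
    }
}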
@@ -160,10 +157,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index a19ed24302b65..890e6b6a59acd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -36,20 +36,18 @@ public BooleanBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BooleanBigArrayBlock( - BooleanBigArrayVector vector, + BooleanBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public BooleanBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index d707e3cf901c1..a42e9b148064d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -21,7 +21,6 @@ public final class BooleanVectorBlock extends AbstractVectorBlock implements Boo * @param vector considered owned by the current block; must not be used in any other {@code Block} */ BooleanVectorBlock(BooleanVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public boolean getBoolean(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 69e5499eaba46..d71afdbdee2df 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -38,20 +38,18 @@ final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlo positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private BytesRefArrayBlock( - BytesRefArrayVector vector, + BytesRefArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -119,8 +117,7 @@ public BytesRefBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -164,10 +161,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 92f93d5d23a49..1a077f38385e3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -22,7 +22,6 @@ public final class BytesRefVectorBlock extends AbstractVectorBlock implements By * @param vector considered owned by the current block; must not be used in any other {@code Block} */ BytesRefVectorBlock(BytesRefVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -37,7 +36,7 @@ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index b5f5c69e0508a..e9ddabb878b8d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -35,20 +35,18 @@ final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private DoubleArrayBlock( - DoubleArrayVector vector, + DoubleArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -115,8 +113,7 @@ public DoubleBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -160,10 +157,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 39f959edf5ee3..702499513a0c3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -36,20 +36,18 @@ public DoubleBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private DoubleBigArrayBlock( - DoubleBigArrayVector vector, + DoubleBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public DoubleBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index 2aa8e07c25604..647849a968df9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -21,7 +21,6 @@ public final class DoubleVectorBlock extends AbstractVectorBlock implements Doub * @param vector considered owned by the current block; must not be used in any other {@code Block} */ DoubleVectorBlock(DoubleVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public double getDouble(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 2afefbff16117..1470a85f615d1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -35,20 +35,18 @@ final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private IntArrayBlock( - IntArrayVector vector, + IntArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -115,8 +113,7 @@ public IntBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -160,10 +157,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index dc60ce43c04cc..5e29dace7449c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -36,20 +36,18 @@ public IntBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private IntBigArrayBlock( - IntBigArrayVector vector, + IntBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public IntBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 97a4a48533e3a..4f9bb236dfa80 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -21,7 +21,6 @@ public final class IntVectorBlock extends AbstractVectorBlock implements IntBloc * @param vector considered owned by the current block; must not be used in any other {@code Block} */ IntVectorBlock(IntVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public int getInt(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 7491d6519fc57..2406196ba8bdd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -35,20 +35,18 @@ final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private LongArrayBlock( - LongArrayVector vector, + LongArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -115,8 +113,7 @@ public LongBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -160,10 +157,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index 3ff9a12991d43..f4b1f16566d24 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -36,20 +36,18 @@ public LongBigArrayBlock( positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private LongBigArrayBlock( - LongBigArrayVector vector, + LongBigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public LongBlock expand() { expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
@@ -161,10 +158,14 @@ public String toString() { @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 1f4565fec5a8d..0d7d1f691837f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -21,7 +21,6 @@ public final class LongVectorBlock extends AbstractVectorBlock implements LongBl * @param vector considered owned by the current block; must not be used in any other {@code Block} */ LongVectorBlock(LongVector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -36,7 +35,7 @@ public long getLong(int valueIndex) { } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java index d6046f0bda085..46e1ee1fc8983 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java @@ -11,34 +11,30 @@ import java.util.BitSet; -abstract class AbstractArrayBlock extends AbstractBlock { - +abstract class AbstractArrayBlock extends AbstractNonThreadSafeRefCounted implements Block { private final MvOrdering mvOrdering; + protected final int positionCount; - /** - * @param positionCount the number of values in this block - */ - protected AbstractArrayBlock(int positionCount, MvOrdering mvOrdering, BlockFactory blockFactory) { - super(positionCount, blockFactory); - this.mvOrdering = mvOrdering; - } + @Nullable + protected final int[] firstValueIndexes; + + @Nullable + protected final BitSet nullsMask; /** * @param positionCount the number of values in this block */ - protected AbstractArrayBlock( - int positionCount, - @Nullable int[] firstValueIndexes, - @Nullable BitSet nullsMask, - MvOrdering mvOrdering, - BlockFactory blockFactory - ) { - super(positionCount, firstValueIndexes, nullsMask, blockFactory); + protected AbstractArrayBlock(int positionCount, @Nullable int[] firstValueIndexes, @Nullable BitSet nullsMask, MvOrdering mvOrdering) { + this.positionCount = positionCount; + this.firstValueIndexes = firstValueIndexes; this.mvOrdering = mvOrdering; + this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; + assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; + assert assertInvariants(); } @Override - public boolean mayHaveMultivaluedFields() { + public final boolean mayHaveMultivaluedFields() { /* * This could return a false positive if all the indices are one away from * each other. But we will try to avoid that. 
@@ -51,7 +47,7 @@ public final MvOrdering mvOrdering() { return mvOrdering; } - protected BitSet shiftNullsToExpandedPositions() { + protected final BitSet shiftNullsToExpandedPositions() { BitSet expanded = new BitSet(nullsMask.size()); int next = -1; while ((next = nullsMask.nextSetBit(next + 1)) != -1) { @@ -59,4 +55,67 @@ protected BitSet shiftNullsToExpandedPositions() { } return expanded; } + + private boolean assertInvariants() { + if (firstValueIndexes != null) { + assert firstValueIndexes.length == getPositionCount() + 1; + for (int i = 0; i < getPositionCount(); i++) { + assert (firstValueIndexes[i + 1] - firstValueIndexes[i]) >= 0; + } + } + if (nullsMask != null) { + assert nullsMask.nextSetBit(getPositionCount() + 1) == -1; + } + if (firstValueIndexes != null && nullsMask != null) { + for (int i = 0; i < getPositionCount(); i++) { + // Either we have multi-values or a null but never both. + assert ((nullsMask.get(i) == false) || (firstValueIndexes[i + 1] - firstValueIndexes[i]) == 1); + } + } + return true; + } + + @Override + public final int getTotalValueCount() { + if (firstValueIndexes == null) { + return positionCount - nullValuesCount(); + } + return firstValueIndexes[positionCount] - nullValuesCount(); + } + + @Override + public final int getPositionCount() { + return positionCount; + } + + /** Gets the index of the first value for the given position. */ + public final int getFirstValueIndex(int position) { + return firstValueIndexes == null ? position : firstValueIndexes[position]; + } + + /** Gets the number of values for the given position, possibly 0. */ + @Override + public final int getValueCount(int position) { + return isNull(position) ? 0 : firstValueIndexes == null ? 1 : firstValueIndexes[position + 1] - firstValueIndexes[position]; + } + + @Override + public final boolean isNull(int position) { + return mayHaveNulls() && nullsMask.get(position); + } + + @Override + public final boolean mayHaveNulls() { + return nullsMask != null; + } + + @Override + public final int nullValuesCount() { + return mayHaveNulls() ? nullsMask.cardinality() : 0; + } + + @Override + public final boolean areAllValuesNull() { + return nullValuesCount() == getPositionCount(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java deleted file mode 100644 index 0c5207133f71d..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.data; - -import org.elasticsearch.core.Nullable; - -import java.util.BitSet; - -abstract class AbstractBlock extends AbstractNonThreadSafeRefCounted implements Block { - private final int positionCount; - - @Nullable - protected final int[] firstValueIndexes; - - @Nullable - protected final BitSet nullsMask; - - private BlockFactory blockFactory; - - /** - * @param positionCount the number of values in this block - */ - protected AbstractBlock(int positionCount, BlockFactory blockFactory) { - assert positionCount >= 0; - this.positionCount = positionCount; - this.blockFactory = blockFactory; - this.firstValueIndexes = null; - this.nullsMask = null; - assert assertInvariants(); - } - - /** - * @param positionCount the number of values in this block - */ - protected AbstractBlock(int positionCount, @Nullable int[] firstValueIndexes, @Nullable BitSet nullsMask, BlockFactory blockFactory) { - assert positionCount >= 0; - this.positionCount = positionCount; - this.blockFactory = blockFactory; - this.firstValueIndexes = firstValueIndexes; - this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; - assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; - assert assertInvariants(); - } - - private boolean assertInvariants() { - if (firstValueIndexes != null) { - assert firstValueIndexes.length == getPositionCount() + 1; - for (int i = 0; i < getPositionCount(); i++) { - assert (firstValueIndexes[i + 1] - firstValueIndexes[i]) >= 0; - } - } - if (nullsMask != null) { - assert nullsMask.nextSetBit(getPositionCount() + 1) == -1; - } - if (firstValueIndexes != null && nullsMask != null) { - for (int i = 0; i < getPositionCount(); i++) { - // Either we have multi-values or a null but never both. - assert ((nullsMask.get(i) == false) || (firstValueIndexes[i + 1] - firstValueIndexes[i]) == 1); - } - } - return true; - } - - @Override - public int getTotalValueCount() { - if (firstValueIndexes == null) { - return positionCount - nullValuesCount(); - } - return firstValueIndexes[positionCount] - nullValuesCount(); - } - - @Override - public final int getPositionCount() { - return positionCount; - } - - /** Gets the index of the first value for the given position. */ - public int getFirstValueIndex(int position) { - return firstValueIndexes == null ? position : firstValueIndexes[position]; - } - - /** Gets the number of values for the given position, possibly 0. */ - @Override - public int getValueCount(int position) { - return isNull(position) ? 0 : firstValueIndexes == null ? 1 : firstValueIndexes[position + 1] - firstValueIndexes[position]; - } - - @Override - public boolean isNull(int position) { - return mayHaveNulls() && nullsMask.get(position); - } - - @Override - public boolean mayHaveNulls() { - return nullsMask != null; - } - - @Override - public int nullValuesCount() { - return mayHaveNulls() ? 
nullsMask.cardinality() : 0; - } - - @Override - public boolean areAllValuesNull() { - return nullValuesCount() == getPositionCount(); - } - - @Override - public BlockFactory blockFactory() { - return blockFactory; - } - - @Override - public void allowPassingToDifferentDriver() { - blockFactory = blockFactory.parent(); - } - - @Override - public final boolean isReleased() { - return hasReferences() == false; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java index 2dfd8c3eca5ac..ea023c6b46d9e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java @@ -59,6 +59,10 @@ public final void close() { decRef(); } + public final boolean isReleased() { + return hasReferences() == false; + } + /** * This is called when the number of references reaches zero. * This is where resources should be released (adjusting circuit breakers if needed). diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java index 1eb2c09f78511..1e1f8bbf2f8df 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java @@ -43,9 +43,4 @@ public void allowPassingToDifferentDriver() { protected void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed()); } - - @Override - public final boolean isReleased() { - return hasReferences() == false; - } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java index d33d1a1afda41..452bdad1ab192 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java @@ -10,43 +10,44 @@ /** * A Block view of a Vector. 
*/ -abstract class AbstractVectorBlock extends AbstractBlock { +abstract class AbstractVectorBlock extends AbstractNonThreadSafeRefCounted implements Block { - AbstractVectorBlock(int positionCount, BlockFactory blockFactory) { - super(positionCount, blockFactory); + @Override + public final int getFirstValueIndex(int position) { + return position; } @Override - public int getFirstValueIndex(int position) { - return position; + public final int getTotalValueCount() { + return getPositionCount(); } - public int getValueCount(int position) { + public final int getValueCount(int position) { return 1; } @Override - public boolean isNull(int position) { + public final boolean isNull(int position) { return false; } @Override - public int nullValuesCount() { + public final int nullValuesCount() { return 0; } @Override - public boolean mayHaveNulls() { + public final boolean mayHaveNulls() { return false; } @Override - public boolean areAllValuesNull() { + public final boolean areAllValuesNull() { return false; } @Override - public boolean mayHaveMultivaluedFields() { + public final boolean mayHaveMultivaluedFields() { return false; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index b093e3edd5884..eb86d01fbdf3c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -18,12 +18,21 @@ /** * Block implementation representing a constant null value. */ -final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { +final class ConstantNullBlock extends AbstractNonThreadSafeRefCounted + implements + BooleanBlock, + IntBlock, + LongBlock, + DoubleBlock, + BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullBlock.class); + private final int positionCount; + private BlockFactory blockFactory; ConstantNullBlock(int positionCount, BlockFactory blockFactory) { - super(positionCount, blockFactory); + this.positionCount = positionCount; + this.blockFactory = blockFactory; } @Override @@ -222,4 +231,34 @@ public long getLong(int valueIndex) { assert false : "null block"; throw new UnsupportedOperationException("null block"); } + + @Override + public int getTotalValueCount() { + return 0; + } + + @Override + public int getPositionCount() { + return positionCount; + } + + @Override + public int getFirstValueIndex(int position) { + return 0; + } + + @Override + public int getValueCount(int position) { + return 0; + } + + @Override + public BlockFactory blockFactory() { + return blockFactory; + } + + @Override + public void allowPassingToDifferentDriver() { + blockFactory = blockFactory.parent(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index 8c75c8216c59e..a58b8c34b17d5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -20,7 +20,6 @@ public class DocBlock extends AbstractVectorBlock implements Block { private final DocVector vector; DocBlock(DocVector vector) { - 
super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -190,4 +189,14 @@ public void close() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public int getPositionCount() { + return vector.getPositionCount(); + } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 20395ff27b1b4..019379821dbaf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -46,20 +46,18 @@ final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private $Type$ArrayBlock( - $Type$ArrayVector vector, + $Type$ArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? vector.getPositionCount() == getPositionCount() @@ -134,8 +132,7 @@ $endif$ expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -179,10 +176,14 @@ $endif$ @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index d65c54b5e2b24..14ec5382f282c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -36,20 +36,18 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty positionCount, firstValueIndexes, nulls, - mvOrdering, - blockFactory + mvOrdering ); } private $Type$BigArrayBlock( - $Type$BigArrayVector vector, + $Type$BigArrayVector vector, // stylecheck int positionCount, int[] firstValueIndexes, BitSet nulls, - MvOrdering mvOrdering, - BlockFactory blockFactory + MvOrdering mvOrdering ) { - super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + super(positionCount, firstValueIndexes, nulls, mvOrdering); this.vector = vector; assert firstValueIndexes == null ? 
vector.getPositionCount() == getPositionCount() @@ -116,8 +114,7 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty expandedPositionCount, null, shiftNullsToExpandedPositions(), - MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, - blockFactory() + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. @@ -161,10 +158,14 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty @Override public void allowPassingToDifferentDriver() { - super.allowPassingToDifferentDriver(); vector.allowPassingToDifferentDriver(); } + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } + @Override public void closeInternal() { blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 4bc3c66b65743..e3d696ddf9120 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -24,7 +24,6 @@ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Typ * @param vector considered owned by the current block; must not be used in any other {@code Block} */ $Type$VectorBlock($Type$Vector vector) { - super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; } @@ -44,7 +43,7 @@ $endif$ } @Override - public int getTotalValueCount() { + public int getPositionCount() { return vector.getPositionCount(); } From 58477b5808d569174bf84a87d54c1dfdfd61e57c Mon Sep 17 00:00:00 2001 From: puppylpg Date: Sun, 10 Mar 2024 00:06:19 +0800 Subject: [PATCH 091/248] Assert exception type rather than message for connection error (#105823) This commit adjusts readiness probe tests to no longer check a connection error message (which may be influenced by locale), but instead check the exception type itself. 
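In essence the test now relies on the exception type alone. A minimal, self-contained sketch of the pattern (plain JDK code rather than the actual ReadinessClientProbe test helpers; the class name and the loopback port, assumed to have no listener, are illustrative only):

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.channels.SocketChannel;

// Assert that connecting to a non-listening port fails with ConnectException,
// instead of matching the locale-dependent "Connection refused" message text.
public class ProbeRefusedSketch {
    public static void main(String[] args) throws IOException {
        // Port 1 on loopback is assumed to have nothing listening on it.
        InetSocketAddress closedPort = new InetSocketAddress(InetAddress.getLoopbackAddress(), 1);
        try (SocketChannel channel = SocketChannel.open()) {
            channel.connect(closedPort);
            throw new AssertionError("expected the connection to be refused");
        } catch (ConnectException expected) {
            System.out.println("refused as expected: " + expected.getClass().getSimpleName());
        }
    }
}

java.net.ConnectException is the IOException subtype raised when a connection attempt is refused, so asserting on the type keeps the check just as strict without depending on the message wording.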
closes #105822 --- .../test/readiness/ReadinessClientProbe.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java b/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java index a479fa0ccbc9e..afefce7b3b2b9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java +++ b/test/framework/src/main/java/org/elasticsearch/test/readiness/ReadinessClientProbe.java @@ -14,6 +14,7 @@ import org.elasticsearch.readiness.ReadinessService; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.StandardProtocolFamily; @@ -22,8 +23,6 @@ import java.security.PrivilegedAction; import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsString; import static org.junit.Assert.fail; /** @@ -70,11 +69,10 @@ default void tcpReadinessProbeFalse(Integer port) throws Exception { try (SocketChannel channel = SocketChannel.open(StandardProtocolFamily.INET)) { AccessController.doPrivileged((PrivilegedAction) () -> { - String message = expectThrows(IOException.class, () -> { + expectThrows(ConnectException.class, () -> { var result = channelConnect(channel, socketAddress); probeLogger.info("No exception on channel connect, connection success [{}]", result); - }).getMessage(); - assertThat(message, containsString("Connection refused")); + }); return null; }); } From 721d9fadd79387284352d97f0007f2a75d1e69de Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 9 Mar 2024 12:15:04 -0800 Subject: [PATCH 092/248] Specialize serialization of array blocks (#106102) A follow-up of #105893 Currently, we serialize blocks value by value, which is simple but effective. However, it would be more efficient to serialize the underlying structures of array blocks instead. 
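Sketched with plain JDK streams (DataOutputStream/DataInputStream standing in for the real StreamOutput/StreamInput API; tag values, method names and the long[]/BitSet model are illustrative only), the idea is to put a one-byte serialization type on the wire and, for array blocks, ship the backing structures in bulk rather than one (isNull, value) pair per position:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.BitSet;

// Illustrative only: a one-byte tag selects the encoding, and the "array" encoding
// writes the null mask and the value array directly instead of position by position.
public class BlockWireSketch {
    static final byte SERIALIZE_VALUES = 0; // legacy per-position encoding (not shown)
    static final byte SERIALIZE_ARRAY = 2;  // bulk encoding for array-backed blocks

    static byte[] writeArrayBlock(long[] values, BitSet nulls) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeByte(SERIALIZE_ARRAY);
            out.writeInt(values.length);
            long[] maskWords = nulls.toLongArray();
            out.writeInt(maskWords.length);
            for (long word : maskWords) {
                out.writeLong(word);        // null mask shipped as its underlying words
            }
            for (long value : values) {
                out.writeLong(value);       // values shipped as one contiguous run
            }
        }
        return bytes.toByteArray();
    }

    static long[] readBlock(byte[] encoded) throws IOException {
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(encoded))) {
            byte tag = in.readByte();       // dispatch on the serialization type
            if (tag != SERIALIZE_ARRAY) {
                throw new IOException("this sketch only decodes the array encoding, got " + tag);
            }
            int positions = in.readInt();
            long[] maskWords = new long[in.readInt()];
            for (int i = 0; i < maskWords.length; i++) {
                maskWords[i] = in.readLong();
            }
            BitSet nulls = BitSet.valueOf(maskWords); // decoded alongside the values
            long[] values = new long[positions];
            for (int i = 0; i < positions; i++) {
                values[i] = in.readLong();
            }
            return values;
        }
    }

    public static void main(String[] args) throws IOException {
        BitSet nulls = new BitSet();
        nulls.set(1);
        long[] decoded = readBlock(writeArrayBlock(new long[] { 7, 0, 9 }, nulls));
        System.out.println(decoded.length); // prints 3
    }
}

The real change keeps the value-by-value form as a fallback and only emits the array encoding when the receiver's transport version (ESQL_SERIALIZE_ARRAY_BLOCK) allows it.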
--- docs/changelog/106102.yaml | 5 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../compute/data/BooleanArrayBlock.java | 25 ++++++++ .../compute/data/BooleanBlock.java | 53 +++++++++++----- .../compute/data/BytesRefArrayBlock.java | 25 ++++++++ .../compute/data/BytesRefBlock.java | 54 +++++++++++----- .../compute/data/DoubleArrayBlock.java | 25 ++++++++ .../compute/data/DoubleBlock.java | 53 +++++++++++----- .../compute/data/IntArrayBlock.java | 25 ++++++++ .../elasticsearch/compute/data/IntBlock.java | 53 +++++++++++----- .../compute/data/LongArrayBlock.java | 25 ++++++++ .../elasticsearch/compute/data/LongBlock.java | 53 +++++++++++----- .../compute/data/AbstractArrayBlock.java | 62 +++++++++++++++++++ .../org/elasticsearch/compute/data/Block.java | 7 +++ .../compute/data/X-ArrayBlock.java.st | 26 ++++++++ .../compute/data/X-Block.java.st | 58 +++++++++++------ 16 files changed, 447 insertions(+), 103 deletions(-) create mode 100644 docs/changelog/106102.yaml diff --git a/docs/changelog/106102.yaml b/docs/changelog/106102.yaml new file mode 100644 index 0000000000000..b7c13514f6715 --- /dev/null +++ b/docs/changelog/106102.yaml @@ -0,0 +1,5 @@ +pr: 106102 +summary: Specialize serialization of array blocks +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index bc27ab8265b26..392e157f60952 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -139,6 +139,7 @@ static TransportVersion def(int id) { public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0); public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0); public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0); + public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 45b5c09fdc01e..710eb17f72f6a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -53,6 +55,29 @@ private BooleanArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static BooleanArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + BooleanArrayVector vector = null; + boolean success = false; + try { + vector = BooleanArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new BooleanArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public BooleanVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index fffa3af137d76..1dd231c129a2d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static BooleanBlock readFrom(StreamInput in) throws IOException { } private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return BooleanVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> BooleanBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> BooleanVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> BooleanArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static BooleanBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (BooleanBlock.Builder builder = 
in.blockFactory().newBooleanBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { BooleanVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof BooleanArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBoolean(getBoolean(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + BooleanBlock.writeValues(this, out); + } + } + + private static void writeValues(BooleanBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBoolean(block.getBoolean(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index d71afdbdee2df..6cc66183db2ed 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -9,9 +9,11 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -56,6 +58,29 @@ private BytesRefArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static BytesRefArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + BytesRefArrayVector vector = null; + boolean success = false; + try { + vector = BytesRefArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new BytesRefArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public BytesRefVector asVector() { return 
null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 8ed17a1435302..8331d948ca329 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,10 +54,19 @@ private static BytesRefBlock readFrom(StreamInput in) throws IOException { } private static BytesRefBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return BytesRefVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> BytesRefBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> BytesRefVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> BytesRefArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static BytesRefBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (BytesRefBlock.Builder builder = in.blockFactory().newBytesRefBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -78,22 +88,32 @@ private static BytesRefBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { BytesRefVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof BytesRefArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBytesRef(getBytesRef(getFirstValueIndex(pos) + valueIndex, new BytesRef())); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + BytesRefBlock.writeValues(this, out); + } + } + + private static void writeValues(BytesRefBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + var scratch = new BytesRef(); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBytesRef(block.getBytesRef(block.getFirstValueIndex(pos) + valueIndex, 
scratch)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index e9ddabb878b8d..d872a4938a734 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -53,6 +55,29 @@ private DoubleArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static DoubleArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + DoubleArrayVector vector = null; + boolean success = false; + try { + vector = DoubleArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new DoubleArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public DoubleVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 890f965c765bb..20be6402ba097 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static DoubleBlock readFrom(StreamInput in) throws IOException { } private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return DoubleVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> DoubleBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> DoubleVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> DoubleArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static DoubleBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (DoubleBlock.Builder builder = in.blockFactory().newDoubleBlockBuilder(positions)) { 
for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { DoubleVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof DoubleArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeDouble(getDouble(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + DoubleBlock.writeValues(this, out); + } + } + + private static void writeValues(DoubleBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeDouble(block.getDouble(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 1470a85f615d1..492769d1f3d43 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -53,6 +55,29 @@ private IntArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static IntArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + IntArrayVector vector = null; + boolean success = false; + try { + vector = IntArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new IntArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public IntVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 9a66445eb55a2..296d9378323a5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static IntBlock readFrom(StreamInput in) throws IOException { } private static IntBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return IntVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> IntBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> IntVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> IntArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static IntBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (IntBlock.Builder builder = in.blockFactory().newIntBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static IntBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { IntVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof IntArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeInt(getInt(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + IntBlock.writeValues(this, out); + } + } + + private static void writeValues(IntBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeInt(block.getInt(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 2406196ba8bdd..77ae863e41ff0 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -8,8 +8,10 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -53,6 +55,29 @@ private LongArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static LongArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + LongArrayVector vector = null; + boolean success = false; + try { + vector = LongArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new LongArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public LongVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 5e5dc0606b896..820600bda87f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,10 +50,19 @@ private static LongBlock readFrom(StreamInput in) throws IOException { } private static LongBlock readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return LongVector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> LongBlock.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> LongVector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> LongArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static LongBlock readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try (LongBlock.Builder builder = in.blockFactory().newLongBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -74,22 +84,31 @@ private static LongBlock readFrom(BlockStreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { LongVector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if 
(vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof LongArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeLong(getLong(getFirstValueIndex(pos) + valueIndex)); - } + out.writeByte(SERIALIZE_BLOCK_VALUES); + LongBlock.writeValues(this, out); + } + } + + private static void writeValues(LongBlock block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeLong(block.getLong(block.getFirstValueIndex(pos) + valueIndex)); } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java index 46e1ee1fc8983..81098cba393bb 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java @@ -7,8 +7,11 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import java.io.IOException; import java.util.BitSet; abstract class AbstractArrayBlock extends AbstractNonThreadSafeRefCounted implements Block { @@ -118,4 +121,63 @@ public final int nullValuesCount() { public final boolean areAllValuesNull() { return nullValuesCount() == getPositionCount(); } + + static final class SubFields { + long bytesReserved = 0; + final int positionCount; + final int[] firstValueIndexes; + final BitSet nullsMask; + final MvOrdering mvOrdering; + + SubFields(BlockFactory blockFactory, StreamInput in) throws IOException { + this.positionCount = in.readVInt(); + boolean success = false; + try { + if (in.readBoolean()) { + bytesReserved += blockFactory.preAdjustBreakerForInt(positionCount + 1); + final int[] values = new int[positionCount + 1]; + values[0] = in.readVInt(); + for (int i = 1; i <= positionCount; i++) { + values[i] = values[i - 1] + in.readVInt(); + } + this.firstValueIndexes = values; + } else { + this.firstValueIndexes = null; + } + if (in.readBoolean()) { + bytesReserved += blockFactory.preAdjustBreakerForLong(positionCount / Long.BYTES); + nullsMask = BitSet.valueOf(in.readLongArray()); + } else { + nullsMask = null; + } + this.mvOrdering = in.readEnum(MvOrdering.class); + success = true; + } finally { + if (success == false) { + blockFactory.adjustBreaker(-bytesReserved); + } + } + } + + int vectorPositions() { + return firstValueIndexes == null ? 
positionCount : firstValueIndexes[positionCount]; + } + } + + void writeSubFields(StreamOutput out) throws IOException { + out.writeVInt(positionCount); + out.writeBoolean(firstValueIndexes != null); + if (firstValueIndexes != null) { + // firstValueIndexes are monotonic increasing + out.writeVInt(firstValueIndexes[0]); + for (int i = 1; i <= positionCount; i++) { + out.writeVInt(firstValueIndexes[i] - firstValueIndexes[i - 1]); + } + } + out.writeBoolean(nullsMask != null); + if (nullsMask != null) { + out.writeLongArray(nullsMask.toLongArray()); + } + out.writeEnum(mvOrdering); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 5a6d7cb4a6003..b14a27fa01930 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -239,4 +239,11 @@ static List getNamedWriteables() { ConstantNullBlock.ENTRY ); } + + /** + * Serialization type for blocks: 0 and 1 replace false/true used in pre-8.14 + */ + byte SERIALIZE_BLOCK_VALUES = 0; + byte SERIALIZE_BLOCK_VECTOR = 1; + byte SERIALIZE_BLOCK_ARRAY = 2; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 019379821dbaf..a7c5f10032394 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -10,14 +10,17 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; $endif$ +import java.io.IOException; import java.util.BitSet; /** @@ -64,6 +67,29 @@ final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static $Type$ArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + $Type$ArrayVector vector = null; + boolean success = false; + try { + vector = $Type$ArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new $Type$ArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public $Type$Vector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 
c5fd7e8302776..06aed6f7b0fad 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,10 +65,19 @@ $endif$ } private static $Type$Block readFrom(BlockStreamInput in) throws IOException { - final boolean isVector = in.readBoolean(); - if (isVector) { - return $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); - } + final byte serializationType = in.readByte(); + return switch (serializationType) { + case SERIALIZE_BLOCK_VALUES -> $Type$Block.readValues(in); + case SERIALIZE_BLOCK_VECTOR -> $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); + case SERIALIZE_BLOCK_ARRAY -> $Type$ArrayBlock.readArrayBlock(in.blockFactory(), in); + default -> { + assert false : "invalid block serialization type " + serializationType; + throw new IllegalStateException("invalid serialization type " + serializationType); + } + }; + } + + private static $Type$Block readValues(BlockStreamInput in) throws IOException { final int positions = in.readVInt(); try ($Type$Block.Builder builder = in.blockFactory().new$Type$BlockBuilder(positions)) { for (int i = 0; i < positions; i++) { @@ -89,26 +99,38 @@ $endif$ @Override default void writeTo(StreamOutput out) throws IOException { $Type$Vector vector = asVector(); - out.writeBoolean(vector != null); + final var version = out.getTransportVersion(); if (vector != null) { + out.writeByte(SERIALIZE_BLOCK_VECTOR); vector.writeTo(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof $Type$ArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_ARRAY); + b.writeArrayBlock(out); } else { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeByte(SERIALIZE_BLOCK_VALUES); + $Type$Block.writeValues(this, out); + } + } + + private static void writeValues($Type$Block block, StreamOutput out) throws IOException { + final int positions = block.getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (block.isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = block.getValueCount(pos); + out.writeVInt(valueCount); $if(BytesRef)$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex, new BytesRef())); + var scratch = new BytesRef(); +$endif$ + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { +$if(BytesRef)$ + out.write$Type$(block.get$Type$(block.getFirstValueIndex(pos) + valueIndex, scratch)); $else$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex)); + out.write$Type$(block.get$Type$(block.getFirstValueIndex(pos) + valueIndex)); $endif$ - } } } } From 713f52167fb1a94f3f5fdc774daa35a5b2b21ff1 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 11 Mar 2024 09:37:54 +0100 Subject: [PATCH 093/248] Disable parallel collection for 
terms aggregation with min_doc_count equals to 0 (#106156) --- docs/changelog/106156.yaml | 6 ++++++ .../aggregations/bucket/terms/TermsAggregationBuilder.java | 5 +++++ .../search/aggregations/bucket/TermsTests.java | 6 ++++++ 3 files changed, 17 insertions(+) create mode 100644 docs/changelog/106156.yaml diff --git a/docs/changelog/106156.yaml b/docs/changelog/106156.yaml new file mode 100644 index 0000000000000..63232efe6e5fb --- /dev/null +++ b/docs/changelog/106156.yaml @@ -0,0 +1,6 @@ +pr: 106156 +summary: Disable parallel collection for terms aggregation with `min_doc_count` equals + to 0 +area: Aggregations +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 68263e2d72b9c..13e5fe3dbd11f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -139,6 +139,11 @@ public boolean supportsSampling() { @Override public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + if (minDocCount() == 0) { + // if minDocCount is zero, we collect the zero buckets looking into all segments in the index. To avoid + // looking into the same segment for each thread we disable concurrency + return false; + } /* * we parallelize only if the cardinality of the field is lower than shard size, this is to minimize precision issues. * When ordered by term, we still take cardinality into account to avoid overhead that concurrency may cause against diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index 4a9e086d72143..d431a3a156957 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -214,5 +214,11 @@ public boolean supportsParallelCollection(ToLongFunction fieldCardinalit assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(1, 10))); assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(11, 100))); } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.shardSize(randomIntBetween(1, 100)); + terms.minDocCount(0); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(1, 100))); + } } } From 1b8baf1cf860004a85d9368accb9f707f31202c7 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 11 Mar 2024 09:12:42 +0000 Subject: [PATCH 094/248] Convert most uses of BaseMatcher to TypeSafeMatcher (#105764) --- .../geometry/simplify/Vector3DTests.java | 11 ++-- .../decider/DiskThresholdDeciderIT.java | 33 ++-------- .../search/geo/GeoPointScriptDocValuesIT.java | 31 ++------- .../spatial/CentroidCalculatorTests.java | 27 +++----- .../random/RandomSamplerAggregatorTests.java | 10 +-- .../test/hamcrest/OptionalMatchers.java | 2 +- .../test/hamcrest/TupleMatchers.java | 5 +- .../fielddata/GeometryDocValueTests.java | 16 ++--- .../aggregations/GeoLineAggregatorTests.java | 31 +++++---- .../geogrid/BoundedGeoHexGridTilerTests.java | 63 +++++++++---------- 10 files changed, 84 insertions(+), 145 deletions(-) diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java
b/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java index a84fee8c892b7..5a88091cbcd4f 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/simplify/Vector3DTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.geometry.simplify; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; import static java.lang.Math.toRadians; import static org.elasticsearch.geometry.simplify.SimplificationErrorCalculator.Point3D.from; @@ -238,7 +238,7 @@ private static Matcher samePoint(Simplifi return new TestPoint3DMatcher(expected, 1e-15); } - private static class TestPoint3DMatcher extends BaseMatcher { + private static class TestPoint3DMatcher extends TypeSafeMatcher { private final Matcher xMatcher; private final Matcher yMatcher; private final Matcher zMatcher; @@ -252,11 +252,8 @@ private static class TestPoint3DMatcher extends BaseMatcher(shardSizes.getSmallestShardIds())); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds()))); } public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception { @@ -158,7 +158,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES); - assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds())); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds()))); } @TestIssueLogging( @@ -221,11 +221,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard assertThat(restoreInfo.successfulShards(), is(snapshotInfo.totalShards())); assertThat(restoreInfo.failedShards(), is(0)); - assertBusyWithDiskUsageRefresh( - dataNode0Id, - indexName, - new ContainsExactlyOneOf<>(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace)) - ); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace)))); } private Set getShardIds(final String nodeId, final String indexName) { @@ -346,23 +342,4 @@ private void assertBusyWithDiskUsageRefresh(String nodeId, String indexName, Mat private InternalClusterInfoService getInternalClusterInfoService() { return (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); } - - private static final class ContainsExactlyOneOf extends TypeSafeMatcher> { - - private final Set expectedValues; - - ContainsExactlyOneOf(Set expectedValues) { - this.expectedValues = expectedValues; - } - - @Override - protected boolean matchesSafely(Set item) { - return item.size() == 1 && expectedValues.contains(item.iterator().next()); - } - - @Override - public void describeTo(Description description) { - description.appendText("Expected to contain exactly one value from ").appendValueList("[", ",", "]", expectedValues); - } - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java index 3b2d266e77cda..c62f4932220fc 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java @@ -21,8 +21,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.hamcrest.BaseMatcher; -import org.hamcrest.Description; +import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.Before; @@ -33,12 +32,12 @@ import java.util.HashMap; import java.util.Map; import java.util.function.Function; +import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; public class GeoPointScriptDocValuesIT extends ESSingleNodeTestCase { @@ -255,28 +254,8 @@ public void testNullPoint() throws Exception { ); } - private static MultiPointLabelPosition isMultiPointLabelPosition(double[] lats, double[] lons) { - return new MultiPointLabelPosition(lats, lons); - } - - private static class MultiPointLabelPosition extends BaseMatcher { - private final GeoPoint[] points; - - private MultiPointLabelPosition(double[] lats, double[] lons) { - points = new GeoPoint[lats.length]; - for (int i = 0; i < lats.length; i++) { - points[i] = new GeoPoint(lats[i], lons[i]); - } - } - - @Override - public boolean matches(Object actual) { - return is(oneOf(points)).matches(actual); - } - - @Override - public void describeTo(Description description) { - description.appendText("is(oneOf(" + Arrays.toString(points) + ")"); - } + private static Matcher isMultiPointLabelPosition(double[] lats, double[] lons) { + assert lats.length == lons.length; + return oneOf(IntStream.range(0, lats.length).mapToObj(i -> new GeoPoint(lats[i], lons[i])).toArray(GeoPoint[]::new)); } } diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index 7a5cb5de49bdc..fce58b07eb090 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -22,9 +22,9 @@ import org.elasticsearch.geometry.utils.GeographyValidator; import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; import java.util.ArrayList; import java.util.Collections; @@ -409,7 +409,7 @@ private Matcher matchesCentroid(CentroidCalculator expectedC return new CentroidMatcher(expectedCentroid.getX(), expectedCentroid.getY(), expectedCentroid.sumWeight(), weightFactor); } - private static class CentroidMatcher extends BaseMatcher { + private static class CentroidMatcher extends TypeSafeMatcher { private final double weightFactor; private final Matcher xMatcher; private final Matcher yMatcher; @@ -432,24 +432,17 @@ private Matcher matchDouble(double value) { } @Override - public boolean matches(Object actual) { - if (actual instanceof CentroidCalculator actualCentroid) { - return 
xMatcher.matches(actualCentroid.getX()) - && yMatcher.matches(actualCentroid.getY()) - && wMatcher.matches(weightFactor * actualCentroid.sumWeight()); - } - return false; + public boolean matchesSafely(CentroidCalculator actualCentroid) { + return xMatcher.matches(actualCentroid.getX()) + && yMatcher.matches(actualCentroid.getY()) + && wMatcher.matches(weightFactor * actualCentroid.sumWeight()); } @Override - public void describeMismatch(Object item, Description description) { - if (item instanceof CentroidCalculator actualCentroid) { - describeSubMismatch(xMatcher, actualCentroid.getX(), "X value", description); - describeSubMismatch(yMatcher, actualCentroid.getY(), "Y value", description); - describeSubMismatch(wMatcher, weightFactor * actualCentroid.sumWeight(), "sumWeight", description); - } else { - super.describeMismatch(item, description); - } + public void describeMismatchSafely(CentroidCalculator actualCentroid, Description description) { + describeSubMismatch(xMatcher, actualCentroid.getX(), "X value", description); + describeSubMismatch(yMatcher, actualCentroid.getY(), "Y value", description); + describeSubMismatch(wMatcher, weightFactor * actualCentroid.sumWeight(), "sumWeight", description); } private void describeSubMismatch(Matcher matcher, double value, String name, Description description) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java index 0916d2ad541e8..2b6a38b685303 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java @@ -58,7 +58,7 @@ public void testAggregationSampling() throws IOException { counts[integer.get()] = result.getDocCount(); if (result.getDocCount() > 0) { Avg agg = result.getAggregations().get("avg"); - assertThat(Strings.toString(result), agg.getValue(), allOf(not(notANumber()), IsFinite.isFinite())); + assertThat(Strings.toString(result), agg.getValue(), allOf(not(notANumber()), isFinite())); avgs[integer.get()] = agg.getValue(); } }, @@ -163,11 +163,11 @@ private static void writeTestDocs(RandomIndexWriter w) throws IOException { } } - private static class IsFinite extends TypeSafeMatcher { - public static Matcher isFinite() { - return new IsFinite(); - } + public static Matcher isFinite() { + return new IsFinite(); + } + private static class IsFinite extends TypeSafeMatcher { @Override protected boolean matchesSafely(Double item) { return Double.isFinite(item); diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java index 1cd92296a4ec7..043fe40c91f79 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java @@ -72,7 +72,7 @@ public void describeMismatch(Object item, Description description) { } } - public static Matcher> isPresent() { + public static Matcher> isPresent() { return new IsPresentMatcher<>(anything()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java index 
ac21cf1410882..38774f0e5cfa2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/TupleMatchers.java @@ -45,7 +45,10 @@ public void describeTo(final Description description) { * For example: *
    assertThat(Tuple.tuple("myValue1", "myValue2"), isTuple(startsWith("my"), containsString("Val")))
    */ - public static TupleMatcher isTuple(Matcher v1Matcher, Matcher v2Matcher) { + public static Matcher> isTuple( + Matcher v1Matcher, + Matcher v2Matcher + ) { return new TupleMatcher<>(v1Matcher, v2Matcher); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java index 55988e72a2383..c92d65a301a3a 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java @@ -28,8 +28,8 @@ import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.util.GeoTestUtils; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; import java.io.BufferedReader; import java.io.IOException; @@ -226,7 +226,7 @@ private static RectangleLabelPosition isRectangleLabelPosition(Rectangle... rect return new RectangleLabelPosition(rectangles); } - private static class RectangleLabelPosition extends BaseMatcher { + private static class RectangleLabelPosition extends TypeSafeMatcher { private final Point[] encodedPositions; private RectangleLabelPosition(Rectangle... rectangles) { @@ -257,14 +257,10 @@ private Point average(GeoPoint... points) { } @Override - public boolean matches(Object actual) { - if (actual instanceof GeoPoint) { - GeoPoint point = (GeoPoint) actual; - int x = CoordinateEncoder.GEO.encodeX(point.lon()); - int y = CoordinateEncoder.GEO.encodeY(point.lat()); - return is(oneOf(encodedPositions)).matches(new Point(x, y)); - } - return false; + public boolean matchesSafely(GeoPoint point) { + int x = CoordinateEncoder.GEO.encodeX(point.lon()); + int y = CoordinateEncoder.GEO.encodeY(point.lat()); + return is(oneOf(encodedPositions)).matches(new Point(x, y)); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index 0b76e786b26be..86575d418e605 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -57,10 +57,10 @@ import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.spatial.SpatialPlugin; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.Matchers; +import org.hamcrest.TypeSafeMatcher; import java.io.IOException; import java.util.ArrayList; @@ -458,11 +458,11 @@ private void assertGeoLine(SortOrder sortOrder, String group, InternalGeoLine ge } } - private Matcher isGeoLine(int checkCount, long[] line) { + private static Matcher isGeoLine(int checkCount, long[] line) { return new TestGeoLineLongArrayMatcher(checkCount, line); } - private static class TestGeoLineLongArrayMatcher extends BaseMatcher { + private static class TestGeoLineLongArrayMatcher extends TypeSafeMatcher { private final int checkCount; private final long[] expectedLine; private 
final ArrayList failures = new ArrayList<>(); @@ -473,26 +473,23 @@ private TestGeoLineLongArrayMatcher(int checkCount, long[] expectedLine) { } @Override - public boolean matches(Object actualObj) { + public boolean matchesSafely(long[] actualLine) { failures.clear(); - if (actualObj instanceof long[] actualLine) { - if (checkCount == expectedLine.length && actualLine.length != expectedLine.length) { - failures.add("Expected length " + expectedLine.length + " but got " + actualLine.length); - } - for (int i = 0; i < checkCount; i++) { - Point actual = asPoint(actualLine[i]); - Point expected = asPoint(expectedLine[i]); - if (actual.equals(expected) == false) { - failures.add("At line position " + i + " expected " + expected + " but got " + actual); - } + if (checkCount == expectedLine.length && actualLine.length != expectedLine.length) { + failures.add("Expected length " + expectedLine.length + " but got " + actualLine.length); + } + for (int i = 0; i < checkCount; i++) { + Point actual = asPoint(actualLine[i]); + Point expected = asPoint(expectedLine[i]); + if (actual.equals(expected) == false) { + failures.add("At line position " + i + " expected " + expected + " but got " + actual); } - return failures.size() == 0; } - return false; + return failures.isEmpty(); } @Override - public void describeMismatch(Object item, Description description) { + public void describeMismatchSafely(long[] item, Description description) { description.appendText("had ").appendValue(failures.size()).appendText(" failures"); for (String failure : failures) { description.appendText("\n\t").appendText(failure); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java index 08850c982c206..9ffcdebc729f6 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/BoundedGeoHexGridTilerTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.h3.LatLng; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.common.H3CartesianUtil; -import org.hamcrest.BaseMatcher; import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; import static org.elasticsearch.xpack.spatial.search.aggregations.bucket.geogrid.GeoHexGridTiler.BoundedGeoHexGridTiler.height; @@ -205,7 +205,7 @@ private static TestCompareBounds withinBounds(GeoBoundingBox other) { return new TestCompareBounds(other, -1); } - private static class TestCompareBounds extends BaseMatcher { + private static class TestCompareBounds extends TypeSafeMatcher { private final GeoBoundingBox other; private final int comparison; @@ -225,25 +225,22 @@ private TestCompareBounds(GeoBoundingBox other, int comparison) { } @Override - public boolean matches(Object actual) { - if (actual instanceof GeoBoundingBox bbox) { - if (comparison == 0) { - matchedTop = closeTo(bbox.top(), 1e-10).matches(other.top()); - matchedBottom = closeTo(bbox.bottom(), 1e-10).matches(other.bottom()); - matchedLeft = closeTo(posLon(bbox.left()), 1e-10).matches(posLon(other.left())); - matchedRight = closeTo(posLon(bbox.right()), 1e-10).matches(posLon(other.right())); + public boolean 
matchesSafely(GeoBoundingBox bbox) { + if (comparison == 0) { + matchedTop = closeTo(bbox.top(), 1e-10).matches(other.top()); + matchedBottom = closeTo(bbox.bottom(), 1e-10).matches(other.bottom()); + matchedLeft = closeTo(posLon(bbox.left()), 1e-10).matches(posLon(other.left())); + matchedRight = closeTo(posLon(bbox.right()), 1e-10).matches(posLon(other.right())); + } else { + if (comparison > 0) { + // assert that 'bbox' is larger than and entirely contains 'other' + setBoxWithinBox(other, bbox); } else { - if (comparison > 0) { - // assert that 'bbox' is larger than and entirely contains 'other' - setBoxWithinBox(other, bbox); - } else { - // assert that 'bbox' is smaller than and entirely contained within 'other' - setBoxWithinBox(bbox, other); - } + // assert that 'bbox' is smaller than and entirely contained within 'other' + setBoxWithinBox(bbox, other); } - return matchedTop && matchedBottom && matchedLeft && matchedRight; } - return false; + return matchedTop && matchedBottom && matchedLeft && matchedRight; } private void setBoxWithinBox(GeoBoundingBox smaller, GeoBoundingBox larger) { @@ -295,22 +292,22 @@ public void describeTo(Description description) { } @Override - public void describeMismatch(Object item, Description description) { - super.describeMismatch(item, description); - if (item instanceof GeoBoundingBox bbox) { - if (matchedTop == false) { - describeMismatchOf(description, "top", other.top(), bbox.top(), true); - } - if (matchedBottom == false) { - describeMismatchOf(description, "bottom", other.bottom(), bbox.bottom(), false); - } - if (matchedLeft == false) { - describeMismatchOf(description, "left", other.left(), bbox.left(), false); - } - if (matchedRight == false) { - describeMismatchOf(description, "right", other.right(), bbox.right(), true); - } + public void describeMismatchSafely(GeoBoundingBox bbox, Description description) { + super.describeMismatchSafely(bbox, description); + + if (matchedTop == false) { + describeMismatchOf(description, "top", other.top(), bbox.top(), true); + } + if (matchedBottom == false) { + describeMismatchOf(description, "bottom", other.bottom(), bbox.bottom(), false); + } + if (matchedLeft == false) { + describeMismatchOf(description, "left", other.left(), bbox.left(), false); } + if (matchedRight == false) { + describeMismatchOf(description, "right", other.right(), bbox.right(), true); + } + } private void describeMismatchOf(Description description, String field, double thisValue, double thatValue, boolean max) { From 2063fab989f79d683e0c08998a7d7ef15b7ffe40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:16:13 +0100 Subject: [PATCH 095/248] Add user information to slowlog (#105621) * Add user/auth information to slowlog --- docs/reference/index-modules/slowlog.asciidoc | 73 +++- .../common/settings/IndexScopedSettings.java | 2 + .../org/elasticsearch/index/IndexModule.java | 7 +- .../elasticsearch/index/IndexingSlowLog.java | 65 ++- .../elasticsearch/index/SearchSlowLog.java | 33 +- .../index/SlowLogFieldProvider.java | 35 ++ .../elasticsearch/indices/IndicesService.java | 32 +- .../elasticsearch/index/IndexModuleTests.java | 18 +- .../index/IndexingSlowLogTests.java | 65 ++- .../index/SearchSlowLogTests.java | 45 +- .../indices/IndicesServiceTests.java | 79 ++++ ...g.elasticsearch.index.SlowLogFieldProvider | 10 + .../elasticsearch/test/cluster/LogType.java | 4 +- .../action/TransportResumeFollowAction.java | 2 + 
.../xpack/security/SecuritySlowLogIT.java | 392 ++++++++++++++++++ .../security/src/main/java/module-info.java | 4 +- .../xpack/security/Security.java | 34 ++ .../slowlog/SecuritySlowLogFieldProvider.java | 57 +++ ...g.elasticsearch.index.SlowLogFieldProvider | 8 + .../xpack/security/SecurityTests.java | 4 +- .../xpack/watcher/WatcherPluginTests.java | 4 +- 21 files changed, 893 insertions(+), 80 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java create mode 100644 server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider create mode 100644 x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java create mode 100644 x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 55c0867e485f5..c29296b59ad4a 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -58,33 +58,56 @@ The search slow log file is configured in the `log4j2.properties` file. [discrete] ==== Identifying search slow log origin -It is often useful to identify what triggered a slow running query. If a call was initiated with an `X-Opaque-ID` header, then the user ID -is included in Search Slow logs as an additional **id** field +It is often useful to identify what triggered a slow running query. +To include information about the user that triggered a slow search, +use the `index.search.slowlog.include.user` setting. + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_settings +{ + "index.search.slowlog.include.user": true +} +-------------------------------------------------- +// TEST[setup:my_index] + +This will result in user information being included in the slow log. 
[source,js] --------------------------- { - "type": "index_search_slowlog", - "timestamp": "2030-08-30T11:59:37,786+02:00", - "level": "WARN", - "component": "i.s.s.query", - "cluster.name": "distribution_run", - "node.name": "node-0", - "message": "[index6][0]", - "took": "78.4micros", - "took_millis": "0", - "total_hits": "0 hits", - "stats": "[]", - "search_type": "QUERY_THEN_FETCH", - "total_shards": "1", - "source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", - "id": "MY_USER_ID", - "cluster.uuid": "Aq-c-PAeQiK3tfBYtig9Bw", - "node.id": "D7fUYfnfTLa2D7y-xw6tZg" + "@timestamp": "2024-02-21T12:42:37.255Z", + "log.level": "WARN", + "auth.type": "REALM", + "elasticsearch.slowlog.id": "tomcat-123", + "elasticsearch.slowlog.message": "[index6][0]", + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", + "elasticsearch.slowlog.stats": "[]", + "elasticsearch.slowlog.took": "747.3micros", + "elasticsearch.slowlog.took_millis": 0, + "elasticsearch.slowlog.total_hits": "1 hits", + "elasticsearch.slowlog.total_shards": 1, + "user.name": "elastic", + "user.realm": "reserved", + "ecs.version": "1.2.0", + "service.name": "ES_ECS", + "event.dataset": "elasticsearch.index_search_slowlog", + "process.thread.name": "elasticsearch[runTask-0][search][T#5]", + "log.logger": "index.search.slowlog.query", + "elasticsearch.cluster.uuid": "Ui23kfF1SHKJwu_hI1iPPQ", + "elasticsearch.node.id": "JK-jn-XpQ3OsDUsq5ZtfGg", + "elasticsearch.node.name": "node-0", + "elasticsearch.cluster.name": "distribution_run" } + --------------------------- // NOTCONSOLE +If a call was initiated with an `X-Opaque-ID` header, then the ID is included +in Search Slow logs in the **elasticsearch.slowlog.id** field. See +<> for details and best practices. + [discrete] [[index-slow-log]] === Index Slow log @@ -119,6 +142,18 @@ PUT /my-index-000001/_settings -------------------------------------------------- // TEST[setup:my_index] +To include information about the user that triggered a slow indexing event, +use the `index.indexing.slowlog.include.user` setting. + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_settings +{ + "index.indexing.slowlog.include.user": true +} +-------------------------------------------------- +// TEST[setup:my_index] + By default Elasticsearch will log the first 1000 characters of the _source in the slowlog. You can change that with `index.indexing.slowlog.source`. 
Setting it to `false` or `0` will skip logging the source entirely, while setting it to diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 41dd840b0c0e7..452fc14025e2e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -85,12 +85,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, + SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, + IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_TYPE_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index b768c2f5a7d28..06a5e13a208be 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -194,13 +194,14 @@ public IndexModule( final Map directoryFactories, final BooleanSupplier allowExpensiveQueries, final IndexNameExpressionResolver expressionResolver, - final Map recoveryStateFactories + final Map recoveryStateFactories, + final SlowLogFieldProvider slowLogFieldProvider ) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); - this.searchOperationListeners.add(new SearchSlowLog(indexSettings)); - this.indexOperationListeners.add(new IndexingSlowLog(indexSettings)); + this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFieldProvider)); + this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFieldProvider)); this.directoryFactories = Collections.unmodifiableMap(directoryFactories); this.allowExpensiveQueries = allowExpensiveQueries; this.expressionResolver = expressionResolver; diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 7b0a46f022dad..14c2c5440bd24 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -69,6 +69,13 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexScope ); + public static final Setting INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( + INDEX_INDEXING_SLOWLOG_PREFIX + ".include.user", + false, + Property.Dynamic, + Property.IndexScope + ); + /** * Legacy index setting, kept for 7.x BWC compatibility. This setting has no effect in 8.x. Do not use. 
* TODO: Remove in 9.0 @@ -96,6 +103,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { * characters of the source. */ private int maxSourceCharsToLog; + private final SlowLogFieldProvider slowLogFieldProvider; /** * Reads how much of the source to log. The user can specify any value they @@ -117,7 +125,8 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexScope ); - IndexingSlowLog(IndexSettings indexSettings) { + IndexingSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { + this.slowLogFieldProvider = slowLogFieldProvider; this.indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); Loggers.setLevel(this.indexLogger, Level.TRACE); this.index = indexSettings.getIndex(); @@ -171,22 +180,66 @@ public void postIndex(ShardId shardId, Engine.Index indexOperation, Engine.Index final ParsedDocument doc = indexOperation.parsedDoc(); final long tookInNanos = result.getTook(); if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace(IndexingSlowLogMessage.of(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace( + IndexingSlowLogMessage.of( + this.slowLogFieldProvider.indexSlowLogFields(), + index, + doc, + tookInNanos, + reformat, + maxSourceCharsToLog + ) + ); } } } static final class IndexingSlowLogMessage { - public static ESLogMessage of(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + public static ESLogMessage of( + Map additionalFields, + Index index, + ParsedDocument doc, + long tookInNanos, + boolean reformat, + int maxSourceCharsToLog + ) { Map jsonFields = prepareMap(index, doc, tookInNanos, reformat, maxSourceCharsToLog); + jsonFields.putAll(additionalFields); return new ESLogMessage().withFields(jsonFields); } diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index c477f5e4978d5..eb227e6e1136d 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -44,7 +44,16 @@ public final class SearchSlowLog implements SearchOperationListener { private final Logger queryLogger; private final Logger fetchLogger; + private final SlowLogFieldProvider slowLogFieldProvider; + static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; + + public static 
final Setting INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( + INDEX_SEARCH_SLOWLOG_PREFIX + ".include.user", + false, + Property.Dynamic, + Property.IndexScope + ); public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting( INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), @@ -118,7 +127,10 @@ public final class SearchSlowLog implements SearchOperationListener { private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); - public SearchSlowLog(IndexSettings indexSettings) { + public SearchSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { + slowLogFieldProvider.init(indexSettings); + this.slowLogFieldProvider = slowLogFieldProvider; + this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); Loggers.setLevel(this.fetchLogger, Level.TRACE); @@ -154,33 +166,34 @@ public SearchSlowLog(IndexSettings indexSettings) { @Override public void onQueryPhase(SearchContext context, long tookInNanos) { if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) { - queryLogger.warn(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.warn(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) { - queryLogger.info(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.info(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) { - queryLogger.debug(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.debug(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) { - queryLogger.trace(SearchSlowLogMessage.of(context, tookInNanos)); + queryLogger.trace(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } } @Override public void onFetchPhase(SearchContext context, long tookInNanos) { if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) { - fetchLogger.warn(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.warn(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) { - fetchLogger.info(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.info(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) { - fetchLogger.debug(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.debug(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) { - fetchLogger.trace(SearchSlowLogMessage.of(context, tookInNanos)); + fetchLogger.trace(SearchSlowLogMessage.of(this.slowLogFieldProvider.searchSlowLogFields(), context, tookInNanos)); } } static final class SearchSlowLogMessage { - public static ESLogMessage of(SearchContext context, long tookInNanos) { + public static ESLogMessage of(Map additionalFields, SearchContext context, long tookInNanos) 
{ Map jsonFields = prepareMap(context, tookInNanos); + jsonFields.putAll(additionalFields); return new ESLogMessage().withFields(jsonFields); } diff --git a/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java b/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java new file mode 100644 index 0000000000000..c272ec23ef7e5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/SlowLogFieldProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index; + +import java.util.Map; + +/** + * Interface for providing additional fields to the slow log from a plugin. + * Intended to be loaded through SPI. + */ +public interface SlowLogFieldProvider { + /** + * Initialize field provider with index level settings to be able to listen for updates and set initial values + * @param indexSettings settings for the index + */ + void init(IndexSettings indexSettings); + + /** + * Slow log fields for indexing events + * @return map of field name to value + */ + Map indexSlowLogFields(); + + /** + * Slow log fields for search events + * @return map of field name to value + */ + Map searchSlowLogFields(); +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index b47d10882a5c1..3319b29df6dfa 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -85,6 +85,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.bulk.stats.BulkStats; import org.elasticsearch.index.cache.request.ShardRequestCache; @@ -736,7 +737,8 @@ private synchronized IndexService createIndexService( directoryFactories, () -> allowExpensiveQueries, indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + loadSlowLogFieldProvider() ); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); @@ -812,7 +814,8 @@ public synchronized MapperService createIndexMapperServiceForValidation(IndexMet directoryFactories, () -> allowExpensiveQueries, indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + loadSlowLogFieldProvider() ); pluginsService.forEach(p -> p.onIndexModule(indexModule)); return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService); @@ -1390,6 +1393,31 @@ int numPendingDeletes(Index index) { } } + // pkg-private for testing + SlowLogFieldProvider loadSlowLogFieldProvider() { + List slowLogFieldProviders = pluginsService.loadServiceProviders(SlowLogFieldProvider.class); + return new SlowLogFieldProvider() { + @Override + public void init(IndexSettings indexSettings) { + slowLogFieldProviders.forEach(provider -> provider.init(indexSettings)); + } + + @Override + public Map indexSlowLogFields() { + return slowLogFieldProviders.stream() + 
.flatMap(provider -> provider.indexSlowLogFields().entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + @Override + public Map searchSlowLogFields() { + return slowLogFieldProviders.stream() + .flatMap(provider -> provider.searchSlowLogFields().entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + }; + } + /** * Checks if all pending deletes have completed. Used by tests to ensure we don't check directory contents * while deletion still ongoing. * The reason is that, on Windows, browsing the directory contents can interfere diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 928a03eed2bd6..4e6f702b67252 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -232,7 +232,8 @@ public void testWrapperIsBound() throws IOException { Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); module.setReaderWrapper(s -> new Wrapper()); @@ -257,7 +258,8 @@ public void testRegisterIndexStore() throws IOException { indexStoreFactories, () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); final IndexService indexService = newIndexService(module); @@ -280,7 +282,8 @@ public void testDirectoryWrapper() throws IOException { Map.of(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); module.setDirectoryWrapper(new TestDirectoryWrapper()); @@ -631,7 +634,8 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { Collections.emptyMap(), () -> true, indexNameExpressionResolver, - recoveryStateFactories + recoveryStateFactories, + mock(SlowLogFieldProvider.class) ); final IndexService indexService = newIndexService(module); @@ -651,7 +655,8 @@ public void testIndexCommitListenerIsBound() throws IOException, ExecutionExcept Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); final AtomicLong lastAcquiredPrimaryTerm = new AtomicLong(); @@ -751,7 +756,8 @@ private static IndexModule createIndexModule( Collections.emptyMap(), () -> true, indexNameExpressionResolver, - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index fb83e817c052e..d8d5ab56c6e1d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -39,6 +39,7 @@ import org.mockito.Mockito; import java.io.IOException; +import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; @@ -49,6 +50,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; public class IndexingSlowLogTests extends ESTestCase { static MockAppender appender; @@ -71,7 +73,7 @@ public void testLevelPrecedence() { String uuid = 
UUIDs.randomBase64UUID(); IndexMetadata metadata = createIndexMetadata("index-precedence", settings(uuid)); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); ParsedDocument doc = EngineTestCase.createParsedDoc("1", null); Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId("doc_id")), randomNonNegativeLong(), doc); @@ -132,7 +134,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - IndexingSlowLog log1 = new IndexingSlowLog(index1Settings); + IndexingSlowLog log1 = new IndexingSlowLog(index1Settings, mock(SlowLogFieldProvider.class)); IndexSettings index2Settings = new IndexSettings( createIndexMetadata( @@ -145,7 +147,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - IndexingSlowLog log2 = new IndexingSlowLog(index2Settings); + IndexingSlowLog log2 = new IndexingSlowLog(index2Settings, mock(SlowLogFieldProvider.class)); ParsedDocument doc = EngineTestCase.createParsedDoc("1", null); Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId("doc_id")), randomNonNegativeLong(), doc); @@ -169,12 +171,12 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); IndexSettings index1Settings = new IndexSettings(createIndexMetadata("index1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - IndexingSlowLog log1 = new IndexingSlowLog(index1Settings); + IndexingSlowLog log1 = new IndexingSlowLog(index1Settings, mock(SlowLogFieldProvider.class)); int numberOfLoggersBefore = context.getLoggers().size(); IndexSettings index2Settings = new IndexSettings(createIndexMetadata("index2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - IndexingSlowLog log2 = new IndexingSlowLog(index2Settings); + IndexingSlowLog log2 = new IndexingSlowLog(index2Settings, mock(SlowLogFieldProvider.class)); context = (LoggerContext) LogManager.getContext(false); int numberOfLoggersAfter = context.getLoggers().size(); @@ -210,7 +212,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException { ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo/123]")); assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); @@ -220,7 +222,36 @@ public void testSlowLogMessageHasJsonFields() throws IOException { assertThat(p.get("elasticsearch.slowlog.source"), is(emptyOrNullString())); // Turning on document logging logs the whole thing - p = IndexingSlowLogMessage.of(index, pd, 10, true, Integer.MAX_VALUE); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); + assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"foo\\\":\\\"bar\\\"}")); + } + + public void testSlowLogMessageHasAdditionalFields() throws IOException { + BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject()); + ParsedDocument pd = new ParsedDocument( + new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), + "id", + "routingValue", + null, + source, + XContentType.JSON, + null + ); + Index index = new Index("foo", "123"); + // Turning off 
document logging doesn't log source[] + ESLogMessage p = IndexingSlowLogMessage.of(Map.of("field1", "value1", "field2", "value2"), index, pd, 10, true, 0); + assertThat(p.get("field1"), equalTo("value1")); + assertThat(p.get("field2"), equalTo("value2")); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo/123]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + assertThat(p.get("elasticsearch.slowlog.id"), equalTo("id")); + assertThat(p.get("elasticsearch.slowlog.routing"), equalTo("routingValue")); + assertThat(p.get("elasticsearch.slowlog.source"), is(emptyOrNullString())); + + // Turning on document logging logs the whole thing + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"foo\\\":\\\"bar\\\"}")); } @@ -238,7 +269,7 @@ public void testEmptyRoutingField() throws IOException { ); Index index = new Index("foo", "123"); - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.get("routing"), nullValue()); } @@ -256,19 +287,19 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - ESLogMessage p = IndexingSlowLogMessage.of(index, pd, 10, true, 0); + ESLogMessage p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 0); assertThat(p.getFormattedMessage(), not(containsString("source["))); // Turning on document logging logs the whole thing - p = IndexingSlowLogMessage.of(index, pd, 10, true, Integer.MAX_VALUE); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"foo\\\":\\\"bar\\\"}")); // And you can truncate the source - p = IndexingSlowLogMessage.of(index, pd, 10, true, 3); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 3); assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"f")); // And you can truncate the source - p = IndexingSlowLogMessage.of(index, pd, 10, true, 3); + p = IndexingSlowLogMessage.of(Map.of(), index, pd, 10, true, 3); assertThat(p.get("elasticsearch.slowlog.source"), containsString("{\\\"f")); assertThat(p.get("elasticsearch.slowlog.message"), startsWith("[foo/123]")); assertThat(p.get("elasticsearch.slowlog.took"), containsString("10nanos")); @@ -288,7 +319,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { final XContentParseException e = expectThrows( XContentParseException.class, - () -> IndexingSlowLogMessage.of(index, doc, 10, true, 3) + () -> IndexingSlowLogMessage.of(Map.of(), index, doc, 10, true, 3) ); assertThat( e, @@ -311,7 +342,7 @@ public void testReformatSetting() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertFalse(log.isReformat()); settings.updateIndexMetadata( newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()) @@ -328,7 +359,7 @@ public void testReformatSetting() { metadata = newIndexMeta("index", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build()); 
settings = new IndexSettings(metadata, Settings.EMPTY); - log = new IndexingSlowLog(settings); + log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertTrue(log.isReformat()); try { settings.updateIndexMetadata( @@ -361,7 +392,7 @@ public void testSetLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - IndexingSlowLog log = new IndexingSlowLog(settings); + IndexingSlowLog log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getIndexTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), log.getIndexDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getIndexInfoThreshold()); @@ -392,7 +423,7 @@ public void testSetLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new IndexingSlowLog(settings); + log = new IndexingSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexDebugThreshold()); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 2fa3216ad5556..a50092a0b8d12 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -36,12 +36,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; public class SearchSlowLogTests extends ESSingleNodeTestCase { static MockAppender appender; @@ -92,7 +94,7 @@ public void testLevelPrecedence() { try (SearchContext ctx = searchContextWithSourceAndTask(createIndex("index"))) { String uuid = UUIDs.randomBase64UUID(); IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); // For this test, when level is not breached, the level below should be used. 
{ @@ -176,7 +178,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - SearchSlowLog log1 = new SearchSlowLog(settings1); + SearchSlowLog log1 = new SearchSlowLog(settings1, mock(SlowLogFieldProvider.class)); IndexSettings settings2 = new IndexSettings( createIndexMetadata( @@ -189,7 +191,7 @@ public void testTwoLoggersDifferentLevel() { ), Settings.EMPTY ); - SearchSlowLog log2 = new SearchSlowLog(settings2); + SearchSlowLog log2 = new SearchSlowLog(settings2, mock(SlowLogFieldProvider.class)); { // threshold set on WARN only, should not log @@ -212,7 +214,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { try (SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1"))) { IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log1 = new SearchSlowLog(settings1); + SearchSlowLog log1 = new SearchSlowLog(settings1, mock(SlowLogFieldProvider.class)); int numberOfLoggersBefore = context.getLoggers().size(); try (SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2"))) { @@ -220,7 +222,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY ); - SearchSlowLog log2 = new SearchSlowLog(settings2); + SearchSlowLog log2 = new SearchSlowLog(settings2, mock(SlowLogFieldProvider.class)); int numberOfLoggersAfter = context.getLoggers().size(); assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); @@ -235,7 +237,7 @@ private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { public void testSlowLogHasJsonFields() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); @@ -248,6 +250,23 @@ public void testSlowLogHasJsonFields() throws IOException { } } + public void testSlowLogHasAdditionalFields() throws IOException { + IndexService index = createIndex("foo"); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of("field1", "value1", "field2", "value2"), searchContext, 10); + assertThat(p.get("field1"), equalTo("value1")); + assertThat(p.get("field2"), equalTo("value2")); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); + assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); + assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); + assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + } + } + public void testSlowLogsWithStats() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = createSearchContext(index, "group1")) { @@ -257,7 +276,7 @@ public void testSlowLogsWithStats() throws IOException { new 
SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); } @@ -267,7 +286,7 @@ public void testSlowLogsWithStats() throws IOException { searchContext.setTask( new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); } } @@ -275,7 +294,7 @@ public void testSlowLogsWithStats() throws IOException { public void testSlowLogSearchContextPrinterToLog() throws IOException { IndexService index = createIndex("foo"); try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(Map.of(), searchContext, 10); assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); // Makes sure that output doesn't contain any new lines assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); @@ -295,7 +314,7 @@ public void testSetQueryLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getQueryTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), log.getQueryDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getQueryInfoThreshold()); @@ -326,7 +345,7 @@ public void testSetQueryLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new SearchSlowLog(settings); + log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryDebugThreshold()); @@ -401,7 +420,7 @@ public void testSetFetchLevels() { .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); + SearchSlowLog log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(100).nanos(), log.getFetchTraceThreshold()); assertEquals(TimeValue.timeValueMillis(200).nanos(), log.getFetchDebugThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getFetchInfoThreshold()); @@ -432,7 +451,7 @@ public void testSetFetchLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchWarnThreshold()); settings = new IndexSettings(metadata, Settings.EMPTY); - log = new SearchSlowLog(settings); + log = new SearchSlowLog(settings, mock(SlowLogFieldProvider.class)); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchTraceThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchDebugThreshold()); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java 
b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 60545ac71b2bf..ee1bdf927a11b 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; @@ -192,6 +193,50 @@ public void onIndexModule(IndexModule indexModule) { } } + public static class TestSlowLogFieldProvider implements SlowLogFieldProvider { + + private static Map fields = Map.of(); + + static void setFields(Map fields) { + TestSlowLogFieldProvider.fields = fields; + } + + @Override + public void init(IndexSettings indexSettings) {} + + @Override + public Map indexSlowLogFields() { + return fields; + } + + @Override + public Map searchSlowLogFields() { + return fields; + } + } + + public static class TestAnotherSlowLogFieldProvider implements SlowLogFieldProvider { + + private static Map fields = Map.of(); + + static void setFields(Map fields) { + TestAnotherSlowLogFieldProvider.fields = fields; + } + + @Override + public void init(IndexSettings indexSettings) {} + + @Override + public Map indexSlowLogFields() { + return fields; + } + + @Override + public Map searchSlowLogFields() { + return fields; + } + } + @Override protected boolean resetNodeAfterTest() { return true; @@ -746,4 +791,38 @@ public void testBuildAliasFilterDataStreamAliases() { assertThat(result, is(AliasFilter.EMPTY)); } } + + public void testLoadSlowLogFieldProvider() { + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of("key2", "value2")); + + var indicesService = getIndicesService(); + SlowLogFieldProvider fieldProvider = indicesService.loadSlowLogFieldProvider(); + + // The map of fields from the two providers are merged to a single map of fields + assertEquals(Map.of("key1", "value1", "key2", "value2"), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of("key1", "value1", "key2", "value2"), fieldProvider.indexSlowLogFields()); + + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of("key1", "value2")); + + // There is an overlap of field names, since this isn't deterministic and probably a + // programming error (two providers provide the same field) throw an exception + assertThrows(IllegalStateException.class, fieldProvider::searchSlowLogFields); + assertThrows(IllegalStateException.class, fieldProvider::indexSlowLogFields); + + TestSlowLogFieldProvider.setFields(Map.of("key1", "value1")); + TestAnotherSlowLogFieldProvider.setFields(Map.of()); + + // One provider has no fields + assertEquals(Map.of("key1", "value1"), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of("key1", "value1"), fieldProvider.indexSlowLogFields()); + + TestSlowLogFieldProvider.setFields(Map.of()); + TestAnotherSlowLogFieldProvider.setFields(Map.of()); + + // Both providers have no fields + assertEquals(Map.of(), fieldProvider.searchSlowLogFields()); + assertEquals(Map.of(), fieldProvider.indexSlowLogFields()); + } } diff --git a/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider 
b/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider new file mode 100644 index 0000000000000..fcd1211eee0c5 --- /dev/null +++ b/server/src/test/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider @@ -0,0 +1,10 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +org.elasticsearch.indices.IndicesServiceTests$TestSlowLogFieldProvider +org.elasticsearch.indices.IndicesServiceTests$TestAnotherSlowLogFieldProvider diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java index 96178e621e018..76468b9be9ed5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java @@ -11,7 +11,9 @@ public enum LogType { SERVER("%s.log"), SERVER_JSON("%s_server.json"), - AUDIT("%s_audit.json"); + AUDIT("%s_audit.json"), + SEARCH_SLOW("%s_index_search_slowlog.json"), + INDEXING_SLOW("%s_index_indexing_slowlog.json"); private final String filenameFormat; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index f13ac2f2845b0..cad1a37a3a17d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -505,12 +505,14 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetad SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, + SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, + IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_TYPE_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java new file mode 100644 index 0000000000000..704799a45824c --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecuritySlowLogIT.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.ClassRule; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import static org.hamcrest.collection.IsIn.in; +import static org.hamcrest.core.Every.everyItem; +import static org.hamcrest.core.IsNot.not; + +public class SecuritySlowLogIT extends ESRestTestCase { + + private record TestIndexData( + String name, + boolean searchSlowLogEnabled, + boolean indexSlowLogEnabled, + boolean searchSlowLogUserEnabled, + boolean indexSlowLogUserEnabled + ) {} + + private static int currentSearchLogIndex = 0; + private static int currentIndexLogIndex = 0; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "true") + .user("admin_user", "admin-password") + .user("api_user", "api-password", "superuser", false) + .build(); + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("api_user", new SecureString("api-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testSlowLogWithApiUser() throws Exception { + List testIndices = randomTestIndexData(); + for (TestIndexData testData : testIndices) { + searchSomeData(testData.name); + indexSomeData(testData.name); + } + + Map expectedUser = Map.of("user.name", "api_user", "user.realm", "default_file", "auth.type", "REALM"); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithUserWithFullName() throws Exception { + List testIndices = randomTestIndexData(); + createUserWithFullName("full_name", "full-name-password", "Full Name", new String[] { "superuser" }); + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", basicAuthHeaderValue("full_name", new SecureString("full-name-password".toCharArray()))) + .build(); + 
searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "full_name", + "user.full_name", + "Full Name", + "user.realm", + "default_native", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithUserWithFullNameWithRunAs() throws Exception { + List testIndices = randomTestIndexData(); + createUserWithFullName("full_name", "full-name-password", "Full Name", new String[] { "superuser" }); + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("es-security-runas-user", "full_name") + .addHeader("Authorization", basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray()))) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.effective.full_name", + "Full Name", + "user.realm", + "default_file", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithApiKey() throws Exception { + List testIndices = randomTestIndexData(); + String apiKeyName = randomAlphaOfLengthBetween(10, 15); + Map createApiKeyResponse = createApiKey( + apiKeyName, + basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())) + ); + String apiKeyHeader = Base64.getEncoder() + .encodeToString( + (createApiKeyResponse.get("id") + ":" + createApiKeyResponse.get("api_key").toString()).getBytes(StandardCharsets.UTF_8) + ); + + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", "ApiKey " + apiKeyHeader) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.realm", + "_es_api_key", + "auth.type", + "API_KEY", + "apikey.id", + createApiKeyResponse.get("id"), + "apikey.name", + apiKeyName + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithRunAs() throws Exception { + List testIndices = randomTestIndexData(); + + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("es-security-runas-user", "api_user") + .addHeader("Authorization", basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray()))) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "admin_user", + "user.effective.name", + "api_user", + "user.realm", + "default_file", + "user.effective.realm", + "default_file", + "auth.type", + "REALM" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + public void testSlowLogWithServiceAccount() throws Exception { + List testIndices = randomTestIndexData(); + Map createServiceAccountResponse = createServiceAccountToken(); + @SuppressWarnings("unchecked") + String tokenValue = 
((Map) createServiceAccountResponse.get("token")).get("value").toString(); + + for (TestIndexData testData : testIndices) { + final RequestOptions requestOptions = RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", "Bearer " + tokenValue) + .build(); + searchSomeData(testData.name, requestOptions); + indexSomeData(testData.name, requestOptions); + } + + Map expectedUser = Map.of( + "user.name", + "elastic/enterprise-search-server", + "user.realm", + "_service_account", + "auth.type", + "TOKEN" + ); + + verifySearchSlowLogMatchesTestData(testIndices, expectedUser); + verifyIndexSlowLogMatchesTestData(testIndices, expectedUser); + } + + private static void enableSearchSlowLog(String index, boolean includeUser) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity( + "{\"index.search.slowlog.threshold.query." + + randomFrom("trace", "warn", "debug", "info") + + "\": \"0\", " + + "\"index.search.slowlog.include.user\": " + + includeUser + + "}" + ); + client().performRequest(request); + } + + private static void enableIndexingSlowLog(String index, boolean includeUser) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity( + "{\"index.indexing.slowlog.threshold.index." + + randomFrom("trace", "warn", "debug", "info") + + "\": \"0\", " + + "\"index.indexing.slowlog.include.user\": " + + includeUser + + "}" + ); + client().performRequest(request); + } + + private static void indexSomeData(String index) throws IOException { + indexSomeData(index, RequestOptions.DEFAULT.toBuilder().build()); + } + + private static void searchSomeData(String index) throws IOException { + searchSomeData(index, RequestOptions.DEFAULT.toBuilder().build()); + } + + private static void indexSomeData(String index, RequestOptions requestOptions) throws IOException { + final Request request = new Request("PUT", "/" + index + "/_doc/1"); + request.setOptions(requestOptions); + request.setJsonEntity("{ \"foobar\" : true }"); + client().performRequest(request); + } + + private static void searchSomeData(String index, RequestOptions requestOptions) throws IOException { + Request request = new Request("GET", "/" + index + "/_search"); + request.setOptions(requestOptions); + client().performRequest(request); + } + + private static void setupTestIndex(TestIndexData testIndexData) throws IOException { + indexSomeData(testIndexData.name); + if (testIndexData.indexSlowLogEnabled) { + enableIndexingSlowLog(testIndexData.name, testIndexData.indexSlowLogUserEnabled); + } + if (testIndexData.searchSlowLogEnabled) { + enableSearchSlowLog(testIndexData.name, testIndexData.searchSlowLogUserEnabled); + } + } + + private static void createUserWithFullName(String user, String password, String fullName, String[] roles) throws IOException { + Request request = new Request("POST", "/_security/user/" + user); + request.setJsonEntity( + "{ \"full_name\" : \"" + + fullName + + "\", \"roles\": [\"" + + String.join("\",\"", roles) + + "\"], \"password\": \"" + + password + + "\" }" + ); + Response response = client().performRequest(request); + assertOK(response); + } + + private static List randomTestIndexData() throws IOException { + List testData = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 10); i++) { + TestIndexData randomTestData = new TestIndexData( + "search-" + randomAlphaOfLengthBetween(5, 10).toLowerCase() + "-" + i, + randomBoolean(), + randomBoolean(), + randomBoolean(), + 
randomBoolean() + ); + setupTestIndex(randomTestData); + testData.add(randomTestData); + } + return testData; + } + + private void verifySearchSlowLogMatchesTestData(List testIndices, Map expectedUserData) + throws Exception { + verifySlowLog(logLines -> { + for (TestIndexData testIndex : testIndices) { + if (testIndex.searchSlowLogEnabled) { + Map logLine = logLines.get(currentSearchLogIndex); + if (testIndex.searchSlowLogUserEnabled) { + assertThat(expectedUserData.entrySet(), everyItem(in(logLine.entrySet()))); + } else { + assertThat(expectedUserData.entrySet(), everyItem(not(in(logLine.entrySet())))); + } + currentSearchLogIndex++; + } + } + }, LogType.SEARCH_SLOW); + } + + private void verifyIndexSlowLogMatchesTestData(List testIndices, Map expectedUserData) throws Exception { + verifySlowLog(logLines -> { + for (TestIndexData testIndex : testIndices) { + if (testIndex.indexSlowLogEnabled) { + Map logLine = logLines.get(currentIndexLogIndex); + if (testIndex.indexSlowLogUserEnabled) { + assertThat(expectedUserData.entrySet(), everyItem(in(logLine.entrySet()))); + } else { + assertThat(expectedUserData.entrySet(), everyItem(not(in(logLine.entrySet())))); + } + currentIndexLogIndex++; + } + } + }, LogType.INDEXING_SLOW); + } + + private static void verifySlowLog(Consumer>> logVerifier, LogType logType) throws Exception { + assertBusy(() -> { + try (var slowLog = cluster.getNodeLog(0, logType)) { + final List lines = Streams.readAllLines(slowLog); + logVerifier.accept( + lines.stream().map(line -> XContentHelper.convertToMap(XContentType.JSON.xContent(), line, true)).toList() + ); + } + }, 5, TimeUnit.SECONDS); + } + + private static Map createApiKey(String name, String authHeader) throws IOException { + final Request request = new Request("POST", "/_security/api_key"); + + request.setJsonEntity(Strings.format(""" + {"name":"%s"}""", name)); + + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, authHeader)); + final Response response = client().performRequest(request); + assertOK(response); + return responseAsMap(response); + } + + private static Map createServiceAccountToken() throws IOException { + final Request createServiceTokenRequest = new Request( + "POST", + "/_security/service/elastic/enterprise-search-server/credential/token" + ); + final Response createServiceTokenResponse = adminClient().performRequest(createServiceTokenRequest); + assertOK(createServiceTokenResponse); + + return responseAsMap(createServiceTokenResponse); + } +} diff --git a/x-pack/plugin/security/src/main/java/module-info.java b/x-pack/plugin/security/src/main/java/module-info.java index 4b99ab5ed6b2c..9806650f99094 100644 --- a/x-pack/plugin/security/src/main/java/module-info.java +++ b/x-pack/plugin/security/src/main/java/module-info.java @@ -65,8 +65,10 @@ exports org.elasticsearch.xpack.security.action.user to org.elasticsearch.server; exports org.elasticsearch.xpack.security.action.settings to org.elasticsearch.server; exports org.elasticsearch.xpack.security.operator to org.elasticsearch.internal.operator, org.elasticsearch.internal.security; - exports org.elasticsearch.xpack.security.authc to org.elasticsearch.xcontent; + exports org.elasticsearch.xpack.security.slowlog to org.elasticsearch.server; + + provides org.elasticsearch.index.SlowLogFieldProvider with org.elasticsearch.xpack.security.slowlog.SecuritySlowLogFieldProvider; provides org.elasticsearch.cli.CliToolProvider with diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index c6089df37bca2..d3898cc510d77 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -178,13 +178,16 @@ import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilderFactory; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; @@ -2022,6 +2025,37 @@ private void reloadRemoteClusterCredentials(Settings settingsWithKeystore) { future.actionGet(); } + public Map getAuthContextForSlowLog() { + if (this.securityContext.get() != null && this.securityContext.get().getAuthentication() != null) { + Authentication authentication = this.securityContext.get().getAuthentication(); + Subject authenticatingSubject = authentication.getAuthenticatingSubject(); + Subject effetctiveSubject = authentication.getEffectiveSubject(); + Map authContext = new HashMap<>(); + if (authenticatingSubject.getUser() != null) { + authContext.put("user.name", authenticatingSubject.getUser().principal()); + authContext.put("user.realm", authenticatingSubject.getRealm().getName()); + if (authenticatingSubject.getUser().fullName() != null) { + authContext.put("user.full_name", authenticatingSubject.getUser().fullName()); + } + } + // Only include effective user if different from authenticating user (run-as) + if (effetctiveSubject.getUser() != null && effetctiveSubject.equals(authenticatingSubject) == false) { + authContext.put("user.effective.name", effetctiveSubject.getUser().principal()); + authContext.put("user.effective.realm", effetctiveSubject.getRealm().getName()); + if (effetctiveSubject.getUser().fullName() != null) { + authContext.put("user.effective.full_name", effetctiveSubject.getUser().fullName()); + } + } + authContext.put("auth.type", authentication.getAuthenticationType().name()); + if (authentication.isApiKey()) { + authContext.put("apikey.id", authenticatingSubject.getMetadata().get(AuthenticationField.API_KEY_ID_KEY).toString()); + authContext.put("apikey.name", authenticatingSubject.getMetadata().get(AuthenticationField.API_KEY_NAME_KEY).toString()); + } + return authContext; + } + return Map.of(); + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; 
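Before the security-specific implementation in the next file, here is a minimal sketch of what any other plugin would need in order to contribute fields through the new `SlowLogFieldProvider` SPI added earlier in this patch. The package, class name (`IndexNameSlowLogFieldProvider`), and `myplugin.*` field key are invented for illustration and are not part of this change:

[source,java]
--------------------------------------------------
package org.example.myplugin.slowlog; // hypothetical plugin package

import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.SlowLogFieldProvider;

import java.util.Map;

/**
 * Illustrative only: tags slow log entries for an index with that index's
 * name under a plugin-specific key.
 */
public class IndexNameSlowLogFieldProvider implements SlowLogFieldProvider {

    // Captured from the per-index settings passed to init(); consumers for
    // dynamic settings could also be registered here, as the security
    // provider in the next file does.
    private String indexName = "";

    @Override
    public void init(IndexSettings indexSettings) {
        this.indexName = indexSettings.getIndex().getName();
    }

    @Override
    public Map<String, String> indexSlowLogFields() {
        return Map.of("myplugin.index_name", indexName);
    }

    @Override
    public Map<String, String> searchSlowLogFields() {
        return Map.of("myplugin.index_name", indexName);
    }
}
--------------------------------------------------

Such a class would also be listed in its plugin's `META-INF/services/org.elasticsearch.index.SlowLogFieldProvider` file, mirroring the registrations added in this patch. Field names should be unique across providers: the merge in `IndicesService#loadSlowLogFieldProvider` uses `Collectors.toMap`, which throws on duplicate keys, as `testLoadSlowLogFieldProvider` verifies.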
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java new file mode 100644 index 0000000000000..1610aedd1d363 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/slowlog/SecuritySlowLogFieldProvider.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.slowlog; + +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; +import org.elasticsearch.xpack.security.Security; + +import java.util.Map; + +import static org.elasticsearch.index.IndexingSlowLog.INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING; +import static org.elasticsearch.index.SearchSlowLog.INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING; + +public class SecuritySlowLogFieldProvider implements SlowLogFieldProvider { + private final Security plugin; + private boolean includeUserInIndexing = false; + private boolean includeUserInSearch = false; + + public SecuritySlowLogFieldProvider() { + throw new IllegalStateException("Provider must be constructed using PluginsService"); + } + + public SecuritySlowLogFieldProvider(Security plugin) { + this.plugin = plugin; + } + + @Override + public void init(IndexSettings indexSettings) { + indexSettings.getScopedSettings() + .addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING, newValue -> this.includeUserInSearch = newValue); + this.includeUserInSearch = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING); + indexSettings.getScopedSettings() + .addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING, newValue -> this.includeUserInIndexing = newValue); + this.includeUserInIndexing = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_INCLUDE_USER_SETTING); + } + + @Override + public Map indexSlowLogFields() { + if (includeUserInIndexing) { + return plugin.getAuthContextForSlowLog(); + } + return Map.of(); + } + + @Override + public Map searchSlowLogFields() { + if (includeUserInSearch) { + return plugin.getAuthContextForSlowLog(); + } + return Map.of(); + } +} diff --git a/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider new file mode 100644 index 0000000000000..41f0ec83ac3f1 --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.index.SlowLogFieldProvider @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. 
+# + +org.elasticsearch.xpack.security.slowlog.SecuritySlowLogFieldProvider diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 4a5412ad9c5bb..66b03e8dedd32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.indices.TestIndexNameExpressionResolver; @@ -373,7 +374,8 @@ public void testOnIndexModuleIsNoOpWithSecurityDisabled() throws Exception { Collections.emptyMap(), () -> true, TestIndexNameExpressionResolver.newInstance(threadPool.getThreadContext()), - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); security.onIndexModule(indexModule); // indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set an ReaderWrapper we would get an exception here diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index 64bf5b5d99fdb..bee2d6aa22355 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.indices.SystemIndexDescriptor; @@ -66,7 +67,8 @@ public void testWatcherDisabledTests() throws Exception { Collections.emptyMap(), () -> true, TestIndexNameExpressionResolver.newInstance(), - Collections.emptyMap() + Collections.emptyMap(), + mock(SlowLogFieldProvider.class) ); // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it watcher.onIndexModule(indexModule); From be1cf31cf8b7dfaf0950fdc2eabdf703e42a1fd7 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Mon, 11 Mar 2024 11:01:11 +0100 Subject: [PATCH 096/248] Exclude internal fields from job APIs. (#106115) * Exclude internal fields from job APIs. 
* Skip BWC breaking tests * Remove unused Job.STRICT_PARSER --- x-pack/plugin/build.gradle | 2 + .../core/ml/action/PreviewDatafeedAction.java | 2 +- .../xpack/core/ml/action/PutJobAction.java | 11 +-- .../ml/action/ValidateJobConfigAction.java | 14 +--- .../xpack/core/ml/job/config/Job.java | 67 +++++++------------ .../xpack/core/ml/job/messages/Messages.java | 2 - .../xpack/core/ml/job/config/JobTests.java | 25 ++----- .../rest-api-spec/test/ml/jobs_crud.yml | 2 +- .../rest-api-spec/test/ml/validate.yml | 16 +---- 9 files changed, 38 insertions(+), 103 deletions(-) diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index eae3031512d4f..72e63b3255999 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -109,6 +109,8 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> ) task.skipTest("ml/jobs_crud/Test update job", "Behaviour change #89824 - added limit filter to categorization analyzer") task.skipTest("ml/jobs_crud/Test create job with delimited format", "removing undocumented functionality") + task.skipTest("ml/jobs_crud/Test cannot create job with model snapshot id set", "Exception type has changed.") + task.skipTest("ml/validate/Test job config is invalid because model snapshot id set", "Exception type has changed.") task.skipTest("ml/datafeeds_crud/Test update datafeed to point to missing job", "behaviour change #44752 - not allowing to update datafeed job_id") task.skipTest( "ml/datafeeds_crud/Test update datafeed to point to different job", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java index 8d4e9d25b94a3..d03a6d5c0c7c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java @@ -60,7 +60,7 @@ public static class Request extends ActionRequest implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("preview_datafeed_action", Request.Builder::new); static { PARSER.declareObject(Builder::setDatafeedBuilder, DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG); - PARSER.declareObject(Builder::setJobBuilder, Job.STRICT_PARSER, JOB_CONFIG); + PARSER.declareObject(Builder::setJobBuilder, Job.REST_REQUEST_PARSER, JOB_CONFIG); PARSER.declareString(Builder::setStart, START_TIME); PARSER.declareString(Builder::setEnd, END_TIME); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index 400bdaa3a27ea..efb4dacd83ba4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; -import java.util.List; import java.util.Objects; public class PutJobAction extends ActionType { @@ -35,7 +34,7 @@ private PutJobAction() { public static class Request extends AcknowledgedRequest { public static Request parseRequest(String jobId, XContentParser parser, IndicesOptions indicesOptions) { - Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, 
null); if (jobBuilder.getId() == null) { jobBuilder.setId(jobId); } else if (Strings.isNullOrEmpty(jobId) == false && jobId.equals(jobBuilder.getId()) == false) { @@ -58,14 +57,6 @@ public Request(Job.Builder jobBuilder) { // would occur when parsing an old job config that already had duplicate detectors. jobBuilder.validateDetectorsAreUnique(); - // Some fields cannot be set at create time - List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); - if (invalidJobCreationSettings.isEmpty() == false) { - throw new IllegalArgumentException( - Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) - ); - } - this.jobBuilder = jobBuilder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index 48549ae100e36..76cba60667c32 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -14,11 +14,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; import java.util.Date; -import java.util.List; import java.util.Objects; public class ValidateJobConfigAction extends ActionType { @@ -32,10 +30,10 @@ protected ValidateJobConfigAction() { public static class Request extends ActionRequest { - private Job job; + private final Job job; public static Request parseRequest(XContentParser parser) { - Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, null); // When jobs are PUT their ID must be supplied in the URL - assume this will // be valid unless an invalid job ID is specified in the JSON to be validated jobBuilder.setId(jobBuilder.getId() != null ? jobBuilder.getId() : "ok"); @@ -45,14 +43,6 @@ public static Request parseRequest(XContentParser parser) { // would occur when parsing an old job config that already had duplicate detectors. 
jobBuilder.validateDetectorsAreUnique(); - // Some fields cannot be set at create time - List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); - if (invalidJobCreationSettings.isEmpty() == false) { - throw new IllegalArgumentException( - Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) - ); - } - return new Request(jobBuilder.build(new Date())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index fbb1a137bdc13..8da0209e10293 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -98,8 +98,9 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final ParseField RESULTS_FIELD = new ParseField("jobs"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly - public static final ObjectParser LENIENT_PARSER = createParser(true); - public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true, true); + // Use the REST request parser to parse a job passed to the API, to disallow setting internal fields. + public static final ObjectParser REST_REQUEST_PARSER = createParser(false, false); public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1); @@ -114,26 +115,12 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final long DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS = 10; public static final long DEFAULT_DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS = 1; - private static ObjectParser createParser(boolean ignoreUnknownFields) { + private static ObjectParser createParser(boolean allowInternalFields, boolean ignoreUnknownFields) { ObjectParser parser = new ObjectParser<>("job_details", ignoreUnknownFields, Builder::new); parser.declareString(Builder::setId, ID); - parser.declareString(Builder::setJobType, JOB_TYPE); - parser.declareString(Builder::setJobVersion, JOB_VERSION); parser.declareStringArray(Builder::setGroups, GROUPS); parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); - parser.declareField( - Builder::setCreateTime, - p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), - CREATE_TIME, - ValueType.VALUE - ); - parser.declareField( - Builder::setFinishedTime, - p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), - FINISHED_TIME, - ValueType.VALUE - ); parser.declareObject( Builder::setAnalysisConfig, ignoreUnknownFields ? 
AnalysisConfig.LENIENT_PARSER : AnalysisConfig.STRICT_PARSER, @@ -165,17 +152,35 @@ private static ObjectParser createParser(boolean ignoreUnknownFie parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS); parser.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); parser.declareField(Builder::setCustomSettings, (p, c) -> p.mapOrdered(), CUSTOM_SETTINGS, ValueType.OBJECT); - parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); - parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME); - parser.declareBoolean(Builder::setDeleting, DELETING); parser.declareBoolean(Builder::setAllowLazyOpen, ALLOW_LAZY_OPEN); - parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); parser.declareObject( Builder::setDatafeed, ignoreUnknownFields ? DatafeedConfig.LENIENT_PARSER : DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG ); + + if (allowInternalFields) { + parser.declareString(Builder::setJobType, JOB_TYPE); + parser.declareString(Builder::setJobVersion, JOB_VERSION); + parser.declareField( + Builder::setCreateTime, + p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ValueType.VALUE + ); + parser.declareField( + Builder::setFinishedTime, + p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), + FINISHED_TIME, + ValueType.VALUE + ); + parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); + parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); + parser.declareBoolean(Builder::setDeleting, DELETING); + parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); + } + return parser; } @@ -1020,26 +1025,6 @@ public Builder setDatafeedIndicesOptionsIfRequired(IndicesOptions indicesOptions return this; } - /** - * Return the list of fields that have been set and are invalid to - * be set when the job is created e.g. model snapshot Id should not - * be set at job creation. - * @return List of fields set fields that should not be. - */ - public List invalidCreateTimeSettings() { - List invalidCreateValues = new ArrayList<>(); - if (modelSnapshotId != null) { - invalidCreateValues.add(MODEL_SNAPSHOT_ID.getPreferredName()); - } - if (finishedTime != null) { - invalidCreateValues.add(FINISHED_TIME.getPreferredName()); - } - if (createTime != null) { - invalidCreateValues.add(CREATE_TIME.getPreferredName()); - } - return invalidCreateValues; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index ad7a6b998fafd..52c97ece1b017 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -222,8 +222,6 @@ public final class Messages { public static final String JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD = "over_field_name must be set when the ''{0}'' function is used"; public static final String JOB_CONFIG_ID_ALREADY_TAKEN = "The job cannot be created with the Id ''{0}''. 
The Id is already used."; public static final String JOB_CONFIG_ID_TOO_LONG = "The job id cannot contain more than {0,number,integer} characters."; - public static final String JOB_CONFIG_INVALID_CREATE_SETTINGS = - "The job is configured with fields [{0}] that are illegal to set at job creation"; public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS = "Invalid field name ''{0}''. Field names including over, by and partition " + "fields cannot contain any of these characters: {1}"; public static final String JOB_CONFIG_INVALID_FIELDNAME = diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 4fff2804f9350..047f3a418c36b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -43,7 +43,6 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -101,7 +100,7 @@ protected Writeable.Reader instanceReader() { @Override protected Job doParseInstance(XContentParser parser) { - return Job.STRICT_PARSER.apply(parser, null).build(); + return Job.LENIENT_PARSER.apply(parser, null).build(); } public void testToXContentForInternalStorage() throws IOException { @@ -119,10 +118,10 @@ public void testToXContentForInternalStorage() throws IOException { } } - public void testFutureConfigParse() throws IOException { + public void testRestRequestParser_DoesntAllowInternalFields() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, FUTURE_JOB); - XContentParseException e = expectThrows(XContentParseException.class, () -> Job.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> Job.REST_REQUEST_PARSER.apply(parser, null).build()); + assertEquals("[3:5] [job_details] unknown field [create_time]", e.getMessage()); } public void testFutureMetadataParse() throws IOException { @@ -554,22 +553,6 @@ public void testBuilder_givenTimeFieldInAnalysisConfig() { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } - public void testInvalidCreateTimeSettings() { - Job.Builder builder = new Job.Builder("invalid-settings"); - builder.setModelSnapshotId("snapshot-foo"); - assertEquals(Collections.singletonList(Job.MODEL_SNAPSHOT_ID.getPreferredName()), builder.invalidCreateTimeSettings()); - - builder.setCreateTime(new Date()); - builder.setFinishedTime(new Date()); - - Set expected = new HashSet<>(); - expected.add(Job.CREATE_TIME.getPreferredName()); - expected.add(Job.FINISHED_TIME.getPreferredName()); - expected.add(Job.MODEL_SNAPSHOT_ID.getPreferredName()); - - assertEquals(expected, new HashSet<>(builder.invalidCreateTimeSettings())); - } - public void testEmptyGroup() { Job.Builder builder = buildJobBuilder("foo"); builder.setGroups(Arrays.asList("foo-group", "")); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml index 3c4439444d1a1..24e869781f677 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -1130,7 +1130,7 @@ "Test cannot create job with model snapshot id set": - do: - catch: /illegal_argument_exception/ + catch: /x_content_parse_exception/ ml.put_job: job_id: has-model-snapshot-id body: > diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml index 1df34a64f860a..a2cfb65b08a11 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml @@ -76,21 +76,7 @@ "Test job config is invalid because model snapshot id set": - do: - catch: /illegal_argument_exception/ - ml.validate: - body: > - { - "model_snapshot_id": "wont-create-with-this-setting", - "analysis_config" : { - "bucket_span": "1h", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "data_description" : { - } - } - - - do: - catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ + catch: /x_content_parse_exception/ ml.validate: body: > { From 890bd4b8a59904b6f08c8c2d99470efbc8b3fb45 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 11 Mar 2024 13:48:12 +0100 Subject: [PATCH 097/248] Consider context in raw serialization (#106163) With this commit we use `writeRawValue` instead of `writeRaw` when serializing raw strings as XContent. The latter method does not consider context (e.g. is the value being written as part of an array and requires a comma separator?) whereas the former does. This ensures that pre-rendered double values as we use them in the flamegraph response are rendered correctly as XContent. 
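For illustration only: a minimal, self-contained Jackson sketch of the behaviour described above. It is not part of this change, the class name is invented, and it assumes plain `com.fasterxml.jackson.core` without the Elasticsearch XContent wrappers. `writeRaw` copies the text verbatim and ignores the generator's output context, while `writeRawValue` treats the text as a single value and adds whatever separators the context requires.

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;

    import java.io.StringWriter;

    public class RawVsRawValueSketch {
        public static void main(String[] args) throws Exception {
            JsonFactory factory = new JsonFactory();

            // writeRaw copies the text verbatim; no comma is added between array entries.
            StringWriter raw = new StringWriter();
            try (JsonGenerator gen = factory.createGenerator(raw)) {
                gen.writeStartArray();
                gen.writeRaw("1.0");
                gen.writeRaw("2.0");
                gen.writeEndArray();
            }
            System.out.println(raw); // [1.02.0] -- malformed JSON

            // writeRawValue knows it is writing a value inside an array and inserts the separator.
            StringWriter rawValue = new StringWriter();
            try (JsonGenerator gen = factory.createGenerator(rawValue)) {
                gen.writeStartArray();
                gen.writeRawValue("1.0");
                gen.writeRawValue("2.0");
                gen.writeEndArray();
            }
            System.out.println(rawValue); // [1.0,2.0] -- well-formed
        }
    }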
Closes #106103 --- .../xcontent/provider/json/JsonXContentGenerator.java | 2 +- .../resources/rest-api-spec/test/profiling/10_basic.yml | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java index f22176930da64..09cbdf2d571cd 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java @@ -500,7 +500,7 @@ public void writeRawValue(InputStream stream, XContentType xContentType) throws public void writeRawValue(String value) throws IOException { try { if (supportsRawWrites()) { - generator.writeRaw(value); + generator.writeRawValue(value); } else { // fallback to a regular string for formats that don't allow writing the value as is generator.writeString(value); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml index 1e0c260a70e4f..367655ba89388 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -166,9 +166,6 @@ teardown: --- "Test flamegraph from profiling-events": - - skip: - reason: "https://github.com/elastic/elasticsearch/issues/106103" - version: "all" - do: profiling.flamegraph: body: > @@ -195,9 +192,6 @@ teardown: --- "Test flamegraph from test-events": - - skip: - reason: "https://github.com/elastic/elasticsearch/issues/106103" - version: "all" - do: profiling.flamegraph: body: > From 642fc14ef63d4d859360c9f8e1b0871164a0714c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Mon, 11 Mar 2024 13:54:21 +0100 Subject: [PATCH 098/248] Ensure LTR models are cached when used as a rescorer. (#106161) --- .../loadingservice/ModelLoadingService.java | 5 ++- .../ModelLoadingServiceTests.java | 38 ++++++++++++++++++- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index 5994c61f46297..5869f353c80c9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -647,7 +647,7 @@ private void handleLoadSuccess( // Also, if the consumer is a search consumer, we should always cache it if (referencedModels.contains(modelId) || Sets.haveNonEmptyIntersection(modelIdToModelAliases.getOrDefault(modelId, new HashSet<>()), referencedModels) - || consumer.equals(Consumer.SEARCH_AGGS)) { + || consumer.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER)) { try { // The local model may already be in cache. If it is, we don't bother adding it to cache. 
// If it isn't, we flip an `isLoaded` flag, and increment the model counter to make sure if it is evicted @@ -810,7 +810,8 @@ public void clusterChanged(ClusterChangedEvent event) { ); if (oldModelAliasesNotReferenced && newModelAliasesNotReferenced && modelIsNotReferenced) { ModelAndConsumer modelAndConsumer = localModelCache.get(modelId); - if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH_AGGS) == false) { + if (modelAndConsumer != null + && modelAndConsumer.consumers.stream().noneMatch(c -> c.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER))) { logger.trace("[{} ({})] invalidated from cache", modelId, modelAliasOrId); localModelCache.invalidate(modelId); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 40b0dd519f7d8..bab292671c0bc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -43,7 +43,9 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.MachineLearning; @@ -424,6 +426,34 @@ public void testGetModelForSearch() throws Exception { verify(trainedModelStatsService, never()).queueStats(any(InferenceStats.class), anyBoolean()); } + public void testGetModelForLearningToRank() throws Exception { + String modelId = "test-get-model-for-ltr"; + withTrainedModel(modelId, 1L, LearningToRankConfig.EMPTY_PARAMS); + + ModelLoadingService modelLoadingService = new ModelLoadingService( + trainedModelProvider, + auditor, + threadPool, + clusterService, + trainedModelStatsService, + Settings.EMPTY, + "test-node", + circuitBreaker, + mock(XPackLicenseState.class) + ); + + for (int i = 0; i < 3; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + modelLoadingService.getModelForLearningToRank(modelId, future); + assertThat(future.get(), is(not(nullValue()))); + } + + assertTrue(modelLoadingService.isModelCached(modelId)); + + verify(trainedModelProvider, times(1)).getTrainedModelForInference(eq(modelId), eq(false), any()); + verify(trainedModelStatsService, never()).queueStats(any(InferenceStats.class), anyBoolean()); + } + public void testCircuitBreakerBreak() throws Exception { String model1 = "test-circuit-break-model-1"; String model2 = "test-circuit-break-model-2"; @@ -656,13 +686,17 @@ public void testAliasesGetUpdatedEvenWhenNotIngestNode() throws IOException { assertThat(modelLoadingService.getModelId("loaded_model_again"), equalTo(model1)); } - @SuppressWarnings("unchecked") private void withTrainedModel(String modelId, long size) { + withTrainedModel(modelId, size, ClassificationConfig.EMPTY_PARAMS); + } + + @SuppressWarnings("unchecked") + private void 
withTrainedModel(String modelId, long size, InferenceConfig inferenceConfig) { InferenceDefinition definition = mock(InferenceDefinition.class); when(definition.ramBytesUsed()).thenReturn(size); TrainedModelConfig trainedModelConfig = mock(TrainedModelConfig.class); when(trainedModelConfig.getModelId()).thenReturn(modelId); - when(trainedModelConfig.getInferenceConfig()).thenReturn(ClassificationConfig.EMPTY_PARAMS); + when(trainedModelConfig.getInferenceConfig()).thenReturn(inferenceConfig); when(trainedModelConfig.getInput()).thenReturn(new TrainedModelInput(Arrays.asList("foo", "bar", "baz"))); when(trainedModelConfig.getModelSize()).thenReturn(size); doAnswer(invocationOnMock -> { From 903524f9e353b41a259aa25112f2a4be93cd8c07 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 11 Mar 2024 15:21:28 +0200 Subject: [PATCH 099/248] Introduce the data stream global retention cluster state metadata (#106170) In this PR we add the global retention custom metadata to the cluster state. This is just the structure, they are not used and they cannot be managed yet. This will come in a subsequent PR. Part of https://github.com/elastic/elasticsearch/issues/106169 Reviewed in https://github.com/elastic/elasticsearch/pull/105682 --- .../org/elasticsearch/TransportVersions.java | 1 + .../metadata/DataStreamGlobalRetention.java | 148 ++++++++++++++++++ ...reamGlobalRetentionSerializationTests.java | 99 ++++++++++++ 3 files changed, 248 insertions(+) create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 392e157f60952..515992e18d62d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -140,6 +140,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0); public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0); public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0); + public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java new file mode 100644 index 0000000000000..f3b88ba6083c3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Objects; + +/** + * A cluster state entry that contains global retention settings that are configurable by the user. These settings include: + * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined + * - max retention, applied on every data stream managed by DSL + */ +public final class DataStreamGlobalRetention extends AbstractNamedDiffable implements ClusterState.Custom { + + public static final String TYPE = "data-stream-global-retention"; + + public static final ParseField DEFAULT_RETENTION_FIELD = new ParseField("default_retention"); + public static final ParseField MAX_RETENTION_FIELD = new ParseField("max_retention"); + + public static final DataStreamGlobalRetention EMPTY = new DataStreamGlobalRetention(null, null); + + @Nullable + private final TimeValue defaultRetention; + @Nullable + private final TimeValue maxRetention; + + /** + * @param defaultRetention the default retention or null if it's undefined + * @param maxRetention the max retention or null if it's undefined + * @throws IllegalArgumentException when the default retention is greater than the max retention. + */ + public DataStreamGlobalRetention(TimeValue defaultRetention, TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Default global retention [" + + defaultRetention.getStringRep() + + "] cannot be greater than the max global retention [" + + maxRetention.getStringRep() + + "]." + ); + } + this.defaultRetention = defaultRetention; + this.maxRetention = maxRetention; + } + + public static DataStreamGlobalRetention read(StreamInput in) throws IOException { + return new DataStreamGlobalRetention(in.readOptionalTimeValue(), in.readOptionalTimeValue()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ADD_DATA_STREAM_GLOBAL_RETENTION; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalTimeValue(defaultRetention); + out.writeOptionalTimeValue(maxRetention); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(ClusterState.Custom.class, TYPE, in); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.single(this::toXContentFragment); + } + + /** + * Adds to the XContentBuilder the two fields when they are not null. 
+ */ + public XContentBuilder toXContentFragment(XContentBuilder builder, ToXContent.Params params) throws IOException { + if (defaultRetention != null) { + builder.field(DEFAULT_RETENTION_FIELD.getPreferredName(), defaultRetention.getStringRep()); + } + if (maxRetention != null) { + builder.field(MAX_RETENTION_FIELD.getPreferredName(), maxRetention.getStringRep()); + } + return builder; + } + + /** + * Returns the metadata found in the cluster state or null. + */ + public static DataStreamGlobalRetention getFromClusterState(ClusterState clusterState) { + return clusterState.custom(DataStreamGlobalRetention.TYPE); + } + + @Nullable + public TimeValue getDefaultRetention() { + return defaultRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return maxRetention; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DataStreamGlobalRetention that = (DataStreamGlobalRetention) o; + return Objects.equals(defaultRetention, that.defaultRetention) && Objects.equals(maxRetention, that.maxRetention); + } + + @Override + public int hashCode() { + return Objects.hash(defaultRetention, maxRetention); + } + + @Override + public String toString() { + return "DataStreamGlobalRetention{" + + "defaultRetention=" + + (defaultRetention == null ? "null" : defaultRetention.getStringRep()) + + ", maxRetention=" + + (maxRetention == null ? "null" : maxRetention.getStringRep()) + + '}'; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java new file mode 100644 index 0000000000000..8c3d36464784e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.test.SimpleDiffableWireSerializationTestCase; + +import java.util.List; + +public class DataStreamGlobalRetentionSerializationTests extends SimpleDiffableWireSerializationTestCase { + + @Override + protected ClusterState.Custom makeTestChanges(ClusterState.Custom testInstance) { + if (randomBoolean()) { + return testInstance; + } + return mutateInstance(testInstance); + } + + @Override + protected Writeable.Reader> diffReader() { + return DataStreamGlobalRetention::readDiffFrom; + } + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamGlobalRetention::read; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry(ClusterState.Custom.class, DataStreamGlobalRetention.TYPE, DataStreamGlobalRetention::read) + ) + ); + } + + @Override + protected ClusterState.Custom createTestInstance() { + return randomGlobalRetention(); + } + + @Override + protected ClusterState.Custom mutateInstance(ClusterState.Custom instance) { + DataStreamGlobalRetention metadata = (DataStreamGlobalRetention) instance; + var defaultRetention = metadata.getDefaultRetention(); + var maxRetention = metadata.getMaxRetention(); + switch (randomInt(1)) { + case 0 -> { + if (defaultRetention == null) { + defaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 1000)); + } else { + defaultRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)); + } + } + case 1 -> { + if (maxRetention == null) { + maxRetention = TimeValue.timeValueDays(randomIntBetween(1000, 2000)); + } else { + maxRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1000, 2000)); + } + } + } + return new DataStreamGlobalRetention(defaultRetention, maxRetention); + } + + public static DataStreamGlobalRetention randomGlobalRetention() { + return new DataStreamGlobalRetention( + randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)), + randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1000, 2000)) + ); + } + + public void testChunking() { + AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), ignored -> 1); + } + + public void testValidation() { + expectThrows( + IllegalArgumentException.class, + () -> new DataStreamGlobalRetention( + TimeValue.timeValueDays(randomIntBetween(1001, 2000)), + TimeValue.timeValueDays(randomIntBetween(1, 1000)) + ) + ); + } +} From cebb94d845d68afd134b5cc38fc0ccad7fc4058f Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Mar 2024 09:33:56 -0400 Subject: [PATCH 100/248] Don't try to delete watcher history backing indices in ESRestTestCase.wipeAllIndices(...) 
(#106144) --- .../java/org/elasticsearch/test/rest/ESRestTestCase.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 6520e3d0f68bd..a841e9b4304b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1103,8 +1103,10 @@ protected static void wipeAllIndices() throws IOException { protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOException { boolean includeHidden = clusterHasFeature(RestTestLegacyFeatures.HIDDEN_INDICES_SUPPORTED); try { - // remove all indices except ilm and slm history which can pop up after deleting all data streams but shouldn't interfere - final List indexPatterns = new ArrayList<>(List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*")); + // remove all indices except some history indices which can pop up after deleting all data streams but shouldn't interfere + final List indexPatterns = new ArrayList<>( + List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", "-.ds-.watcher-history-*") + ); if (preserveSecurityIndices) { indexPatterns.add("-.security-*"); } From d32a53b99a0d5e87abe7c645aa6d7d5563aaf2a0 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 11 Mar 2024 08:39:19 -0500 Subject: [PATCH 101/248] fixing RejectedExecutionTests (#106146) --- .../xpack/watcher/test/integration/RejectedExecutionTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index 4a3bcca3acb85..e5f4091ca89eb 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -37,7 +37,6 @@ protected boolean timeWarped() { return false; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105951") public void testHistoryOnRejection() throws Exception { createIndex("idx"); prepareIndex("idx").setSource("field", "a").get(); @@ -73,11 +72,11 @@ public void testHistoryOnRejection() throws Exception { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(XPackSettings.SECURITY_ENABLED.getKey(), false) .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") + .put("xpack.watcher.thread_pool.size", 1) .put("xpack.watcher.thread_pool.queue_size", 0) .build(); } From 37542e624564ec009aec6b95f260b8c420fb5ecf Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Mon, 11 Mar 2024 08:00:32 -0600 Subject: [PATCH 102/248] (Doc+) Link Troubleshooting Discover from Mapping Explosion (#105991) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy team!
[Mapping Explosion](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-explosion.html) is a common root issue of [Discover Slowness](https://www.elastic.co/blog/troubleshooting-guide-common-issues-kibana-discover-load), so cross-linking these Dev-reviewed pages. --- .../troubleshooting/common-issues/mapping-explosion.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc index 5ba18df3e6a6b..fd1a31228c95f 100644 --- a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc +++ b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc @@ -24,7 +24,8 @@ reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. * Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or -timing out in the browser's Developer Tools Network tab. +timing out in the browser's Developer Tools Network tab. For more +information, refer to our https://www.elastic.co/blog/troubleshooting-guide-common-issues-kibana-discover-load[walkthrough on troubleshooting Discover]. * Discover's **Available fields** taking a long time to compile Javascript in the browser's Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. From 4573921501cb269b30c42fe4c899a6136242ab5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 11 Mar 2024 15:57:00 +0100 Subject: [PATCH 103/248] [DOCS] Adds an MDX file for testing purposes. (#106165) --- docs-mdx/painless/painless-field-context.mdx | 136 +++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 docs-mdx/painless/painless-field-context.mdx diff --git a/docs-mdx/painless/painless-field-context.mdx b/docs-mdx/painless/painless-field-context.mdx new file mode 100644 index 0000000000000..8e3c38938b5b8 --- /dev/null +++ b/docs-mdx/painless/painless-field-context.mdx @@ -0,0 +1,136 @@ +--- +id: enElasticsearchPainlessPainlessFieldContext +slug: /en/elasticsearch/painless/painless-field-context +title: Field context +description: Description to be written +tags: [] +--- + +
    + +Use a Painless script to create a +[script field](((ref))/search-fields.html#script-fields) to return +a customized value for each document in the results of a query. + +**Variables** + +`params` (`Map`, read-only) + : User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only) + : Contains the fields of the specified document where each field is a + `List` of values. + +[`params['_source']`](((ref))/mapping-source-field.html) (`Map`, read-only) + : Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +**Return** + +`Object` + : The customized value for each document. + +**API** + +Both the standard Painless API and +Specialized Field API are available. + +**Example** + +To run this example, first follow the steps in +context examples. + +You can then use these two example scripts to compute custom information +for each search hit and output it to two new fields. + +The first script gets the doc value for the `datetime` field and calls +the `getDayOfWeekEnum` function to determine the corresponding day of the week. + +```Painless +doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT) +``` + +The second script calculates the number of actors. Actors' names are stored +as a keyword array in the `actors` field. + +```Painless +doc['actors'].size() [^1] +``` +[^1]: By default, doc values are not available for `text` fields. If `actors` was +a `text` field, you could still calculate the number of actors by extracting +values from `_source` with `params['_source']['actors'].size()`. + +The following request returns the calculated day of week and the number of +actors that appear in each play: + +```console +GET seats/_search +{ + "size": 2, + "query": { + "match_all": {} + }, + "script_fields": { + "day-of-week": { + "script": { + "source": "doc['datetime'].value.getDayOfWeekEnum().getDisplayName(TextStyle.FULL, Locale.ROOT)" + } + }, + "number-of-actors": { + "script": { + "source": "doc['actors'].size()" + } + } + } +} +``` +{/* TEST[setup:seats] */} + +```console-result +{ + "took" : 68, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 11, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "seats", + "_id" : "1", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 4 + ] + } + }, + { + "_index" : "seats", + "_id" : "2", + "_score" : 1.0, + "fields" : { + "day-of-week" : [ + "Thursday" + ], + "number-of-actors" : [ + 1 + ] + } + } + ] + } +} +``` +{/* TESTRESPONSE[s/"took" : 68/"took" : "$body.took"/] */} \ No newline at end of file From 5e5205994747618edf5d81cb6a813adef9f2dbce Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 11 Mar 2024 16:07:56 +0100 Subject: [PATCH 104/248] Add allocation stats (#105894) This change attempts to add allocation section to the node stats in order to simplify unbalanced clusters debugging. 
It is required for https://github.com/elastic/elasticsearch/pull/97561 --- docs/changelog/105894.yaml | 5 + docs/reference/cluster/nodes-stats.asciidoc | 41 ++++ .../test/nodes.stats/80_allocation_stats.yml | 22 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../elasticsearch/action/ActionModule.java | 2 + .../TransportGetAllocationStatsAction.java | 132 +++++++++++ .../admin/cluster/node/stats/NodeStats.java | 57 ++++- .../node/stats/NodesStatsRequestBuilder.java | 5 + .../stats/NodesStatsRequestParameters.java | 3 +- .../node/stats/TransportNodesStatsAction.java | 42 +++- .../elasticsearch/cluster/ClusterModule.java | 4 + .../allocation/AllocationStatsService.java | 94 ++++++++ .../allocation/NodeAllocationStats.java | 51 +++++ .../org/elasticsearch/node/NodeService.java | 3 +- .../cluster/node/stats/NodeStatsTests.java | 11 +- .../elasticsearch/cluster/DiskUsageTests.java | 6 + .../AllocationStatsServiceTests.java | 205 ++++++++++++++++++ .../allocation/NodeAllocationStatsTests.java | 76 +++++++ .../node/tracker/DiskHealthTrackerTests.java | 1 + .../info/RestClusterInfoActionTests.java | 1 + .../MockInternalClusterInfoService.java | 3 +- .../AutoscalingNodesInfoServiceTests.java | 1 + ...sportGetTrainedModelsStatsActionTests.java | 1 + .../node/NodeStatsMonitoringDocTests.java | 1 + .../xpack/security/operator/Constants.java | 1 + 25 files changed, 755 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/105894.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java diff --git a/docs/changelog/105894.yaml b/docs/changelog/105894.yaml new file mode 100644 index 0000000000000..a1a99eaa6259b --- /dev/null +++ b/docs/changelog/105894.yaml @@ -0,0 +1,5 @@ +pr: 105894 +summary: Add allocation stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index e2848f9a8e70f..b755baac3901b 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -50,6 +50,9 @@ using metrics. `adaptive_selection`:: Statistics about <>. + `allocations`:: + Statistics about allocated shards + `breaker`:: Statistics about the field data circuit breaker. @@ -2802,6 +2805,44 @@ search requests on the keyed node. The rank of this node; used for shard selection when routing search requests. ====== + +[[cluster-nodes-stats-api-response-body-allocations]] +`allocations`:: +(object) +Contains allocations statistics for the node. 
++ +.Properties of `allocations` +[%collapsible%open] +====== +`shards`:: +(integer) +The number of shards currently allocated to this node + +`undesired_shards`:: +(integer) +The number of shards that are scheduled to be moved elsewhere in the cluster, +or -1 if an allocator other than the desired balance allocator is used + +`forecasted_ingest_load`:: +(double) +Total forecasted ingest load of all shards assigned to this node + +`forecasted_disk_usage`:: +(<>) +Forecasted size of all shards assigned to the node + +`forecasted_disk_usage_bytes`:: +(integer) +Forecasted size, in bytes, of all shards assigned to the node + +`current_disk_usage`:: +(<>) +Current size of all shards assigned to the node + +`current_disk_usage_bytes`:: +(integer) +Current size, in bytes, of all shards assigned to the node +====== ===== ==== diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml new file mode 100644 index 0000000000000..a2e1117073cde --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/80_allocation_stats.yml @@ -0,0 +1,22 @@ +--- +"Allocation stats": + - skip: + version: " - 8.13.99" + reason: "allocation stats was added in 8.14.0" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ allocations ] + + - exists: nodes.$node_id.allocations + - exists: nodes.$node_id.allocations.shards + - exists: nodes.$node_id.allocations.undesired_shards + - exists: nodes.$node_id.allocations.forecasted_ingest_load + - exists: nodes.$node_id.allocations.forecasted_disk_usage_in_bytes + - exists: nodes.$node_id.allocations.current_disk_usage_in_bytes diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 515992e18d62d..418720284eda8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -141,6 +141,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0); public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0); public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0); + public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0); /* * STOP! READ THIS FIRST!
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index dc3b02872fd83..a8f26ab966646 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportDeleteDesiredBalanceAction; +import org.elasticsearch.action.admin.cluster.allocation.TransportGetAllocationStatsAction; import org.elasticsearch.action.admin.cluster.allocation.TransportGetDesiredBalanceAction; import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; @@ -645,6 +646,7 @@ public void reg actions.register(TransportAddVotingConfigExclusionsAction.TYPE, TransportAddVotingConfigExclusionsAction.class); actions.register(TransportClearVotingConfigExclusionsAction.TYPE, TransportClearVotingConfigExclusionsAction.class); actions.register(TransportClusterAllocationExplainAction.TYPE, TransportClusterAllocationExplainAction.class); + actions.register(TransportGetAllocationStatsAction.TYPE, TransportGetAllocationStatsAction.class); actions.register(TransportGetDesiredBalanceAction.TYPE, TransportGetDesiredBalanceAction.class); actions.register(TransportDeleteDesiredBalanceAction.TYPE, TransportDeleteDesiredBalanceAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java new file mode 100644 index 0000000000000..a17a627342c4f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.cluster.allocation; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Map; + +public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAction< + TransportGetAllocationStatsAction.Request, + TransportGetAllocationStatsAction.Response> { + + public static final ActionType TYPE = new ActionType<>("cluster:monitor/allocation/stats"); + + private final AllocationStatsService allocationStatsService; + + @Inject + public TransportGetAllocationStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + AllocationStatsService allocationStatsService + ) { + super( + TYPE.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TransportGetAllocationStatsAction.Request::new, + indexNameExpressionResolver, + TransportGetAllocationStatsAction.Response::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.allocationStatsService = allocationStatsService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + if (clusterService.state().getMinTransportVersion().before(TransportVersions.ALLOCATION_STATS)) { + // The action is not available before ALLOCATION_STATS + listener.onResponse(new Response(Map.of())); + return; + } + super.doExecute(task, request, listener); + } + + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + listener.onResponse(new Response(allocationStatsService.stats())); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + public static class Request extends MasterNodeReadRequest { + + public Request(TaskId parentTaskId) { + setParentTask(parentTaskId); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS); + super.writeTo(out); + } + + @Override + public 
ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse { + + private final Map nodeAllocationStats; + + public Response(Map nodeAllocationStats) { + this.nodeAllocationStats = nodeAllocationStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.nodeAllocationStats = in.readImmutableMap(StreamInput::readString, NodeAllocationStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(nodeAllocationStats, StreamOutput::writeString, StreamOutput::writeWriteable); + } + + public Map getNodeAllocationStats() { + return nodeAllocationStats; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 595e441e9b2cf..8fcb5a320bd41 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,6 +98,9 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent { @Nullable private final RepositoriesStats repositoriesStats; + @Nullable + private final NodeAllocationStats nodeAllocationStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -117,11 +121,12 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); - } else { - repositoriesStats = null; - } + repositoriesStats = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) + ? in.readOptionalWriteable(RepositoriesStats::new) + : null; + nodeAllocationStats = in.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS) + ? 
in.readOptionalWriteable(NodeAllocationStats::new) + : null; } public NodeStats( @@ -142,7 +147,8 @@ public NodeStats( @Nullable AdaptiveSelectionStats adaptiveSelectionStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, - @Nullable RepositoriesStats repositoriesStats + @Nullable RepositoriesStats repositoriesStats, + @Nullable NodeAllocationStats nodeAllocationStats ) { super(node); this.timestamp = timestamp; @@ -162,6 +168,31 @@ public NodeStats( this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.repositoriesStats = repositoriesStats; + this.nodeAllocationStats = nodeAllocationStats; + } + + public NodeStats withNodeAllocationStats(@Nullable NodeAllocationStats nodeAllocationStats) { + return new NodeStats( + getNode(), + timestamp, + indices, + os, + process, + jvm, + threadPool, + fs, + transport, + http, + breaker, + scriptStats, + discoveryStats, + ingestStats, + adaptiveSelectionStats, + scriptCacheStats, + indexingPressureStats, + repositoriesStats, + nodeAllocationStats + ); } public long getTimestamp() { @@ -271,6 +302,11 @@ public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } + @Nullable + public NodeAllocationStats getNodeAllocationStats() { + return nodeAllocationStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -297,6 +333,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(repositoriesStats); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS)) { + out.writeOptionalWriteable(nodeAllocationStats); + } } @Override @@ -343,7 +382,11 @@ public Iterator toXContentChunked(ToXContent.Params outerP ifPresent(getIngestStats()).toXContentChunked(outerParams), singleChunk(ifPresent(getAdaptiveSelectionStats())), ifPresent(getScriptCacheStats()).toXContentChunked(outerParams), - singleChunk((builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p).value(ifPresent(getRepositoriesStats()), p)) + singleChunk( + (builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p) + .value(ifPresent(getRepositoriesStats()), p) + .value(ifPresent(getNodeAllocationStats()), p) + ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index ab7278c629bf2..8d863653874bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -158,6 +158,11 @@ public NodesStatsRequestBuilder setRepositoryStats(boolean repositoryStats) { return this; } + public NodesStatsRequestBuilder setAllocationStats(boolean allocationStats) { + addOrRemoveMetric(allocationStats, NodesStatsRequestParameters.Metric.ALLOCATIONS); + return this; + } + /** * Helper method for adding metrics to a request */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java index 2948af59d17fd..9e965fcccb2f3 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -89,7 +89,8 @@ public enum Metric { ADAPTIVE_SELECTION("adaptive_selection"), SCRIPT_CACHE("script_cache"), INDEXING_PRESSURE("indexing_pressure"), - REPOSITORIES("repositories"); + REPOSITORIES("repositories"), + ALLOCATIONS("allocations"); private String metricName; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 1edc57b0a7df2..6ff2303997482 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -8,11 +8,15 @@ package org.elasticsearch.action.admin.cluster.node.stats; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.allocation.TransportGetAllocationStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,7 +46,9 @@ public class TransportNodesStatsAction extends TransportNodesAction< NodeStats> { public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/stats"); + private final NodeService nodeService; + private final NodeClient client; @Inject public TransportNodesStatsAction( @@ -50,7 +56,8 @@ public TransportNodesStatsAction( ClusterService clusterService, TransportService transportService, NodeService nodeService, - ActionFilters actionFilters + ActionFilters actionFilters, + NodeClient client ) { super( TYPE.name(), @@ -61,6 +68,7 @@ public TransportNodesStatsAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.nodeService = nodeService; + this.client = client; } @Override @@ -68,6 +76,34 @@ protected NodesStatsResponse newResponse(NodesStatsRequest request, List responses, + List failures, + ActionListener listener + ) { + Set metrics = request.getNodesStatsRequestParameters().requestedMetrics(); + if (NodesStatsRequestParameters.Metric.ALLOCATIONS.containedIn(metrics)) { + client.execute( + TransportGetAllocationStatsAction.TYPE, + new TransportGetAllocationStatsAction.Request(new TaskId(clusterService.localNode().getId(), task.getId())), + listener.delegateFailure((l, r) -> { + ActionListener.respondAndRelease(l, newResponse(request, merge(responses, r.getNodeAllocationStats()), failures)); + }) + ); + } else { + ActionListener.run(listener, l -> ActionListener.respondAndRelease(l, newResponse(request, responses, failures))); + } + } + + private static List merge(List responses, Map allocationStats) { + return responses.stream() + .map(response -> response.withNodeAllocationStats(allocationStats.get(response.getNode().getId()))) + .toList(); + } + @Override protected NodeStatsRequest newNodeRequest(NodesStatsRequest request) { return new 
NodeStatsRequest(request); @@ -80,10 +116,10 @@ protected NodeStats newNodeResponse(StreamInput in, DiscoveryNode node) throws I } @Override - protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) { + protected NodeStats nodeOperation(NodeStatsRequest request, Task task) { assert task instanceof CancellableTask; - final NodesStatsRequestParameters nodesStatsRequestParameters = nodeStatsRequest.getNodesStatsRequestParameters(); + final NodesStatsRequestParameters nodesStatsRequestParameters = request.getNodesStatsRequestParameters(); Set metrics = nodesStatsRequestParameters.requestedMetrics(); return nodeService.stats( nodesStatsRequestParameters.indices(), diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 5f682804a5b88..809e069b0028b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -118,6 +119,7 @@ public class ClusterModule extends AbstractModule { final Collection deciderList; final ShardsAllocator shardsAllocator; private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; + private final AllocationStatsService allocationStatsService; public ClusterModule( Settings settings, @@ -154,6 +156,7 @@ public ClusterModule( shardRoutingRoleStrategy ); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); + this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); } static ShardRoutingRoleStrategy getShardRoutingRoleStrategy(List clusterPlugins) { @@ -440,6 +443,7 @@ protected void configure() { bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); bind(ShardRoutingRoleStrategy.class).toInstance(shardRoutingRoleStrategy); + bind(AllocationStatsService.class).toInstance(allocationStatsService); } public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java new file mode 100644 index 0000000000000..dbafd916b2a42 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.util.Maps; + +import java.util.Map; + +public class AllocationStatsService { + + private final ClusterService clusterService; + private final ClusterInfoService clusterInfoService; + private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; + private final WriteLoadForecaster writeLoadForecaster; + + public AllocationStatsService( + ClusterService clusterService, + ClusterInfoService clusterInfoService, + ShardsAllocator shardsAllocator, + WriteLoadForecaster writeLoadForecaster + ) { + this.clusterService = clusterService; + this.clusterInfoService = clusterInfoService; + this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats() { + var state = clusterService.state(); + var info = clusterInfoService.getClusterInfo(); + var desiredBalance = desiredBalanceShardsAllocator != null ? desiredBalanceShardsAllocator.getDesiredBalance() : null; + + var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); + for (RoutingNode node : state.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalanceShardsAllocator != null ? undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java new file mode 100644 index 0000000000000..57484d6da53c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStats.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +public record NodeAllocationStats( + int shards, + int undesiredShards, + double forecastedIngestLoad, + long forecastedDiskUsage, + long currentDiskUsage +) implements Writeable, ToXContentFragment { + + public NodeAllocationStats(StreamInput in) throws IOException { + this(in.readVInt(), in.readVInt(), in.readDouble(), in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shards); + out.writeVInt(undesiredShards); + out.writeDouble(forecastedIngestLoad); + out.writeVLong(forecastedDiskUsage); + out.writeVLong(currentDiskUsage); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject("allocations") + .field("shards", shards) + .field("undesired_shards", undesiredShards) + .field("forecasted_ingest_load", forecastedIngestLoad) + .humanReadableField("forecasted_disk_usage_in_bytes", "forecasted_disk_usage", ByteSizeValue.ofBytes(forecastedDiskUsage)) + .humanReadableField("current_disk_usage_in_bytes", "current_disk_usage", ByteSizeValue.ofBytes(currentDiskUsage)) + .endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 4b9e5dc83c538..87384b50d7ffd 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -195,7 +195,8 @@ public NodeStats stats( adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressure.stats() : null, - repositoriesStats ? this.repositoriesService.getRepositoriesThrottlingStats() : null + repositoriesStats ? 
this.repositoriesService.getRepositoriesThrottlingStats() : null, + null ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 0290bfb9c236f..e4b821fba7634 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterApplierRecordingService; import org.elasticsearch.cluster.service.ClusterApplierRecordingService.Stats.Recording; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; @@ -1043,6 +1044,13 @@ public static NodeStats createNodeStats() { RepositoriesStats repositoriesStats = new RepositoriesStats( Map.of("test-repository", new RepositoriesStats.ThrottlingStats(100, 200)) ); + NodeAllocationStats nodeAllocationStats = new NodeAllocationStats( + randomIntBetween(0, 10000), + randomIntBetween(0, 1000), + randomDoubleBetween(0, 8, true), + randomNonNegativeLong(), + randomNonNegativeLong() + ); return new NodeStats( node, @@ -1062,7 +1070,8 @@ public static NodeStats createNodeStats() { adaptiveSelectionStats, scriptCacheStats, indexingPressureStats, - repositoriesStats + repositoriesStats, + nodeAllocationStats ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 5e122c4050b6c..8334c535cea43 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -183,6 +183,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -211,6 +212,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -241,6 +243,7 @@ public void testLeastAndMostAvailableDiskSpace() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -275,6 +278,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -304,6 +308,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); @@ -334,6 +339,7 @@ public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { null, null, null, + null, null ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java new file mode 100644 index 0000000000000..d99d4c1b54527 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardAssignment; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.test.ClusterServiceUtils; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.hasEntry; + +public class AllocationStatsServiceTests extends ESAllocationTestCase { + + public void testShardStats() { + + var ingestLoadForecast = randomDoubleBetween(0, 10, true); + var shardSizeForecast = randomNonNegativeLong(); + var currentShardSize = randomNonNegativeLong(); + + var indexMetadata = IndexMetadata.builder("my-index") + .settings(indexSettings(IndexVersion.current(), 1, 0)) + .indexWriteLoadForecast(ingestLoadForecast) + .shardSizeInBytesForecast(shardSizeForecast) + .build(); + var shardId = new ShardId(indexMetadata.getIndex(), 0); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(shardId, "node-1", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var clusterInfo = new ClusterInfo( + Map.of(), + Map.of(), + Map.of(ClusterInfo.shardIdentifierFromRouting(shardId, true), currentShardSize), + Map.of(), + Map.of(), + Map.of() + ); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + assertThat( + service.stats(), + allOf( + aMapWithSize(1), + hasEntry( + "node-1", + new NodeAllocationStats(1, -1, ingestLoadForecast, Math.max(shardSizeForecast, 
currentShardSize), currentShardSize) + ) + ) + ); + } + } + + public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard( + shardRoutingBuilder(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.RELOCATING) + .withRelocatingNodeId("node-2") + .build() + ) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + createShardAllocator(), + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(2), + hasEntry("node-1", new NodeAllocationStats(0, -1, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(1, -1, 0, 0, 0)) + ) + ); + } + } + + public void testUndesiredShardCount() { + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 2, 0)).build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2")).add(newNode("node-3"))) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.STARTED)) + .addShard(newShardRouting(new ShardId(indexMetadata.getIndex(), 1), "node-3", true, ShardRoutingState.STARTED)) + .build() + ) + ) + .build(); + + var queue = new DeterministicTaskQueue(); + var threadPool = queue.getThreadPool(); + try (var clusterService = ClusterServiceUtils.createClusterService(state, threadPool)) { + var service = new AllocationStatsService( + clusterService, + EmptyClusterInfoService.INSTANCE, + new DesiredBalanceShardsAllocator( + ClusterSettings.createBuiltInClusterSettings(), + createShardAllocator(), + threadPool, + clusterService, + (innerState, strategy) -> innerState, + TelemetryProvider.NOOP + ) { + @Override + public DesiredBalance getDesiredBalance() { + return new DesiredBalance( + 1, + Map.ofEntries( + Map.entry(new ShardId(indexMetadata.getIndex(), 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(new ShardId(indexMetadata.getIndex(), 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)) + ) + ); + } + }, + TEST_WRITE_LOAD_FORECASTER + ); + assertThat( + service.stats(), + allOf( + aMapWithSize(3), + hasEntry("node-1", new NodeAllocationStats(1, 0, 0, 0, 0)), + hasEntry("node-2", new NodeAllocationStats(0, 0, 0, 0, 0)), + hasEntry("node-3", new NodeAllocationStats(1, 1, 0, 0, 0)) // [my-index][1] should be allocated to [node-2] + ) + ); + } + } + + private ShardsAllocator createShardAllocator() { + return new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + + } + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + return null; + } + }; + } +} diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java new file mode 100644 index 0000000000000..ad371ed239795 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class NodeAllocationStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return NodeAllocationStats::new; + } + + @Override + protected NodeAllocationStats createTestInstance() { + return new NodeAllocationStats( + randomIntBetween(0, 10000), + randomIntBetween(0, 1000), + randomDoubleBetween(0, 8, true), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + } + + @Override + protected NodeAllocationStats mutateInstance(NodeAllocationStats instance) throws IOException { + return switch (randomInt(4)) { + case 0 -> new NodeAllocationStats( + randomValueOtherThan(instance.shards(), () -> randomIntBetween(0, 10000)), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 1 -> new NodeAllocationStats( + instance.shards(), + randomValueOtherThan(instance.undesiredShards(), () -> randomIntBetween(0, 1000)), + instance.forecastedIngestLoad(), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 2 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + randomValueOtherThan(instance.forecastedIngestLoad(), () -> randomDoubleBetween(0, 8, true)), + instance.forecastedDiskUsage(), + instance.currentDiskUsage() + ); + case 3 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + randomValueOtherThan(instance.forecastedDiskUsage(), ESTestCase::randomNonNegativeLong), + instance.currentDiskUsage() + ); + case 4 -> new NodeAllocationStats( + instance.shards(), + instance.undesiredShards(), + instance.forecastedIngestLoad(), + instance.currentDiskUsage(), + randomValueOtherThan(instance.forecastedDiskUsage(), ESTestCase::randomNonNegativeLong) + ); + default -> throw new RuntimeException("unreachable"); + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java index 7089e5a19bc63..dd2ef861e85c3 100644 --- a/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/tracker/DiskHealthTrackerTests.java @@ -325,6 +325,7 @@ private NodeStats nodeStats(FsInfo fs) { null, null, null, + null, null ); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java 
b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java index f3ef110ad4ce8..f0473ae344a79 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/info/RestClusterInfoActionTests.java @@ -121,6 +121,7 @@ private NodeStats randomNodeStatsWithOnlyHttpStats(int i) { null, null, null, + null, null ); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 1004ea5b50119..e07c27b22c926 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -89,7 +89,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getAdaptiveSelectionStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), - nodeStats.getRepositoriesStats() + nodeStats.getRepositoriesStats(), + nodeStats.getNodeAllocationStats() ); }).collect(Collectors.toList()); } diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 3f147c94c5ec2..9658db911f6df 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -443,6 +443,7 @@ private static NodeStats statsForNode(DiscoveryNode node, long memory) { null, null, null, + null, null ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index 377b7fd45d78c..5c2c3abf232f5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -274,6 +274,7 @@ private static NodeStats buildNodeStats( null, null, null, + null, null ); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index 2acdc8ae72232..54f3ce634a25a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -460,6 +460,7 @@ private static NodeStats mockNodeStats() { null, null, null, + null, null ); } diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 2250411fa7882..e65db8632062d 100644 --- 
a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -340,6 +340,7 @@ public class Constants { "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", "cluster:monitor/nodes/usage", + "cluster:monitor/allocation/stats", "cluster:monitor/profiling/status/get", "cluster:monitor/remote/info", "cluster:monitor/settings", From 013dc8cb4c1049e0a9ce5960e3ed3bbb45193dff Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Mar 2024 11:18:18 -0400 Subject: [PATCH 105/248] Fix a missing minus sign in the sytnax (#106178) --- .../main/java/org/elasticsearch/test/rest/ESRestTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a841e9b4304b3..66af90ce14d64 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1105,7 +1105,7 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE try { // remove all indices except some history indices which can pop up after deleting all data streams but shouldn't interfere final List indexPatterns = new ArrayList<>( - List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", ".ds-.watcher-history-*") + List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*", "-.ds-.watcher-history-*") ); if (preserveSecurityIndices) { indexPatterns.add("-.security-*"); From c06e29e3b34c684c6eee29715027416f21ddcf16 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 11 Mar 2024 16:39:13 +0100 Subject: [PATCH 106/248] [Profiling] Allow to override index settings (#106172) With this commit we reference index-specific custom component templates. These component templates can be used to override e.g. the number of shards. We have used a similar approach in #99909 to allow users to customize the built-in ILM policy. 
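For illustration, overriding the shard count for the profiling events data streams could then look roughly like this (a minimal sketch; the endpoint is the standard component template API and the shard count shown is only an arbitrary example, not a recommendation):

PUT _component_template/profiling-events@custom
{
  "template": {
    "settings": {
      "index": {
        "number_of_shards": 4
      }
    }
  }
}

Because the `@custom` component templates are listed under `ignore_missing_component_templates`, they are optional: this step is only needed when the built-in defaults should be changed.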
--- docs/changelog/106172.yaml | 5 +++++ .../profiling/index-template/profiling-events.json | 6 +++++- .../profiling/index-template/profiling-executables.json | 6 +++++- .../resources/profiling/index-template/profiling-hosts.json | 6 +++++- .../profiling/index-template/profiling-metrics.json | 6 +++++- .../profiling/index-template/profiling-stackframes.json | 6 +++++- .../profiling/index-template/profiling-stacktraces.json | 6 +++++- .../profiling/index-template/profiling-symbols-global.json | 6 +++++- .../profiling/index-template/profiling-symbols-private.json | 6 +++++- .../xpack/profiling/ProfilingIndexTemplateRegistry.java | 3 ++- 10 files changed, 47 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/106172.yaml diff --git a/docs/changelog/106172.yaml b/docs/changelog/106172.yaml new file mode 100644 index 0000000000000..80d80b9d7f299 --- /dev/null +++ b/docs/changelog/106172.yaml @@ -0,0 +1,5 @@ +pr: 106172 +summary: "[Profiling] Allow to override index settings" +area: Application +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json index e2d17c8327704..3f2e0ca21bdbd 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json @@ -7,10 +7,14 @@ }, "composed_of": [ "profiling-events", + "profiling-events@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-events@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-events", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json index 57fd114c57e27..088589f7df769 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-executables", + "profiling-executables@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-executables@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-executables", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json index 526d8090b0ac6..4d750726b8028 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-hosts", + "profiling-hosts@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + 
"ignore_missing_component_templates": [ + "profiling-hosts@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-hosts", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json index d09de006d025d..74516d7cb826c 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json @@ -5,10 +5,14 @@ "data_stream": {}, "composed_of": [ "profiling-metrics", + "profiling-metrics@custom", "profiling-ilm", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-metrics@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for profiling-metrics", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json index 694ae6ba92a57..0cbd868c2eade 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stackframes", + "profiling-stackframes@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stackframes@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stackframes", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json index c4c920a76c375..d280906873ffa 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-stacktraces", + "profiling-stacktraces@custom", "profiling-ilm", "profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-stacktraces@custom", + "profiling-ilm@custom" + ], "priority": 100, "_meta": { "description": "Index template for .profiling-stacktraces", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json index a7bae1adbb548..dd5eca49b9daa 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json @@ -4,11 +4,15 @@ ], "composed_of": [ "profiling-symbols", + "profiling-symbols@custom", "profiling-ilm", 
"profiling-hot-tier", "profiling-ilm@custom" ], - "ignore_missing_component_templates": ["profiling-ilm@custom"], + "ignore_missing_component_templates": [ + "profiling-symbols@custom", + "profiling-ilm@custom" + ], "template": { "settings": { "index": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json index 999bf7721b897..04c382e558591 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json @@ -3,7 +3,11 @@ ".profiling-symbols-private*" ], "composed_of": [ - "profiling-symbols" + "profiling-symbols", + "profiling-symbols@custom" + ], + "ignore_missing_component_templates": [ + "profiling-symbols@custom" ], "priority": 100, "_meta": { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 1762b2537c455..c90e0e52c4d58 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -45,7 +45,8 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 2: Added 'profiling.host.machine' keyword mapping to profiling-hosts // version 3: Add optional component template 'profiling-ilm@custom' to all ILM-managed index templates // version 4: Added 'service.name' keyword mapping to profiling-events - public static final int INDEX_TEMPLATE_VERSION = 4; + // version 5: Add optional component template '@custom' to all index templates that reference component templates + public static final int INDEX_TEMPLATE_VERSION = 5; // history for individual indices / index templates. Only bump these for breaking changes that require to create a new index public static final int PROFILING_EVENTS_VERSION = 2; From 5f81c1bbe6b6301d5be87167f16d7c4dae42b059 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Mon, 11 Mar 2024 17:26:01 +0100 Subject: [PATCH 107/248] First version of the LTR guide. 
(#105956) --- .../learning-to-rank-feature-extraction.png | Bin 0 -> 171518 bytes .../search/learning-to-rank-judgment-list.png | Bin 0 -> 54420 bytes .../search/learning-to-rank-overview.png | Bin 0 -> 168555 bytes .../learning-to-rank-model-training.asciidoc | 168 ++++++++++++++++++ .../learning-to-rank-search-usage.asciidoc | 78 ++++++++ .../learning-to-rank.asciidoc | 136 ++++++++++++++ .../search-your-data.asciidoc | 1 + 7 files changed, 383 insertions(+) create mode 100644 docs/reference/images/search/learning-to-rank-feature-extraction.png create mode 100644 docs/reference/images/search/learning-to-rank-judgment-list.png create mode 100644 docs/reference/images/search/learning-to-rank-overview.png create mode 100644 docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc create mode 100644 docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc create mode 100644 docs/reference/search/search-your-data/learning-to-rank.asciidoc diff --git a/docs/reference/images/search/learning-to-rank-feature-extraction.png b/docs/reference/images/search/learning-to-rank-feature-extraction.png new file mode 100644 index 0000000000000000000000000000000000000000..6dc2ee31902f61d1343adabaaa301974e3cc47ab GIT binary patch literal 171518 zcmeFZcUY6l7CstK6pRSU1{94XrZL#QgC(m{$MU0Ooup%)eD zy+i0Cy@Vbhl;4N@oU_^Io^$X0Ik>Av9kbyp1g~RyQrzLc+Kww z0T(5M-0CH(A?qb$W_v>;B`rEks^F*k;xZYhtNBZkp;zxoV@dS>P%BXT5k{g((-25v z6d^CAM+;6%+vIT|ts-n-e`wlH?L$@qB+jK+P%Z0jr>xwP+v&|J2-c^O%Ii7q?jHvssg6b!bs16Xt~$ z5Mhz3bz5^jA-)O~wR#j%EnNGx$xZZ*IwNnaXB+fgpUmvWD*@jMN!I&@wc76)n-?*u zn^y7P$uB3qpuBuYbWZM-05i$8uOvQFq;gWs)Z9#x>3%hG)iNy46l3{y7a&x84_K0R zo&_d`CY}S$xVdgVsm@XvlYi^uge^4!<)e`Tn@jAhV%*?e;74ZcgiSA1J7$8dUFUk|p|eO2wtpt3KSmz7%*N z#jTt6x_=8F`JlhjlpY_IxfQti2&)vXv3izwTcMN_}ztI#&q&9T3&K zY17yjgU^M`-rozl=Ms;KN5tE=Q_ek%eKpXi|H7#7rrl*BvZ~7o!H^%?&!L4w1r!A} z!{HBZhK1d45Sbx%C@lYDOmWO|Ou~?S?OGng&};g}nGHJkr_~Q7&UZCD-Kf|T-%H;M z-mBU(K4gDIEFJ#*@jB~|t8#DbWNuB9P7_b_q=xe;YTluvGJ1DQPU0oWORmDt!%wP} zR@lO#G@oSNc2hrpH=ra*Qx18@h}9)(f{TOWI!8H2y9Om^t_FwtgS_svZuI)cT*Xr6 z;udf-|0d1mtxdK~vrWXMfEO|wX+PAye1LpVY^82>YGs#4reRnfN9LDn7s?eXZ{+=? 
zT-awWa_-*)ksdX7x;hLL;cn*3c$w86CJ#_cy0>gHE(>IPLT8_|tchdAo8olWbwANC zx`9fTk=@LkSpkWWKg?92)N)oI55?IH`HGr^eD4`ZzMucR;4%EC5eaD0PHIbww^oFPtgk27+pS-j-qnJO zx2KgYYYjCUASvX9%_ZtSX3gcZN2c1QRNwy?&sNss*nXU?((Z9j_u_*bT;9UMnx(q?Y|-?5~1+2Fd0cJmb9%BH6Y5i|gvLy)uRw z55j-Dr2?L1YSuRqD|!zE4h-VJF-vj_{v-7xZZJuxaqe_-5EB09?ic-DFeQcgW3d@~ z4jNv-$O|5&>`-zhAa@;8GehvRF97_|S%0D7@$`>vKNf<~ygD;8O^Dg%xW5YuuN4YD z60n)U;NWM}a-*fLY08c6`Wok}PI&JO%S7-5#tg00h2f9J@d3{xC)YJSrXuUDYFy(7|iRR0qRTj9=wV_WLSpv==|e?&Pla+RnO;mKhteC3Q8VV7tI1j z!HT_CF;0{w6OwH1dMBn43_z8NY3w=2W+2Mi*FW?}KLaf$crQmK14!MmdYT(g6J=LR zoI=2&{z2xIJD|sqTQh7qy7HQq)oIy06wy=1ci{Sjr5Z`hNb-bTjy zOVY8_-1*L%%(z%>d!*C?Z;mt}gM6TlkR#og_({6cfM@&rKgaBA#boo2LH70bci24# zN!7f2SW?0#lnFbY8)sOH3MOBm{|+H@p!TiXo95(Ta&CrNE>tOEn(fid zk0~e`&ULVzt{Y&zvyqo?NoI#JYtf=d{OjPt7q^^p5^pSKrS)tyz~)WftHG7AuSLU0Z+5<J-4BzQ+Jn%e-lp`C zBLl}mp+(*G`JZC}TWY7E$;312GD*dk*qTIQlhXo%5mO>`hk$~Cfim*SVCIh+0ETZp zw>j=XQLm=f1U%`(n~bi`ierMtmFeLOsNdK6^VlYsGeMepU zX3DA$FR9LP<(O?JL$Q)3jFhelb+3>P=f$@?sP-k|%D`V5%OS|74MqZ!8XlsgR8O;H zA6l&D@yVLRjinP_-c=MuH(LYQO0NI{@;@Sroeqo7tR1-s#j0J(F&Wa0$nP`T8K)hU znN8$z{nXLji8%2K2F8t$fa}kSksa7hRjJ=TuGt+PPhsPUB{8SR=YbS9^}|HI%<=9m zbI{)rY5$ItD#O-0Y)n?iGsZ{f4EI)^+I;~cNQBYd{T|w$jsO!E&C!zo6DCLI_XVP4 zbbteYw)OEHGQZd6@0PAI5AsLI6A+ zqsJ6LUx9Vq&%oVJFgvEbryW$6y%E0ZQR&UB|9Sc>zTv9YBM4bleUWQ9>o*sB#i$pn z?uQPI@&*Rq@9_LUlh!O4aDPn@tg2pjs$ZGY8JBv_ot!0cL@cEuZD=gtmh|*7N8D15 zue&KB&tEc`!6YSA(7DGqG@j$s7B|Yf(_Tc4^2R>=gdW9JG3_MS1y8N+Ly;zbA0EnfUZWg&-4>?n zc~gL|r$Q*u{QK{ZtyBR#)F0VNUKHgS6!s24sc-#*`u#!Qx>U8NkJAhcj2T~g znbe;(>Gf#0cg=Tisw|e7goTBzY<*l`W%u~AQPuIGZT%z9?!F(i!Mh!K<)>Z0!aMF~ z$Vf;S;mX;KR&Nu^!1DsvYhpgT;i>^_e+3kVUS3J-xwvd{T(yuIjrfJT+Q)ALsemB+_l0zmNuC4Eknw2_E`+^ie9C#R(-;4cN@cudfb%E<1Ea8 zCP~f3rP6e1vK#?KP@Q8H*ZKeK1qOZ&yi)1R2wtQH28}H)J=z7V?eCfDgr-mNHg!Y7 zo~Bo|BADR+xikp&`E`J3nZYp3H93ZSe{rl-l*-dkQXXltCTHItG?#(Mf6o-;>*gx} z#p<6MoCVAl-lf)~X-#1jJ8U#h?bvz#DgF$HPz}yHl@w;J7@}7%2 z-koI@ZDW~~7xpI7Q9pE0U_6Y`f2V2w6`LwTKn)Hyx(F;f-6alLEItT7$_&Da%@KM? 
zKMxK!-)mo-x(8CPA>mm6u&)dJ(8hWnVAj^FkG!^Fl{94>4?}y|?RViNq(5IaeErWc za@@Z&gb$t@oOuNWu`U=g;uyp)bX|YFzgvt1#cIO;y-y%gyFcswcFKB@nPJ*8Zg0Az zpCF~_;j`>AIe&Uu$6Im7)>oTAHuU$dffp-r3ioNR>%n>Qp3VgZ1x3v@W7fti@nK23 zqmKDmOJLFr>I4_9QP5wqTt4erYSaY;l$p57!Wm{aln!UTK-WX*mwT7w$B*wW->w$^ zgG?Gw_ZQsdm(*?}HGL`?zQ17~yXkvOtOB9c^@~?2C$Kd zuS#*(2HKO8cOH{KlL*5xivMWGk7&LENMSvEQbK^z8hWQb%PAoz6N%q^QTQY4h19!u zHku^gWgq}7qzT>shcbG-r<_)}lZqz8IO%qLe|;5T5-HHhk*xWy?;cxMUTr=5(EIv} z_#>|Hs*HFBlRqrgOZ*??64g5?i=BIL2ubd*edpuGpx1!(hxXf>usK7?@s&yCMt50b>gsqcF*RwGCpi!6nK)Yh!L8Ueuu_6LSpxFq zKnZpegdZbzcAU0%Jk9$B0JS6k;hTyiu#J3#R)d4O(;*K<0hh(pG9?g}3 zcD)RBfB}eA&{lLm)4ckfmiT?z6h82A-enq22a9`R$=_zA#J-X+kJHGZroYn5{d2rP z{{=NR?h_i$N#&>l%3UPh=-wtp-=$K^NBudSpv5QmoBb5$p5~(VV)aQQhPC6g<5zPhSq~xC{!6Q&3KYiAD+~mzUsWq3XADFPSLnwD`;I?1u30>fI+*R_#m0bs~gA;64 zE>LjVL)FXM^OXxkpiY(|!-v`;;fITT>-E26sF0rj*AG1xQq^3(*_ANvZh>kEO1R*) zv|7d0IC46BmQRj(lNLT6)Ad4`Z1Wv+tfOR~)nvXhJ*2RE)MNu>NfdQJ_|0UKPhlOb zZcp)w700NgkcaF@H+{aLpyVQLy|5dQZ7-oU}WM(B*9|AfJ@$->fp2n01qKVjKw<31`#| z^NCWo7}|Bq(bPHZaABqyzZl=e_wa+~SmdfTs<0_)L3r|o`BiX_4(=U3ogIZ;`l-F% z;_JOZq}F0X@_eEutV(kwG;Q7r6izdxVFtt)1|JQ@fte~(5#JnT4Cz6Jp^nxVR3yRo zRcz|rJ{ttO7`szr&2w3HM=sGlzHQv4E<3@clETEu*tuRQi5dEhK;8d~dnUUT+J2Z> z`g}}vvMFSAsl7e7v1!NeP4~U1qEE<3T2eg8+N?PVO548vDKH#jheCq6P z>zgz`kYMYw*(EM`Mgg$kI@k$w;KA?<0_jYe`S+4aoev|Fd?ml?=`cqUzj&sGR_3!i zMJ8oq?Dbv`rYRI~t3Do@=vK>RAB}E&br*W6_v6yGk;0^v-rCd6VdBXkQo9#q%DvfW}>YI=RqZoLmVCOS&i)dudiAB;hun*{CQ5~BH`4zv0)|{rN3C?_W2^GX}LP-@s<32rG%cx%SlpuGXS^c z@#*a91h|#%Xq=$9_{V$Jk(JWHqKD0O6%eqsiG=Ii1lETPmR#fTZ@ZehS7mbGE&#~nXqXZ7N@OOLVv-c_ez2VTA@^ezX3%&5~=_`MpC(>G7nJ2Ez?9|;{l9yRR; z{7XkpjzsHG9|7w>lrwsz`#AIX}xZY>I`jFPh<6Vd^Q;$rt} z+n#=aIK~TtCAYLjIES@wne0c7=;$ zDm{IqL;qVb5GR@<{k7&3aM3zWa(6gB%mjaGGIvG8^LsM_hc;+F7}ZL2iYTmm;c93|Bs#i|Hn?j=;>Z} z7q9hax6*=Gn$&Qc?r+T9>*$s9{j+6~{Nv`UdQK}4nk&8TZc5EA5|4YocdphSHA(5t zG^m%4qSXi?*tO{~mtSf214af6$9HSDtJ@#X(v0ALl0OB_p zwwZyr@vFA$xT%lAGEWEpP3^i9fAtKPq1|j2BwP4UWvLNRR6R$pr>gXV9t(sEH68sW zH;Ct#qnxjTQfW4YDwD*)C7=;`)lKHZ&-maG(AyQSTZFdk1+|=!ER!#>vb-xWaiVk= z0pOYU`A7Z-`~sDE0hJ<+i_B(xh!{}tAtG{DpqZl*5MAGv%5Yy~NOF}2n_Jj zbj4k$&TyB^K3sull*VbHx5qk9#7J1Zmh-59GfAFx`3}c1Vo1d^VXe^0SDHSot5uz8z22Ihx8I@P5?Wo@_MNL345|6}_c~ z%@s9NYABrGi3kVLBl6y?`HH6^OI-wK!a7DGM?1*jP<@@60GsCEN=4RKE^ME*eEz5WzdBtbw%N%o5y;u$+LBx zZCv0k%Gv6BC^z|Mz6R7e0(QC2fj6C}q25?s0BGd|nfc-m+#cn|iO};yeUJEiEpk?dT?`uzx6> zCKCw`(AhM-UC4%pYis^?ar8YKxdlO z1eIsi5g~+t{Uthil}V?O3yEta4`};rCr7G1%BbHEI~xLm0O_dHi3dPvPM=wg2IzWS zC=*=XBtaa8kkXkpg=iu`PFCmn%t9w%*XC*JG;?z1Y4QcT{UX{k214J1fL1x zdtWgUAON zTs#tut(u$WHEEzdcMyqYeDAyl616-PRva1`l=9^S96W1Qi+q(lq8lm9Fn&YBS^<~e zGH2d=JLbcjFPANwmT+Ho?+Nu^);_n7{XB&=uf&Sf$Pb!0DQjnL-W2Q4Pj7rmK_xpe zTGkC&G!N&{J-fy)6LrOeJ?oDvc~>3S$7L+}Y`(&b5JFa`Nl>BKJd&{}78FUJ%~sN= zk1JzMFOtLEaVNI_iUWPSUIYM??KP5K2XeskiEVY8JY28(g^HD8Te?8gK3;q?bMx-Q zi$BuLl}=|_(jF1c^>O2OkG-PILzGKk-ZU>vYm1WMa7vW? 
z=U3yam9J?KL5?|hm!x=J-X{q6%{<;;L1wdM{II!M?aRTK>9Fzw*9#%5Cpl?Y_6R=| z!o;sh1#IZXX^CS_@0ANwtrhaC6(303b=)ygkT93|274?(^t1bu2p!{8va8qUl?l!J zRsAij4wu?*(sTp>?AM!|cKq<51f#?*-)q)MQSuxMdSMbJ0FQ5SX zozUCVho=C>OiQ;T1MsgR&hb!k?dk?wNq{?<$6F>bg`?4-m4;Vx7;r}99OiS}0*X(- zGUeZ;z4E>*ZDT0P(_AjEXX6rrsSUPMco($)Ksi z*W#ZCxG?hxfyedamPi}AWrUGFh^b`=mdG+y&rfMICOdc0-N!95yn?H&@)@7%Q(Kfs z7$hlG#&%TclB||ZX{%yNL~CKXs{15JzFd<0+hCr9?<9k2zLG&Tc;5#S8Z$ZW%m_Dr znpdhJo5U4;L*=9K-ZiMPBoTEV{3_wyGu@lEQLK%exp_u%kdgxeZ&2-~4}qbzl+;y% z=Hcbw>%D^)l6@*Wk*W(KK{Z%=t9vEWv%+GbU!IP7aKn%wb)}VL_bKG>fKv>cg{IMu zeaEz2`e#PhI@PBKkY;H-@ZDi9ed0T7gTgy?Jq5h81mp86RgV|mf+lMrYpQO%>?}Q| z2O69*-bT^B46S(>9fCRqs) zxHDBZ;$n_-*3BD9ZcE08Tf9^j*l;tBcc$lc(8OF^O2>V5R>y5-^-T0_ zEh;a1*pbO#VKnm!v^Gq7)uOB{YmIJai(u`S5kvcKrEY@OxXKUH_PPwOlsW1~$1j3d z;k{E!l6$o;pqyv1QN9y`iUlf6)Ez%ykqZr?v_J|eW_w45fzy*`_#AQrMt#}f;-+|S zx?09St*ow?GKrBV_pX>h*L3ntpxnJqsN)#RmCW{u$Ewij&(ZrFON`UlGN~#h!5Z4V zFD=Y&J&x0HOC^j#s!u_VYjbTTAZQ-t9c7B{r^?tmTF4CVnsy&K15v7>M|^7QrHQV? zL&3{?@mA0-UEF}`0{TQrv&8KDHsd~;gNmynl z&Fg!!`vhvj27D*!TW_>VJjl_|MNooL##N4Lpu)sxmu-0=0`jYGcSiCj%{jIPF7r#@ z*oQ?K=y2lm~3WMplKECW+)Mi>tkOc%2GA#Vg`ELF+u z$=rkj*x3m5?BTS4<$#w<=TNTdT^TvbrQxBf*H4WyUynkLAhH~^jeuXUq1HddMlL40 zJLDE*h%ET=zh*>=9|oXLMHPNnIrZp2m+s+hVZ1b7DwN2rlo`YrT8xJTC#WeSe(blb z#*m3P@?UG=#$&plFr3~Mp6fY@-M9Fw3LXL&YThT#=+B-S~xLC2^`Blk%I>0BjS5aG4q zKKYPt?)29>yxBIJ_HS2jmRnohxSuXv3eB_OmH(-9IeaFPp3YE28-fDf>|4x7PWO5$VTziZoOwTJN8O~u_z!%I4Z3+-YHh+sfT z(gvwxRZ)c4tL(Dwi*DZ3nZ$1LQj=rsO_#UB&hRBTPr>LI;JtwcHTLOZDnkhR`8Bd}ng#?gKExAbWj%m1n1qsHo9ljg`uSP6park+ z4O$!CyhiJx*ce}-IWQQtLmG^^H(A6qYf7`IIqk3&l_h1_Y-ptr^yywNovHQ0SSE4C zIa_k)0FuoRIXSwfg9Xf8!2Jd(A|;t@(%!T2kvwz7bl@@nm5M|bTPTf6KJf7JLjP{T z6})`BwXAE%g@{Q}mXhE*Cwn;~qPIUR%R40)6dgG9967=RaxT-qfxwoFk^*y9((bDt5 znWjKuFw9|^-csK(uM{IBzVWx^V7=CRFsGPrjw^C~;4e9L$rus_ebj5BC|)ZM#V<{& z3|neBG^6Lk#7^?w7v1AjfmltkK|Y$18nE2!4e#&OGP>uvB5)wpL}EaM%Z-Q^WOc>unNhPFZ z-As}7nc!Nch1s$v?u;`sMP~J}go_{~ug}9r8IaQfw$s3U{q~>3UKu>XT{8^3lFP3P zm%&_H8qj(af3NgR_2vEg{k{FkBk$u#hw-_J0sZ;suUC+@p0uA-wym$=o5pK0rsAduxcEIJqGq9E*Zkz}7%_9@X7HGnzm~Q=Za+Ue zGVABEAknD!Xa+H&IUaJE5h|s=Vtk08YYM7dlxA;BX2m~#(kRN1>ZSqR#$HFSP3CZ{ zBYm5=>zhc#=Ds2f#ll+Tbw;1JnwJ(-y{q2;Js{fu8$;j*|Rz6G9x!j zHTt>-CrIV_vP4SaP4>d=W7JbH9^a?#I9gQyw4%E02i-W_SE$hZN~ub8hF^w+fsNbf zPgB@j;S@e6PVB4eUSu1FIN_lDpG`_$4V-(HNoT!^%cwxuVVN z*`XzLvtGWrUMivgveIgjJ=tUbiGH_W;~l#@!d1d^b&`VUZ#l!?EW@}?VSh^4Mjb;0 z>i1X^1T&n&4;2m_U&An&Y}Z;f99UVZr6sZ#XBSS&lFkQNRz_wz5Tapn8Iu#MFO#~> zBZi#}Ni>^R*4nFKBpv+}C;MOr;#u^Ed7ox z+&c!}r!O&$-_q?{;aAKQ*q45>VJA{7Oqd|oUkUgYuN)voaPqS@kvVzRgXn8r&8DX{ zZeO{02|ji=-0sO4gR%0UQBIDN2f z#%gO5M)5D*~b^1uAxyc$v&tb)( z`mt*y>j;;<-yYQTB89lwTfJ!$&Zn==bkU9v&G_sVhgu56gzc{Ry0@q0e$2XW$znQt zkS1*N3I#{Gj8EsrF{=1p_*TjY{*cYEw8$J2+9nGQT3K1KA+2~=s;ZtYR_EitA2>5M z*TcV2=o7%BMTpR$h)X1^94}p0FKkmM743iQ74}+(n5~kr+Po9UGtnD)lnqa zg*I)>c4QeM-{kg5HgSHPF~!3F6geW($0|A~PT?hd8NdDnI)yQIHv>d-$J-b_7S9E# z%vZ94oCj)N4-7ebj%K>!S85Ox!ogs1)TcF?93qjc9n}#MpMUB{b;lCEgDJZnf#W+r zR4*Q47)G?V!$@aVyE;K6H;)y?Dr6gSK95t|977xpqD$_uLI8VDm ztl8o4Zhga-evpbpUg?8yRPH36+s$q6dOAVa2dj(Mgv$zh<*V(!u1__1FC3)9Ol*|B z*?T99sZLx6ErzM5MX6xKp%C=k-1lK&ld;?J#cRTV|-eHn2gdcF3_xS3(zAp@azl`VbU zsVwiO@bVO(2veNL3y(&N%(+ij;NDzXX-n%ej_TZ1dutPKdLCyWNY|9g9qFJN9u)fB z1kP@aT<`CAH@}%~6cRdc|KNVaV;IjW&A#dH0b0;^HaPnZEp?Cdjt|FUO3d?i#EO1M zCBjlhPjCQS_8`?4X=!vd=$z(r91am_UsCv`hDAzr9o|doBn32ix)HjdzN}kWn~*q~ zEKtVMe$gx>1U12%kxM-fI}j7{-Y48AKd159NytgThD_VjUVPk~2|j!1gpp5Bbh+&6 zpxfm8>70JHD%4S4rTC$nM}RK%&n$qjF{Zo9s(R<07p2u-?gqWnX-h9@&6}@0I=3JY zCngR5gNZ}LW`3gruf$Zeb|;6lb}XKjeOXvOi4W5D%MUFUKx8y~HuVsip_YAD;GJ7w 
z7LicC#3`tt?@J03$g1V^_VuhYPN(@zwwwvp#9i_V8;mC_&Hk9Ku1#@o+lOn9S&)*2 z?*6Xo1{#xk86#=w^Opf7&1sl%Knvn7!$YxF!(&=6z8?-Os{VDn>%8L?OJYXf_jls2 zjXu`neDZ~4kr;-n*yYb3nh}M;Nu8e<_?8m%5zL|q=@`l(tvFt_B3W?{GAch8<9Kq6 zX@q_p_#~844iWSw>}2YLUD*a)Hg=q;L_FV77yjz?gyB)xlP zgl8j^#RF5|2>ALz%i@HLw|*}x;R&dX$TpG^>+4ph@ly@@hLCP<`wiMWu>{BjqhQ{> zB}(`(E}%&aC>GB<_7L7C9;3vR&q3&v3_XF~Lin($=^Z`%dkmdl&L%~OD{xl!gi30A z*~wU|B80!IzOA2yccqD^6C5uWSKXU`YiL`M8q`b`^tK9f#+rq$Pru4_#o=j!pKiRi zXCvK57EKE}T3_2_+rE<|?o`2S56?kPAJiJZCfCzhd}+BKhH``q#OPiw1KApyX90dI zNA1nhpxCdexo2mY2pt11n&|`LP71H=l4J0bKh-(L2+*4}rnT680(q|cP?ytwarm&# ze>q0b-=o%)j6S49qhohyQ~V{S9EQBGMLq|U)FPS;dgFh*A5!8^_(jo_M;gW;KjV81 z_p`Tb>PReT+X+mdS<}y$bk_$g!_^k3Wi}smkE{Dj*g%tMaH$}lQm1J%Es3U5GSG0< zd{u3cfy<}yw#Fz#_~}TD(oB%q6pNS-4h6z2L*YgOdj#FEfz`sdU%Z=L?33sG1Q`5j z2*Vs)VALr@LIzb`?+{-mLrh_As3Uw~q(I0?k24S9%=b$4k72I_9ICbV<+4>%T~s_0 zqnAQ%Ulq!b)Md*MnOgHukl0_f-sdDYY!ZPxeO|O&&nOP}mL}_;!Or+%zS8-8x!rCS z#a33$g_x8dC?bAng0zCo?L+2vOSiL1JH{Ka%~KZ%JTDpL^QwRQ07|iV&y3~V4~d$) zLnQb^Z;LWfCxWt#)15S1@uW&b0_PcFBm#;JOwMV!ZycS;v8L98DuKTF%0Du-WoyG4 zeNRALw9|StK7G7tB`wR`MFJL>$)4=*XM&Ph;}ln<;O4DoXfbishTViy?M1Ia>8A~b zu`VjC_6Ht8STkBO71_KfANdY@+Tp)fRox{w|0L6mIQ;cP!~TUOhLus>2uOAZbv^QW z!##q*E${<=RFRk-s^=TyESzb!*W3kg(2Mu!o;9DI500;`@g8;)*q)D=cm%F6*bVEv zfh;&yFFY}pFwtiK!#_jbe>nV}HcvKGiHRx5a%UwBs~v>djtCPWYUP5-fk#NAB<-lA zmr-p2w-38s$M`Y-(-Z5LD*-qmi@pQ2mN*ZxUEs}^m;$X|yvp2jPkoWOar`}Qcedcc zL-FO+6E2apT5!IT3E>yqs<#JE94saRxs8a4BsJk zT^}J(V^z1mOU(E#3=1YhQ$f~TxnJjO1=E`qS zEbPq09zFnG?EW-x@Zz2x~oW$F$qXh7wWvoaTqtE;8}53M<>r6qU%ff-Eng0xZQ zYF?;>%f0(f7*yOCbZG3(osL+AT7_)KBLkxMRdnz%p?!t;*}S)KNd=6bu+|=QV5%Ih z%z-!6Vb9WA9M$|elqFhw(~ah0N2wnZmTN{`q~g%|JvdR4H8rd3J>a9gP=2!RdcUE{ z#DJaD;b*-q=D`3_+85f_%F!816PG$4%ObEcl);-NjmKEH!^HP7_vMCm3WbK0I% zyf;O)cpDyx>{K4*#+amV&tVzBTQMzIAem3;hj>#Yb*rHS!cD;nUnPYI`f!^wC~lQc z&&@qOf{gZ8l=N@%WAg`O0oPzGC<%#=zdAs>x zcSEFr+a+AM8ptMZ)?RXHOf4mXZ5@M9L_8zuivPad7kXeRh@mz$KHO*#Yktq;jW7=) z+Jt(uKtaSvy0pYT!3S^l$jXXGjgAqYpTN7<$NF5gW6mvqdcEIYn+??SthzA6C_Te@ zU=7oaS$%`3YKsi_(OEPMleo5-j`sP8k6nq(w->CAwl^x`WW4Oj-cxzT1F)$X{Nx_z zUsz>k1AOR2si!P}#7tk&{xsY2G5-d3quFfNY{|g=VG9gjpy%Zr)qeG~d_5==N_jz% zbhgBA|52Wqo3*)qyZPCdB|6V&6I~acg^QNVRRiDeFC{|-LV4MhztFQWbK#fCrpcdw zJZ!*v{l39R$U;8G7*t^|03rysd01_4({bN7ZXuqQj-vG1!JSt3f7V&eLsZpW3_IcW z?a2+0yL`3MC*=3OCcdmhe815|!&Z^rC03RqCA!CjZS8#$w5c5;&6na8?%_2^?bDb1 z#NT#SDs@HU8JQ7m?n21GEB2R5@oU?%n);V-bP(wjyw)kHjE*@=Wm0WrVg?%0_c2Jg zTO^hkK1Z-|jo`!ze8rJ8WgXqy0?8X~^TCAb*g$sdG>D0Of_+ayr(kkKXwp&?Q*9x1 z3E7Nar>MQJyJO?N>|W9rsfH4P^wGZ(9_;%WZGyLtgezfg^K!7_Br0qi> zdlz%UrXYp}gWZB}ZDh02Y+c0z*$X}mhAF!oq1=>Sb7;=~$o+m@oqid~!Im}7J;T7P zuV&*_5DE8!8v))1pX6Sm7HlqoB{_o##vPAX!Wnm@DE0a?kmC%Q$Ux7U{iVQdO)S6S zZXg*`C^FtkDFrP-(d%W~Y9p7iFa+2aR3YDkNv>WX?_L6|u%3Yg6k0DOGk}Jv?LMb@ zKt$ozKK8GO<#}h9PQUPxOyHzoCajDx9dvUym_jq?JQfd}HD4HQFSVxfy9S=kj5c3j zM;R|>EeOgDIGK6~^b^irwiPEe8+wXK=?{k-!6vMXAFw0VEN81*?{KJxiLL(85ZyYA zz>lY7OdU%A5(86FTn>^$?nriP8{L^Vgp(gNpTRD~fz9Zlt&5;|{|do`&soX1`O|m! 
z#GYI_nKy@#gGgkqZ)Eq?3g}nvee^5DS^|F&q9C$5d6^`K0$U2hKIWdWAQ$v2;yC<@ zf?CRNOM{qqG}KHQ{E7?qyKaliszX_fyY%I7Ul`rZabh?bptKg2gDX&+73hC8l!L1#2)#*gSU zPpqzEl~O?tZ?b8#reVM*bgd-G$e2&HL)qJ%2iU&EHNx>wHcFM=OrvnImkFHzT)lV5nDfn!#0q-y-6`eo+jJJRiJK$~^w@vshmgG)Y($ zP;!3f0bO)fCbMlsPaYnd)h^bOPd-kNK`(uw_$3zCr!PCy_FctIWJjPs$ykp6AnN}| z(^dGx{m1_c-8GZLrp?rJone|2)5EwE(>;A+!`L)4b(rZo#&ma&$*JjXrhlLB@Avrq z2cNso`}2xtz1NiVACGY`5T+3Ke|V`Hmn}sbBnRS7gAU`Tv1e)Y9g{8t1T8t4NUN&i zo&VMY9H%;=mmm7e`fJ3lZ8&K0x~+S+x=9IbAFt4bc!zV+yRP1K@cQLHrgf~tg2!=T z9^JXFdA?~2i5Ulp-&oC8xjymhWf*32UvekP5(4Y_T2YX51(sJKH2Xpxq(Ypt$|CMybiF3K z5OiTl&-yr`GQV6GU-a{5CcH1JY^~JYgfj-CX$kq;ojbUL6p`g_>9;=uw6+RC5Cw*u zXgKI$0W;|L!TZ}}45cnE#QJ!mZ2aP}=uDY>54_p$+>S$aK>`tYqjay;MKt3MNFTzG ziV4#FrjJjs6fU&cO$AYD#IV~j4WR`P;BjiI!nXB##s8%;g}O2Y{?_;G>|S7L^dRc< z>LA#7x`Je@$;RkCgtRu<0m_ic_GoXyS*a@O=ZF6AYwWU8`5tER_f-&+w9yJyBy?6{F87?mu_e5{$tX(<0TIs45_K!;$XN~ zXfaK?Z52=ua^z=Rv69tq>8sJ^zGxyvHX_lj*|84jprvDMtW70JcWq$US6N5vUul#8 zzJ}{jKFQJpp_0xF;ZMUWOgG@@@2}gBl$e~F=NXU>tfBC_bvv8i3 zBHbR6{!%R0>zQk!dXE{vULlF@|w!s=0P5 zhkrL+YijwQ?OYV0gi|M`*|Dhl+|;Z^bdfG&BO}@k@s4|`?z|6?mjOT(J*tw4oA+)| z{iA|$6Z9FUV?~Eb_Z!1-PqwdRr;^!Fs*r&bzL?5E?eljLu47-rbX)}sRkk1V2-me} z^C}NKb>8UM08Voa$!YW^eBXYAeJAPH{t4pbSb8^VYQS50OP|D|jCPiQOiIyfx2*r^ z+N0#U>tDryr7Y^gw$^pMu*bw6OhKm_^^pMmOM*9vUvPMr)x^a2WxO0Hcq>Iil7GjM zd+rxLqs!6A$d6rP&~^w&?;p}4?-cJ`Mg&KOM#_V(VCi9-4@B~iCO{4{{^w%D8Pbh0 zXR#>>IF-Fn!O@5ZakpjHTDLG}lA~1hpVE$L{!_%VIWjr)!&A^1s!+BF5CJ9GSTMDV zBvA`zsKUcHg8xaj&Bi@B7ijw8E9x!#pALBn`1S-|L|U48-VRNk^kxVpg2J;Ho+@Nv zSX5VDwp`UK_9ei&l0gLrgQ*-9ZyJ%rXcrc0z7X5nPt~AlqMpM12dO3YTn`e#vGva& z1>F%8H=tSY-v6;g~s<*2P}PgNW3hT@W?lQ=K}qz1`+Y0vhUyNeZwXVmDrQ$gGAe4j~b7f;YhIX_phTQ26+ z!oMjei>HSjC#r1cs(hQqhrTJu>Fii*1n=_47?}DcBpv!)@^ypR;&Jy{VrZ#M<(I-A zy`)C!)3RxMVhUykf%>W=QFY(EEy}y{K~Sw*$UILJanBZscNzRvS`MFpnQ|@rR7^8@ zgk;ggXmxv8K1&rNW=1EZ&wR{Dt6La4H4W_*%uysn{^`9`M}{6|m=&VNLcwmckv)by z#Qi*k5IA_2gr+$KsUct>ws%0NZvL!UVy~cCC;ZX#_U^Dsc{Q9Xv}T6PfU4B3gdm=# z?B&nrhRHKZedRpGQ@|XWXw)R*iitkeE?kJ9Pk$5b1f#;FZ>gxj!-EoEU)5U<|95^H zit`z?uCg(U?kL(5mG5**)E9^rbQk_hF3hwtj4dE1TbSU3% zM69b-U~&{dt*oyljNWQ4++m4V`J^o0b&m7V9Bxe=e#L^je$5~LLc0&w$1OGcW0xDm z(Q4!jMm1i1J{9&OxD}Y;`zbrK!od{uZPk?t8W_K*t>3aii6T(0OgLRR@1JG1xA`il zy;Mu-CO=24N**J93@>Kr$3g!!_0V7ETw6t)`?F`r=%d*kQ{%Cw6-nQoYqyS#=vk}F z%0nzPQ31WDOcV0y>M8AbMyw`BNtG)}m15GwHB|=VxqH)3NcFXv+Nf=ht{w1EU;oPU z7Q19)zC{`9ADP*a(aMSyOw+G$sB11_{A!2$ZD}p@CO4PLS1Yvbg6F|_u-1>eTi-Qw zHAVflJGM*1H4q$=YU{55DmF;qND_-*`UKI9g*CKQ6C}vrMZ%aPuga$ilM?ockG!xR zb0Iw}KSfV0^oZk13Aw-Dw_$R9VFUM-d~uEEX9 z?DWA4p;&)p%^IhIDBu2Pssz{?tT5C6EKH-s6Di*K-Rx1ADrSuZyfvI z^1-z!mKiJ}q?Y_l7!(sqRM^ToJxKdZ?+mn_Galy}qJmZT;Xrxn)2^E?v%=X*({tcqb269)DV$@Hve9b6&Rv%b z^>G*odXbAKxFb&;=p_|FQ~EL5ClVk}NltuH|S_FuIg_^(8)H~j zP#VQigBh_xw<5jgw!?pX)`?`Z1Jyu#nn-LkByqlE$YpmY5M)##8071a@z^FM;kwAK zgw~>dZxduWh;Yvcj~$RqtCl zmFA;$svyMK<#vJXGJdx1wby!>srLz^SH*)1X0=cvUcoC)`mmG-k#6{Co#s$Vuc%Yv z`W`!6+HHAnrl&CX0;J5KE?@2x!rwhYPtYO(n3Er_qMBdad|v4?CG(WIIt<;7<R_t}_9)UN*|*4o5r zmx_30;rK4Cl@YW+jHIL~fl4?}b~ofbQ@&8R3ugu=F~nBl@@MX8`VZj+#lJ}Ij^5l; z(*a}V>u`0_NpeB0CjlG`wnQm%f93Ltjfx0YJ3kwgtNhzn5ZPSDJ#fZ_fy8W}aQwF* zJ*cs#!_=^9yRD(t%Z~Jmg$om-NqSsE3#_9?kk9i4Tn7z^6UvuiQr|354J16DuL%m3 zWEX26B&0iX9CxRm_sQ3Pm=HK&17o^CR}PUXg57@8ixK(2(51Z>qL zaA0Dffh;zDiMrAzKMPX1mKdg*%1NnxWp2iRoldrTuuboz!ASH|&f1k7G_0i$`qwx`omry02#nm2S9Fy7~Xa;_*nEX%mLRSEH<>5kuoo?%GV z)fYA!0Q>3QzIeL^`oth$YV^5uXv>O^(Q`yze?IQd4j9fglV?tEFI*Ltb-9Lu3-0|ufMGNLrCa*s>aDO3t|Jv4?pG5k0cJue3 zoJucU=f;*e?mscT`+9-`jbR>8(cT`P({6D8hRb)?0$USUGP@s@{cv&RPaLIKb=rN6 z(Sqj@N29}dbM*W(`1#c_A(2=@UzKtHGQ{6vUgAD!S7Ok?6c{BSq*)d>S6E@F{m#r> 
z?(nBP?>ZLF;?9ZE{Yfky+qT`c3YmW=cz$(Xx6n%c*FQ9&`sL=4#w2rUV0HhS`*T27 zEtr>JlkVRrK8_78pr)Z0Z}(`C9+(yW&NM2F`1@J^MOPJHBSxj}*%G&XDgkpyQ5Jq9 zGfbh}>NIp551UF$dYMKtgLX+QB~c5+9nMd@XisV%jvcE&kM5zO0=pavL!37Q{KC$6 z$}%KP4uvUUNN4b$wr4aYFsfG(SjC{{(}X2??H6yms#A&dAgfq&ppA@*kytg^=Vd^T zjN{qmL#4|!HS9=IeHs6%@C>YV07s=a;W2{$eH`(LT@9I}e5Lm$R0h5GD`BLWb?Lpn z?Bd);TwN~KAnSGGwHWuYeHmB}#CIwqr`*4qfwzht7f&a$u;0QUt*ypGp3ow=ct8&> zK^C}ZC1-(y(`!YUb6q!(jTQ3%mIm>v|0cc~Gw3NIYk8DqR(4E=1nAO_!>1L~Q%05B z5W9~_@)c!kbd$L{gO#w3V_UujxbBkU(xX(}Nc(6JQ+^mweTx2r{ET-cGunLt{Z7Uq zOleTKk<#G~peZ8-HQWsQElFi`NxIOS+cp|bPFAVv_-Kj|UE@rFF%)>x>lLA%7P|Q* zAZ`&HUNKF{Et<0eC&%lFhS{LrXBx9dhh)<3AC33;Np^#S%sd0Gw#GINKPogg{kZ@2 zH+tKMWI4dl5t+5oi{!ze?x%|(5iI6QLJ3X);ccqRiuWzh z%Xw?rT{5$rtbOs_FXz~37wmuUt|i-buCg%j05C2R=DXvg|BjC0u9!LM^#y(wn<@;X zXd+H*b$#MaExl)Q01o15a$U+tCC_d=56;AyHhD9XHyGB+eKB^z*eU9YvX2(;L;7#Kiji{j{3a9=P zyC<#pRd!@!3kKbG_sw%W30qJeZOs7tqVz8XGL0v;-|tHwH?9NC8WiRV>DUiA6oAgA z%bqsog!6V&E4*CY_1;hhZUyT6=M6f8ojJ+NFzI2P={wqL;t2NhFT3EkJUj^pze{NK z5%h_8^fy-fulqhNhY3EhTVSn+#W9sli1;R~^)-V52azyd{sh-UKwIf3jWDvu;B3M8Am2`JTHXzz9f&B*ZS%Um&TivV)ruB^o zIgB{@EUFF9Vzm|fnhkWh-#~;J^!W!kjV_aqI)a&L8ps6e#X(8-qo3}t=gf_r`8}*e zUhIP%4AacfqnhY1OuLuoQyVl~vTg2R=R zTOv~rO%9rf+;!QlyK}qg#kg9^ykp;$j%bP{$_d(D$`@D>Nj^C7!C;ZfZi2=;Ob2Yr z8!pceqi3=G%bIX=M)=QXoP%65v3r3vUW$-;gKt=J{N0A2jp!(;M^~eYuSQ)t0J7&%!dd*%C&%=z=)j8xAizB)~#GfV9;ckEh0JW^#cgOt@TGx`Z&iZYqL z`&}+W#P>(#f&mlq=X^`PItSoc@^{OvswfGNXluht-jgOp4S)ltYd^m|Id~>$bf(X~ z#W#0I2wte&Xna`KbaY-l-;wn}**iG5V)*U=!&6EyEUQ)lwfG#ks8^rF?nnYdI+9mo z!SXhKP&Ox&@?vX&bTh0AC_J}MQ@wtQ*;kkC-g-IChPelNh0GYu2*SabHvBu6qxZ?5 z2>;<_-5pL1lu*P9!2bPiD6rhz9oNJJ;DOF&>77r?MbL93MM*Mr%xNG98E!?hLUXpR z5xC$~v}+hgxAj@KX8nl0GBD? zZNi|Aszr?0KbZToX}D`TSo_|iuSFy>*%=?Dy7eG0@SC9@_w%?9Eq6y#g`0&KA{I@? zz}KnqwF|E0;*O2s0uM;zLMTl%(=i__)d^Zs?l4Ax3ZhcyAGeQP2DcAOyHeji)4kga zJIPz%`4EU3ZGz;4QASSXv+iOi%`Ey~$KxFm;yR`)AgMoSh25aQ6E*ke*1az3*63q@ zhXMu4t>r|!PDxRL)mRlGUlbmLJAx@Tg3?F(oaVcjp=P4yYnS1zj~*AX+O@7LM=cxI z_5e9Z`VllCu1pf7DS2d@1R)XNh`v`kcS#Jo-)M3E%gO=P3z0=EMAu)i$XIqVjBD9s zz4809e|!;lp?x7uwLW4{*RV?D(>in2b!lVxO(Kn?vvoPl+vCrA`noBo04oI~b@97O z4rD;{A*kVITO+3)p$7i=D-$G+Tf%A;z9Z()$p!SSFx~dZC;O`wg!QRJtjYuOIGdt& z0fT<1Vj6L=*8#p~g9K;_L$olSR2ST89#g|0wIl;4fITR%2fbavLx1Erm1T>+ON4O= z%CfAlrjKipH1yFfXj+h;Myk?+DV=pj#M5ab#Q!j$RH@$xZB>|OdcVw04Jt_kOtL~N zNgYh!J%O;X)SJ-JzS9WH+ZU-IZq(F4-pU4-JyZ!NXM`KsYLnp=aw{vlnMa@;1N{s> z0Or_5Z8+*Fmgb@c&Ogu&XSO?Lp$qaAlr*-ciPglZnEUsA!1od^z}^hccjnK}!K?4} z`A!ixa#Lhq{=%bai~NtHoe@;9BlSL8yCf5gHO;t2?ILm}rEeS5J#1>EW!Y!_>Jw%2 zm}+j+`4bSoAl_KNGXYh1W}xRE`iiy)%(#%2^fQlfVNeA&`A83bhXEH)IuREK4DVd~ zoNo4=*r)mnxipgyb248}Y{45QZn?i2?PNc!lUw4P|tn}>W80huZ5+61e!Q+4R zRFWo3JecK9U*zrB{*%J08E>)8h>^;&(I;3;o9;35da0aKAZ|Ga!3X9BiVGp{>F2yb zQf`M_H3H`>=U}4`C^svQg53Wq#lJUNDhF&r>6PBG#0b`zayk>}B`k=UX_I$#C2=JN zL#-GM49{$Hso&DgGlG(_R|PL9gJkwq=sO3>nio`0o7{qRE@a*+hTdpv$iMtX8A*#3 zfOkr6!L2b1oJbUSVV>)Crq5)=$+Nne$x<~&%gq|fCC|Mb-R%J^@-+OsNI1S7y4^5LJm1@?by_9M?} zxyg^gfkWHI*(qQx1?-001$xDKS~eP$Dfrk&x2Y5 z&Z~8cA}Mm1XTFFG+^Q+Ron=%vPkF3=QiL1*C}`2X`KJ!yrA7P(S{BT}?36 z5iU}xrDYRBf3GearwJ)=&~dPRKB^`08GD{?=?))EeX7%S)SX&rlK(l_PK;5^fGp0b z`5`Fj+L^?bXY;!M3GH5#^k_fvMh^pSI&JpqBIu13~a&t@I{vyC%EHTrl|9ecM+u@A)o$Gs~*0rY!D2Y4l zAr?9(B}y$(UR#rdrb{Ie=u}%zs!3Dx1nXTl!7>pcy2C>k5&$ea|VF! 
zxMa+fW#wDsyS&j68+z7!_D!v9(IIQO zi3wcLk9C`&IoVa|O%`_W-IGY#F@wJ*>mgYW_;KGtmkQ|2aZiKvE$I_0G9&J1W85M7 zWMbmGK}ZcihJ}&+;WV!^JSXRS-t;R*9mSUPM#4Jb*cRhK3`;Uo#Hj)o03%xMWI1I9 z0qtlJMwY>8_hm%Wi8JQlid$&9yaPW+Dt3nfj~5T7s6Wb+UZRGri%F!a@K8#kiqDX> zg#{6MU3pKZa0K!{S`Q9AV^&4+DyO+v_Ep^VB;AWB#Ux{qIld87jz4bs1%dB%U&(<` zC8{Az2wRS8y=&pSkD-KU!0cl+ymdlZ?cJ7A87TvZz*96COG#^jWKNQrz^JgN zWe7IVZ<0-W27?ir^{Uq%b zR0+|R70jbg?+}Fy$V7y>c70eI9{T9FfqA&Bx$hvN=6$251rxfBo%u_twcYmx) zfFdVL>flS~y!~5PXRR*Z8n#hOpdr0#O$6I#!?ti*b@~`xOwx^QG4>xo`@WroZHAu8f zoNcvMHzo44q@^7KEm>J``{}AD#{9hi#LZ@Cu*F{q1VFJHwfR1K(tu zRNCuK=6U`z=*EpUI|nr2){?)hN;^Mrm{ra_V15e&ZwN<-!RLA;dt;E`6XcI zEJC7-hWe?n|HRW4Y@%==sg3G0vT2v9>(FqG+eFoW0QkUTK@v3)fQOYcbp#F;^2MWOo*Pq36yuRa@Djkr?EhyeTUjGnn7tW{4q zWRVhdWs6Uj^dwgv3!TZUR9+3%BiT#iLr1kf_FE7{whD^CkY<$)Q@@G>pfp2b zM!@K_)|#> z$ek_ps6IrhZ=nNmbFVLAPkclt3BBKA3|`l(vRssSfDK#YXDNagM5FL_m+p0sR6o>H zI!gCO=bw=zK@txsnO)k;I1Hy?j@X8{*yQ1|V?C$LubzsvX5ypckt(pu-K-2l&gl_v zezW1X%VfO%_8jK~tFbT!Iol#1#?2L@hj7d7#mO{;-QRs`?HG{=Xc|LZO??+$s`73J2ZL*h6 zFBBDP9u>&qz#kSSO$trw%0de4&YjQyD>OPH3iq;KJvsc)`^&fYzQy-h3_&95(bGYu zM#j=`bA8eYJNta1pJ!%19<>*dIVSRhsNu;+joP6J^JTeJQx>ME(frDlWleB8Pn?5v(OhYHlCV#9v4Omz6~s=DO_9x01o!_nEOWE->=}+q_!()OQc! z0AhlA0-7{QfcS)AkyqRbCIAftv($l$AbGF6^J39D2+mY*kpH0nCpbm-|lEbE7M8#yTzJg0MMM zp2ek_z`e7steTTa5FUpM!t+JDwSJ`htyEpgLKt7fny*JX(NW?dWkRawp#TsniQ6MO zJlVxbY$}9!q-BCHNxVDB^Cr@xSu&m7!=6M{b-Tw$eq6{oZoBZhI}b{E?US`n;PLm8VxY5g3kSYBDpK^pU*p85e~rsgVKRRx zJ#UhS??Z}^FWPeCjItno7yHZQe6>y+Sn~`vkLL^W#c;&U;G1XM1)S`?73H5j*IszC zG?o3DKEvFlxygC}HqU?}pc$|`TA8qld+N&pvm|+5Sm=?viEfQd z0xn3q^k3A;NcG*|G(@i{R)aJ``DK%?Hb`$4WdVIh_F(Q2=m~mBLxo*RR9Y`3@-JKkq<0Ehsk`0376sqb@daBSn9Jf_*ni3-DMH$AY-0T{rL-QTh*Bu!S1 zGvAWK&S+)9O<~4l^m}!9l+~W=aTLvi;V93!_TIyAqCY?i*faH zqLq3p4+0ius?@AR3hhVg>jIilySczTE+$gD1E&?F3dm)TAKaQ(QY8WFq=`87mJUTe zRwJG1;@tUF8TvY$e5fjs3WiKzq13izbKqP44 zoJn-KfrWO%BQ0h5Vq(nap>H=730W7<`xhD`Lol@y%O+R`q=7l+>h_6l!}>L?2jbXF zqVS&LdOVLKZ}YZP0Li$#>(`@=)9ig_L2~t50Ng!t`?&m4LnA~+JHevfUq8{p%tJ*(CB88rv z-)kJp5@v-{R9=+z!q<%Pt|1m#xBg|b+9kR?4>iL!5UvoXO~KXG`MMKzQV6%7Js9($ zLW5M?LZNW~q?g^wS8K6LWn=zFX078``xQnfvh`$mD)0maF+l|R*ptAt`~PZ*FVjE` znW$UamU%TnADbRbFI9W?A1#CTzJPv!-h1odX!0;F1pCJ(-U4ikfF7@9Pd)nDz~1 zRd?M^%P3Pr!wDVszWdz5eML<0gX|FK;|#3%c-jyU;~-C|?sUi4Nc9IF=S-5YB>#hN zMDiif>OzaMK7ZC7+6=^b#U_WK8sLnK=Q(%S~92Z*>caSU(*j2W>!krt)5)=VljqjacHE1 zS=<^$vMCKfJHjo*!YNrezJe=$^%qk~t{$<&&aYWBBdFLUU3@C1nm`mEnn`t!iT;vXdn(1Q2LEPK1;zljj6C*O3dggtj_{yUpS zl>Wl!1?q(uK_X-BU~l(IMeHSa<+0fh(-Z*XU8)$X3+)hwI|o}1BIiU(hw#yzZxR-e zv}`vHVkqko9!jT1O?8iU3(J*n*`nIHZ|UX;kTy$8jl}{Yu{w;S!Wc`c2{*p*bKV+cDUOQ06=Su@zkU7cu|y#v+9qC)4QHquS(txE z?n=x`x$vfmc0W1Jn8-FYSpadoHT==p+P9^(y8iR@uN_w0dK*#Vt>F&sYI=g zX`-Z|N%%8b{tXARfPljfd5iS4gjo76b3yM`r%7u6ZUG-^na!fKv#; zsIcr4r-own5Hq??b$bPz4$w2uc2usp2DAXdoxwUv+0^)n;ar$o2qz5e(6tF%6+&B3 zBINrmPWUTUr#L(;(3o+qR;bRTdQ%r$+Du#g250yp!?t zC5cXJ-|Fi1e+b>4MKQc4CLoT8sAB5z#q`5~>(gHqy3Bz2@_|#9Q#y_RVZqS_WGfhsDJ9y27#@g@rsHEGs}L&KTyK} zNP+ch;r>wGzN;g%g*@HSLG#(pmxQ(AB1OC;+v!-s2Y+y2KN9|4pVrjq%6ftx_}Iu) zj1(IbKZEJucGzh)`P0;84-wb^Hs)XY5le0Wt~~feZY^lhqoI#YxbNCfOv=RYI zTVb!0-s62qE+&BfOC#(TM)hBCXA(^4)>%;CSLBQyVGv7S%R!118a(f1P-pvrG z{?LNf-d+6i&3m5J8NZfQ{`^lE?D22sQ-7P+)hV*Wv+bll3P4g6fbQ>7Ucb$HvN@?P z6JNu0f{W)AOA}Jw(F{KL;;!`w2v8`T?U)Wi^8(uuO3fwV(%;03@j9EbnetqDzTWeP z&9lq63_$pvCrngY?!@R>zJ<51Xkn+cSJ{LRwRmzKpUD!@n@O$qFJmt6-WaHXC>6oF zyYrD^;%Zgy;+zUsH9$mVg{?Qb15p|npNl#Upm762ED7=nnsJc$WS!}hw>=eHBtXp; zt5o4bX|(eChRCxDtcTL z#Y={TRfY#1vi&!pGcDOIL+EANb0>$a+(LPLAB1M+nOGN2mZr zTBYNKHDWX*8S<<0oShr~lBtdG#$ME=}PxJSR#f5GEp9MeyJ6UWITNM1%2S2?AV*#18#ark`bMw*SQWoTM 
z#)`!S9H-;^Jh_qw@A_LVu$aZO;OsRFcZ1g?(gxdP3rT22Yb$dRiSbW-*$n;;`39z z#rvrKT6NF^app}Gwzrr?FYSGx86W#_&)Qye9H?Bjo-?-&zB^ire_)O^P*4J@;8-F^ z!a;qMys=jRt#7W`U9m*Wnp=pm4Vm2&!X?$V)PB;gR)U^)+X0115ZuuL`n zN*(`c^|2q!XdCNye4{)r{5?#Ea;Kki{-0S$IY*RtQ@uAO+1zMmRW)d0jo?ctN37>? z=4PaO--ah+7DG9-0C$Ist0;y!{*{@wPVW!Vg|7=j@fX<=he3DO57!9;izZ<}7^Pdj zM^Je+kWt(#+yA}qnGbSzr|a(@pf=OIM+pBic;71-Q2J37_-?s@@AmU$_q!lZDYjqb zhAMh(2KWE?0AlwuPm@lcEpJg==W9S?v0zoj{_Ctn^(A|wxF;oyIy5zifh2^8Ey>2`b4NN>umj<(u)8r070P$OyL}@TI&D>LtvhLr4v& zGLY#~Jp%SbkRYOb6xZ{!;G8tXc)E%ION6Fw=B#0yAp>6o5xrnZ#6tYg`e)-BKd@N* zd)1(EIp|fYTv)ao)2ms%ie^)NOn}TulRpUFhj>gMCKzbU$=c>P$+-8Nzt`|#^Ep2T z6r&}%Y>RC1;08KP<}awrgfyw~2Zwm)=*V$cjKps1=`yshC1vdZI1J*pI4d{J;g81K zJ|>88*`I8%T9wqRQiDzX1DD3$Kpx^fyL>~3Kt0Ysea>O>saJ-?=1sUV%ZM1pz|>uL zcC0&=4H|r)zyaJyRgw987WmU(%#*UT?x=#q+e^b+aRNULNI|;DC#&Z^+eApt=F=ya zOEUf~dYTfFNf*6hTSYo{SCS;!eA4FkH>FPt0UUEvgdsO!;wuhr!mb792B-+qZ~(AK zOwIx*-({cgD`0q>(AC74C8UD9LC7Q(se?o0@KCwujWL!g{?|HaN~4onU{70z9euuo zgDPHz`V==zZS~%Cdj+XNP@|a2hR1YzT{BF$d4}R*mFA)kn-K-Rboe&x%q? zjNZUg605BOodC?BS(X^rAJ>b}8gDj6>wDAVBtP^;-=91!E`K?v8Z4j!{!=>Vk^v06 zjj=b}*BMWXfVgowrkj{ehRGwm;MP+hg|^6@{h9}j*GN~?gz9jOW#j(Y&karb5vm~< zdo3HU7B2$ap&C0N_cqO=WHv~dFb-x1e)|PqtxcQ#Jfk3< zb-JYq7I4t89mF$au-mbb^!loZLhdih?^C-n==AKD;D! z1SIkwTXf&H|6TQ1)cO`7byX&7XC*a=d0fv$p<}d}tEkrg&|XLStfnwK)0eyDtzL_R z?{=FIgVy8QQ?>y4G+La05?7yXNz!{4z|3zRC4A6g2P{63rY@i%k0G`^ioQ?4l`%~P z2d|5pgAwAnu>$qAlwUmew>W0(Fn3LASX7Org1o99Wn-<{@JCN^2L|=J$-hT8JErMd zlTp0hnJvt9B&rVjcl}^0wXMsLe~{BaY8OquRYH8<+-?@9q#D7$%|(g)rF-l@)Z>$l`>CMB&$zSYZ} zIZU*K+MZXQ$Mu$74{L@U&{yD1O&q*osjH-2KMqxeARxpT=T68ALv&vP9i)DGGtDOu zp32-dN=&Xl$!}C1)O>2b{`2ZRs?v`qwso01n${vJFfOS6^rHqzU(Tx@nGT()Hl|ZP z)Tt0(Bdb}neLgTGoJc#tju?Ao8&|^+Bq8WO;hFIYXLY1VBZC4(XzEYR#91A>&M%y| zmSuhVE!*`bCA$OVJM1gbI!0ykIHO7mJz#06YloOMF z>?~*F0pa&PIL0n#SG&0U8m!c_i4wu!aT$f!Wx3B= zROX-XCfwF23O$a=`vbmuSbcU`aBVO6!~HwtkI(e?-?u2vg(nT-4RI6qS*ms)1|+U2 zjD9+2T!8ZW1Q^|zJp#l9&0_9@!ne|?93*j;W?yg@w8|XMF+k!{Y64cPdfy{vafJ_m zPq>5fpQenw@QRqo3ZnP*KCr%6ZyMuH%e{qsPG4+r%8-z7*cP2U4ikV`RkvJwmR(m~ z{B+{L{wZbUp(X?e&x!xR3kVIx7TBMGShB09JIF0+B^q^!gZkc5&z_9xd}tmPo4Q>? 
zKv~BrtDi>(M75rH+~#sa>7&sv<(AkQt)+mp`^x?lKqzjX@vUEL`9b|v--cPmU`EWD zDX3FME1G^jIulmzrH#+Ehi5C-aTSiIH)b5w+=1W=s6j-YhGMiu2Zg8LJ{`@2cS`i2 zb8U|XO3-$m!Hr4HFQu)((oO2CZRp1bFL@?Ru7Rlk!OQT6SkdvCO+m>H-m{|oIFka% zLg@)yI_X}C5jK{^%lP2_#>wQ*9n=?RSbWpPV8+=Zoz+pO5=}Xsu=<(kDM4lfR*pr_ z^_k58`{Ju6-r9LGsY+SazzQ?l^5c{i^EZ2OH(H774H|7{Im3 z3nIcb0vVe2k1)mJtRHvfV2@yir!)aoeP0xZk67YIA{=xff6##eTc?wgBySHj3y-YQ zUaUQIkHWal%;#LBAOz9??RU#v>YG{HwU>f4A0nW?cO)}8=iZwhJuK|wOvuPbF&&D-+O5sc}KS#jh!F*_6~mw64_Jv z6VySSclwBtXpAECoZlwXE#%eAw-y(n7BZp+&6(50Y{{h<2a3+SnaG~6BgV;cCL|xy zo;DDq`DEXOe&g*F=fKkTT13Wf1$6fWeqOZli=CV_*wMLFx)rc9>bMH?p1!VLw{pzm zj?oEM&OWJJYh;s8o8%?__8%HHc(HCa{Vt_w>b$dWyj1nmg>6qtr_v%xpnLM)J{#a=&OS9*Zt1|4N(<@J&J>IsP_8t#*Xdn+}%LB-z_;4~J!Mou{ z);$05w|z(dt9WnZ->KQ}zMGTTNycyU;zRuX*KUoszHjp5qe>mMl-pNG!tBIhzXwHIw=XY+@y3};H%4!6LUQn@Cmz3_R#K(*3;)O^r(7k;> zTDzRz?)mdHi#s##im~RX?L5Y+XGK|H7z!iwxCKQleq5s1z#|vQa<6r z3nJmtE-;b4Y2JZT7cLl-5%COq*eL1k^d-lk`bZED#L;r=)ifV-*@kLsYX#9yw;Zq& zcizf09=`_FpQ=8lypld!cmZgsg;=Y#Ym;1IOw6J$09Qza?=O0)){HyL)4Urww$4{S zz@mxcA=;D562wY>6T$)!rW;(XScbvrxl!fXTKWwB93<6$0ic1ja(z>@3y7QPgS*Wd zNsoFZK`~WjR)55%5Ld4~Z+X{gu00tf1QaZIXsb!yGM|V&bw48-f6jLpEWCLHCW{8IeT$h{$(;s%!)2~+qC8V8Y=7}f zU+dk?K#u5LmGSQ;I9RPbD`UAcVw@@x@9via?KI8YGlI8CyWZHLKAwJfeY%yuuH1gT z{uWuGgCg=hSnkQx+;Lq$tEP<6Pm635q|ybAj{idTa$vRq%>Z^kgKMv{$-}$XspVp& z)fBCx{`Qk|)=!-$j(-0Sdtdq1R`)~;0g6L#iU)TsP&`<1hvE*U#ogWA-JReCio3fN zZE-2?TKuN({e1s|`};(mhvXz@?{#L+tXVU|GFMiKk1MYa%RN z?v^;|>X4`@43C_K;+~htV`GND;@E2Zd9n4q%7(xq@}{NBp)(yx04DtJO|u3I`vXx+ zYm#{ByC3ewPqR;vKWz;-+MHzmK&yf?eU5YPSM*H9K}^ez{zfD}#WJ#2KA${P>%z+3 zeisk5PgV7IkCekuEozvZ16n<7qfTpVI_?CZnfVwk8OwR%i}b7wLgo9~OkIZC(N8W1 z=;2jzH7i{fn0)ZZldWIQ6^^jmzaxo!tO~_(J4Wt6NOm)kKls2|aoB)pA6^~RA*Q#| z_Q&;II(9MQ46O5=!;_Efamwr3$Jb?(3B=O~LaO8N^Ih`ebS?ml;ArL-w((>`XSjTP ze*ey>Bi2BN)v#$(JFML!+V;#Iau{liavY}~BE$nqnDAG`4kLZB)gTIFSaEAG*!-@6 z+Hkq~Bec9DcP07&pV8%R!;R`-Ya`cx>fbilb-WvjXNQ z*sb~cc*>RWj*We?Mgh1l)0>6z!+wGDJ*({`oYXrHwxe1n z)cB}urP_(k{J=(Dc9Y+l^cnV!bn1vg`Yz=+azR);GJp+z;SP`^Iqmvbgv~Ao)e+Ve zcBevH!1C+S+aH>zT+Vgl8j7UQs|`9sTeM7{b8*9C2@ zdD(=d6qfGk+3+WiCRm?k43pq-m*QOE4}f$vE%U(Z+0lhdcwA#JQ1+QT(d*;Te)jc`tu}MYesq^=I`~)ksGsl;fH*) zn}V;j`8m5)+%XWRT4JP$<|86jhlj&NrAS}%S5j#?r0yvZ!^Qu`d2C6_31eOpurzkh zLq5T6W)_6>Q1bNGQ)jCN<5MI-1Zl>TF%xWK9nEI-93Qyx>nV1><1t&y@Pxgr;7}}Lybu%#ofFYCjyu_STiT72IZfK)ql+AuU zSOXTrOOMQ@7$QbE%e&a0(APk~}Av`yvi^YSyN z;fMho-yY$jRU==!DXPdX`qBE4Z1FS#jAf8X1Xl3+X2S?-TW}R+_BZD+3u^NztovRp zNpdWOm*=Py+Gz=j1YWZYf0!L3G=B^ks+?&!>3jig#At*E8qaZKi`VyxT3S@@VQ8cT)8f4h*e)4Idt4>}2Oq)otXAWT2=16Wp9 zB0P-c&0_3xa4>M|>pgERCu4?a;V{V@H})noGmQT%?QJ)9z^M164vwKc`Q5Gmo9Mc7 zzmX`;tB}Nn1a16*1*8swKqw-Soq9N8c2-Bv{VewUHdZ~lWZdeLNQSdjuq1(_6z?S* zQ?s3>I&Sq(jU%_6)fN`?8=t=OkhfJR<$a&FJ8hW$-JRlYVKD!rJoel7uvJX4X8Ido zG%;?QMJ`dXnaD0OdLn=gE0=p0W@;iPdiFUNH5N4&-5G9gP@i+?{#+p6$y-q|RZ#a= zTfB*3IKk8;00AL0#Qmi8mJciAs>{e=qtT){_I<9>-W?<7yN|IJ9In>l2Mh5b3LVYw zow!Yd?+Jb8_`r|aP4e3Bo$x}~ch|=lK;f|db;RxNh0}v;oTB-hadjWVeK1WfdVzhb z5J%kX5uB)Df?#?l+=U43nE{jF?7Q)XHp6h#jKUurt``y!DJ)LxQH;5oLohJ_aJ)@j zF}bPMG(me_G(ksty4uIo2g^9Y>pG{1T=oF+kqo{RFZ8{#P4*#F9|%baOt!<9k!?yA1Jti#p7@sItpgeSurjA(Es@t8wNW@p?0B z4yEC7+-$zwha3;{>&5B{-t2fjW&vU!_VWj$&H!KO4bx%QnQnUTFOgu$J&$9rj1|eZ z-z?|oD0s%nrou*Dk~O;EzJxr%%9tpBNG4#>MQNcA1s8N7w9I##|tNG+Pb9 zN_unPy~s|`EhYa{ohhB!S$VM{+b>R5>8GHnNJXH?koy;t+Tm6F3BCs*#5$i%f%B18 zRQ$G$*TZw!tD^3ik?{x%SLVw~Kl)H|Mmmr6^!{G;2fW^lR&HJA58=jp+nc#pxh#$Y z^<)D1)A;wWr5atnP3gzI7;7Z*CV4Eeblc$`{#G*|BBf({wQwfj+VVPx(^pLI$DKwX zo|Yny-QtD!pR2WZcp@z5sd=V(mn3N8JBu?(m}3E=rh+$5{N8!<0MVVyDxbTt_F&4g z3sS`Hp}=DvoPO;4q9-JS=$t@zrrikPzTfn&FZe@t2tM>vN>s|?Gn2RhL@Ri>1^rUr 
zh4YkY62rpo@SnKK|9y%e3Hcg;J#Z=hsmIb zM_^;bFOM1&I6xG6X=OC)?J|b9yx5@))PD0W6^`83JUUlj%@t!RqL^i+Jwg3gzb<;4c?O3b|j5}p#IaS zR8z=%wh>G=W2Eek>zw&CRyOD4TeB${LwA`g)juDTop7%urUS&n65S<6Mo_Nmzknj@ zwG)X5FPOz4jvMZt{^nJ6lxz1vVw0W1@yvN)D)F^wa z^_1rrN+WkT(d28i?_huCUh#PQ#zHi6H2i^yQ9VRTVlmoT3o6$M_MyNK!6D?_Id>CU z*S7{A3vYX2B=}_8+!c9X>E+7=d;rl#*z!h@8|lZ(90&Nbx~QV%m!e0Z^nH(^7PpaB zb|0E$y8uB8<`dO#%tjo~KwA=KGd}W1wsVqoV()3)4`c0e>!ij$W3`wgNtf@oa1Z7? z+TB0nczn8!CHN`Fj*Nsh^yvaaZkOcYmSy%77N5DLm27GM1;$RXpai0qYb z2Bq>22kQE-35E{Sx{7L@&Y{lBGsMJk_wDkvYbxa+(sV0;$5nRG8;*YB7zxfnm0(^p z`lF_v@%2GLe2T;uW#{y-qYQ~FcsYDuH{1^lT-rcB%HCAYWYa#m5q&)C=pVy@=@F7xVv=GgkK2= z0*L%Nolf1zk>=ee67DIrOlw75 z@QN_UfiL>F@KSgk45Fd150$b{Z%OeD92;JTm7(9h54w2raQ#fRO221p_UV%Ic?G;? zfsz^G7hKp4qRxG{%*#X>K zEd&c1m+Adfd(D2YVfc;6KM8U7o^i(umN#r(o^rQ1)YGqjab$@0Ak9x~`m!1>AgOm3 zE2Rrc-uqszh(sWs>BJPGdP>iBPiZzgy*%obJ$UqYkmmUvx>R%ZwM*i*Y|a!5kE3#| z$VZaAN}$XpuCzgAIE-C4L+xU@H8UT+9yAnVFww-yx<@D02OU&s38i^b=kWhqeR5?0)*V@h~FC`Q-eOsXgE{I{2&;T-};&-uA0k$WYJwHwrxy>-o zYPywVs30)n;*%k=dS5kfMS)v(7HtS~P^iUjqfq;|NhHQ@sp?Bbt#0Ead}3N&9u-kw zCc%hLQIN~ovOy>1wJsN{)(;6mElsP(lD^q8#aofcy2keU#yu34a>D)h4EOC3Y7)~a zHvz;vwuyn}VYtMOX@MY5R^zAL8{4_Ily))pF0IAA4yo(@mf~%PCST$JqQLlCct(Bw zHt&#?CevXC0!!&RqJ@fY;rR@Kpgwu~q&Vn*%L?~RWPX!m0kbM{cQ=C6qfMx0Z#*+v zmh6`EWzH(0ES^{OQAGruCNAAqD&?Y&k*B$lI04Q$kls$C&E6g3%J_1tl8c@%D$5*y zm2WVW=J*>erPt@)=|r@&MAE|h)J^aHdn)-ltnI{m+~EItzi(XUYL|jD;|!G0$?%=n zq+Q4DRX&G#weXg?0f*tbbjPLDD@{3ps6T%gzdRNuDU≺i(9@;2?N!b#Z*@k5Sf& z5_7YsTyU&J8Flj%EdwZ;`bCQS(=%N?*5U=thC(O*N?Dhymgw8s#^M@PFe+nC50zfG0J!8)&kkb)TQbvF^*>eqpwZpp-))*cROrHn#78`ZNc^F}`oIhx)Lvk3 zQ7jb(F??rK#k=gdmi;DOwcfDmhMNUNS#rgingeA#10cm~tg=Ej<)OdLzf_iI@C0ei z!eEF~$z_M0$SYE?q8y{4O0i(Z95ypXl)*2Ng{r#bfi?y1W)8_;a zH8*q7OD4q=f zT|2NTyd@Gn)`l6p~u?qdNO4D+5X+_=+@>%Qm%7`JOCf$JwCx574*MqGyoC zQ$k-r`(TsWB1yo*LeByCZ~wc*|JLGvxcL9$8F{ai|3#zbz*2qEbw9_=U}U|C0R^dQ zqhsw<$X?o9ufSVdle@ise6#{LM7P1~f+QeD*R5sk3B!_&DV1&Cv7YJh?{&w@qL4U+ zDN!ZVV@k+gE^NQa^ynyd^a3#f~8n*|5nMMU+cjb#M5-u_T2=Tfjw_kBP zox3h*&>XFRJW3t@EQ#DDa>W+!0bY>9?gc`BS4-y;CO_Iu3S+WU+6S9W#$={rjs0l> z1{QsxNIGQ?hBN|msAK0WL-DfotI?u+#~3^Wgp73P{o*`457)oU7B{+IhpUH(6yC>h z4*g}?{{`i&a(adfA>ZUU``!1&)_x8D{HOr_+s59jKj2Y zWe1PczWRO_jAve>$G7+Us;0wRhYKDpATbOEl#GZ$vgigYxjj}SqVdC`ZC9p#_a;4A zcb<9K?c%#@=4x4r`9v?C7JqsomDF)LD4r3pyS~nL4U<=V#}3c{#?UBxfGjj>HY@hc zvQiipX7?^HN4(FMG;*}VHB)2{J}>+-4UmLb5jUSDSL7nz#{MWQi|(w@{;VP>M%kpx zU;@85?%jEQ{E$hh%J(b9ysK$!ToTQxSP`+e-gn%U1qK^8VJszbjc~Xc3MTNVI1Iq$ zsiyOh^`q-^3x2|O##BA@Mf^4lC)?KWs@l>w$MXDSr z_s_89=|n&Xw^L;clcsJg2+upTfUp0;svo=B*k?mX^NAER$3-CJXXsmq z0_&g=P`-qB5*7rD;QMg11$@PDUtNtygg+Lq@cf`9(I;s-NVaOA%i zl>V+QARrdhg+s?G{@*43w-*1y#s8j>|KIF|nGCIR9Es48>-qW)kKL~$3=+YEu{5Ur z%-oFua|;cO*1165QxW@~)792-o7IlN6juE)jp^z=B3?`73sH-Qi=xh_GpLn&O*gt;Y@cFtY&Twg&JJcxYpgRkYYf)-Yh0@{ z2i@jezpw!>Qousc^KYfWqIQC2#mxK6<-Nl6Kfj&r@{iHe z)y84c_*SJ-M$K|n$u!-@$V#=!RPWC&cub$^$HsrRqC|jck>`!N47!brR#?x6eV*>e zvwh}Mm~=1)WDU z;`x5a?1a`h{u!&0Km7TyzFlWJY`(s>?K<+8-*Gp5d~(u-+e)1+nJn#Gau@T!bmF@5 zb1mMBX_&xy$F<%|o68?h4Sn;BClt%}`x7-%458Sk3f;1%N{xn3qR<2cS^%YF4=*6a zt^e*+Ks`;welwp*w<%hnTV|)&nfC14(_M~jmS7z8v$GcUDnakA6wAvuq5$B%5+Seq zqmCcC&Vq}PVVLBz4>VFY?MDla`fI-%^+Qnp;Ixrkj~Hi^Emm(p_Ju%BvQST6@5R&Q zRw#W&Ms`FIN@5Sko&7sJ=tH(Z8Hlj~b6tq)+DB)7lRLOWzMy z(L?jgiGbtj81Cfduc6?x5sUfc+nCpvd^ea zHS$=HVRPda1^khyR8}{`iH!ZvRR&Z}cdl;A_~Y@Xz%l}T`!1`093VuSi&P|}RYail z;2%69g_}Gl`FU-(&6+)qd1zEUUCvLY?DW{n9WORPyylPc&Gwh7AnLnTIo7V8MjIcG zf}57Wcr&cn%Y%Mns?|VAx#SBws-@N{NvEYBGX37j#7IfCpQ~LOy0Z88rgSNStzw}C z^_^C3fAA3Wr(U6f0|TH73)1d-?y6=n)$7(qn&o~!VNKQ#?!Vrw|W+x57<9~FyR^B z>4u-*67#lw$HsP8XOVvKr{n^jWt978k~Q`)b8g*WOaNuH8Zgn 
zv)!-v3!0Uriv#3-Z|^}2Sz)W+db8XEl(IP|(Mfq!JExW7yKe`m5XjYg3_&OaBY{RE zQdG&s%guIq@wjyQ%~Pa$(=|B~qF)VrW(S|jpxfu}E1$`~hEB#QFQlL>IFNim(S^9~ z+R)y(fj;Q9<9ZiOwaFCFrrA*lQVR3q8Di1tn9!&-m=wAkd8rX^=lKsf>^_SGw%fbk zY@dtEw?iDXgmyvr10r&Q=Wt*qx1Do|PvAOJE}i+MbTm{|-g; zX}J4UZRi$FQ??`hNOo5)Z(V53Kg&*;O}j&Nhj?JY8l)r@lLOghKMxhKJKcw76>Oes z?%F}h)<}OP@_(kKTt&aVw(hTGHXaX*3(854jO%v`FSUJ9$jZipTc;TxG=kb(YF?j3 zl4eOXR>3?$pEyDL=5_@VW0Wf$kz)ii#PH_Y;OHHYm186gpah7=}E z$J5_>XPzT%XhqV#OCj-Vjb61?d=_ofW~@e&5u!v?PD!88*WlbXXom)#_gf_%PgaDX zSv!dj@);cC=)@d4qju3ozqGY6cJ$&{HKTgpC^w86UI_sS|I#eLgj8pM%i{^#DqUVI z-gHE_S?tgV6u7|KY#G~y z*6!R&L5yjBZvN!bCPb=a=a9^wTXopYsrZqOLmaSD!Uy*+p+G z`0~O>qQmAPsYlaPIlSKWRT7oJeDmDsfvss2Y&cMS;9e938WGBUnRxv6A z`-6_$r)SMY*N=M!VI}cpm;XTE+?O4C1ukwI#35nTkS!Kj%Tk4`W`0V{R)QOfY~7l} z*#&(Hvht1{6JiDw9=o+|T5n;6XohQ=D~FOUxK_o~VtPKq4qI}(?dG+VYEw+6%MC7> zd``_;y~Z&GyiUQM>|{GZ#1TxTKkJmWsc8nP&k}Th{9&yJ?x*oHD>K$0COKRu@_tEt!#t zKUKqRRcXwiX+5sdocZrvAtu3sg$ci*ME#cm8Q;2xDE#)Njn|&Z1dnq0jGt$Y$N`o# zWBVbAr7%BH-4fTG)S53PVJ%$S)@}DU>UN{>% zu63}fZ4sOYCP8H$2jp$iX_YJOue=6qt(sO}F`hT=yovtH_fF#igs~vdp<(Q*L+CrPDJzLDLDfe0guu&8~w>Vstqv@hbdl zj;Yu|0vj}C{!DG~_muEDEQVxfNb#>8dgCT(RZ*q%s|%GfYFstxHzevPXmkz=hwc>~ z+xtGHI8qT>;}AM-H}ua#>}+B2x*|4eDp(L)mA_HI5Ut6P#vt<~ghSs;u|FkKCfjf7xrwzParptUMW{qhz)9 zGLgS`NwSvT{rQw$p$^`R4+yo$!tX_jW`+Xa7V6bN`*I#JKD* z9=q~|gK_>!@Qj106k1&mF=2X&u6OTEspjAr+oU=@T~;yQb{O17B?)p>l*f?fx5;Z= zLmH?yQ3J->j$h(Jttw&_vUugD=7!p8TF+}%KAb~Ml-b-TRJ!V&xXi86_ zMZN{_B80zZk{BQi(E0FjMH?+Z;$NBzp2iSK-C%4O+NHN`-s2nSXL>vcT{dm1l0+{V z{>bUgeYQG@fV03LmT^(=rJi=XT$< zJM~PS1;Srt{&BNe$t40D(kk;tHBWvWRgfA>aXIFxz`D>?5{!viE_p#`020#%_YLy}0LkGZ%`%%j2x9YW2Eo+odFxaGyq<-^pZ&G6Y&n4)& z&W8e1;M=>J!}UMW@ydIWs9SN8<6Ra2*rBZ!^=2kh+~U zF`3a&H8d4I!V(J^hr!826sYNhnJs(@21^pqDA^yjbB#X_{8?o{N8V|lValHpv z{C747b4unH{v}JacQdes`ldfu)!n~-2AXHBdTR`*6*!j2o?RqR)lXxfzibZ3M2DSS09`h-3;(i^(n>)|>Uhvg;r*i>9DfEwZ=~o{^HV zLi>d(gMO7;E?qG`6THIBzneI}sGM*ahp-s%Kk`;vrZ9S9O!X5|RPlZOhB9TGCm9AV zMN+>75R7wQqfd)?M2xnl&^XCAP4LD)8p@FUfl~kN2&iee>2TbHYPfZ#@}$5)0XzAe zc@7Ph=FI5I3LTOdLH~yshFo-MNOhuseca9d%$B(9JN26MB|46s+ejD~_EQ-NQFZ)V zL7a)8ha4)EQFaTKH_MdJJkq2~!){ysrDtfR1JYz!Y)qncAA|XqcCALunhNbGHyxv7Kw^Jk#ZPO~lZ*-$&Ry zl!)qW$t}+{hbGM>Hd-HwYl=DeqY4eHWaTn2>NRulqsI1Hh?D?7*0sqDHdIb3!{J@n z&tahoOQMh3i6O3a_|sg^19Xuw4s8!odhIrd@<)klXcz}FQSQ!Nms7W@eAD>Kt50X# zu_i%2!q2_Wxd9bZNRJD1g~05vLoFipPK!*^U<=RGAQ+ClvnQ9Ke zt#u&Vz{Y6pln&9f5!zpOev!AI$C-`+xA|C*5NJzY4tSJ*d@`uVJQDl|q1kwjme*#; z`o09179Rz~2AbTK041-ZGG%%r-^CUJ)W1p0mK>37or_W_t^E8m9&(eusdckZqtU-6 za^dQef$(%8v!26}NaeCe;56{F6>L}mBsZ5htkG{TO@ssD$&Ur6YeJ!u3l4G5qTV9w z_&bWH^*MaKy~NSmn}Z(LC)rcF&#`c3?w#M?I%a7R0XpRQM(k8^gq66Gz-=Os#dk0c zAX69bs-Zqn9IEW7KxnsUo{A(OJX>#2a>9+tVVAkat;VGZ3FS^iz+RZWjJ5K;lw`c` ziBfcXd_+W{!+fdYGdu$L{hnY;mM?ky_+-l6CDu{#Tjq8KANzNIA%m+iffwtO!3QL> zNbo{-%+;!58pn5|OI37{QZ4Ljltr6lF6=p_gn44@x~c=30R!R27@Oxo*F35KH|s<<8_utc8(&P zrO~84oMdLDLC?&BZo3FiPu&JCsQ4@k2vZvMJ+1nT+t^cfEL=#%S>TeMWdT)h)3KXh zN5=3GuixFzftK%AMu?luS3cjT?HhhR2Sw{{JS^@cUKNOoMIA0X{kkudgrc4?%5YE3 zalbIDI;9muoUWXr=6K=dL%#wgH28IkUO%ws3TescHcBAWY%ae<2?C-A;do%I7k>i7#$pKCar701)wvo?5eoShKB=RfeaauA)NtN#&@o|Ac-5gqV;?BU@)dRotyzcx zv+!4)%7^y+`tQUpkOx9Kz@u4r1;PH_G`T21MEYU+7Ww2qAw2`fk*nTPu_PL7YT>ca zC-(fTrR2Q4y$2`Y8tuV7xFdL}O-Br>UOXI@eCWKG7p5nzq|qcsg~nr_E2OB{d<{Z7 zdXU-r^woVWT2-0*IhtcaFMq{VS?+$TEunPTD@;xL z{ce@d7vnK4r^zE*m+v(RvsW@h2W2KDQEBi#$O-(lFdj7{-|Kc~ zmC3ZlhMLqr!-yh~D;G=^m&2|t$i4)*XIPhumK*|I%hNn4)kxtZNtsXeHaxE=2|Et; z!SqiTm(MxNtBATToptitT~5p~%Gfdw;l6v1^s1;jn*06x-_ZU3fl;**!<`bOX;#vW zXaypt#5tPwcH(~y) zJ5B5w}MuSHPCTXmr@ws zd+tQnyw~pBmptvV((^SrHI6paRUf}XHwx8B6`ZjsJ4GwXE8Y8BmYx1 
z89^1=D8KkVIaVcvd&kk&y1y=z>&sR9cI02c>$8DyfR*x@^mW`7nr?2Zo=I8)c0w)$ z?p0ISLc^U;TkOq_e=I@DRxSV7CUkU6SIUgP&cF6=n>W-{Vft!pbAEfWgh%#EVaTy& zP3pYIMhFO%+RcDJf}MQe`h*_|r2BC$blpNcbN=bYU?vV48LmMEx>192>4iF6gO)KC zC~V^4vSzU-qXF}EFDYUl8$X9cit!W;D>##Te-J~i$>gs1=1v+#C0$C2orsC`4tY`9 zk#GI?`MafZA6-pNeCzD2P-acpk*k$+Ogs;kS&qTK5RHa5z1l?Tc#$(6nKMeUCuBKG zW-`AQ+k;b2*Hh|s4!*J}wRpW#yHTomTR_S~&_igpzPl&q#(cG!oXqu6Yb()ftNvcb zWL9%H-C<3wQ&b8jjWXy?bF`IEt@vqE&%;!H}rskDZJ!hOw= z!=T_hkYrscW>}2#4xNGn>L79I4Hh)~RR5X6hb$Jb|D~}Q*FXH|S9MbR{nUzC_n0g; zb=>2xIIwn3vwZ?F74`Luah|2iuknzNRCZbo*I8Q9cfUZ)O+rwvBXMY}(QSPWsDK}1 zqtonszs|9V#%2~BUo7>ovteLwI;5ZqxcxSoB0I~m=lai|Mg%lE6g{3HP7D6oW7F9c zCK4X2PHjzE4a^yYIKQn;SZK@>WB2UDsDWKSgA2b%8rG44`n+A z-R6Pdm#=@NM@hLpS1ukiS87!@!mr2 zUOCBwMmWeTXCM!0OfzbPeEkwE3dNzrI)moxDBB8AhoY1ND;R>9AQr?M-TaC?#?Mj1Oaf871 z0_ZNOW^lYyc7}vMjE>al@Zog*5Pf;XCUTE!ufP3sG#kl`6j9o@w{ni2Vs`r!oL@n+ z<$3zyjVPEoiUE5GCoDN#C#Bk$|w6( z1XPNY2r7fVU({p#48$!lbN|HgG5-FFYIH$fQl~@iV$<`87?X)iw63$Tqb7%m z;~z^`gT4LAeK}SD4t1hiOVQNZJL`JES8&2wF-20xPxN~}78%^+mO;-^63LayH!1#} z$_MXFp)4bQW%zd7{y6pJ^+lO7;Fm2w!IB|8LA^Ihyu78z zdl!w2r#6=}3J+2**~t8kj_$pH%+@+<%_(t;Bq+Hsk$lLtMttsjkSe<@@NTEch7|{J zYaElR&HOFy9p;0UOKvLmJHP(M&iSAxYfoodBz1c;VPjH$*JrbsHjtFlhVarz_N-F0 zANRB+ga-gs^Jk@@1vnx9BlU}OkO8;QDuDJq`JiA5%s=RfZjq~{nVY&u9FCQj&3>Ja zslf6EjYC%EfXBT}P@=9yI(8k@FLKeZu{}Q{nlY?J(Dpj{%h9?>c8Np`dQckvn$DYx zen|%Da8|hi?O=p!5~KeN_L*LsG6&#U_r&V)bU&{-iABe%B|g`MoxAAQo^2jQ$y3vR zVp+i5(FW}v5T9erN^CK^pS3jB?#;I=cZiPAAoq644Gp1v{1D4JR#ovio8?Axuw*W0<_ytHRo|?jhZIY2xF=H@iz8r?mI2!cdVxf$caZV9_sKD(E5FM;0u{7U#e0q;MIQ&I9%CP2|)XtnV^(G-+yrTvx)w zG#MW9{ytbwLtWQX_zHKFuB*j%+q>+CqqVTQEnAN-?ye@p*IcNhA_G9khWCl_lD=a; zHo6gu)4%KhXWEqEkL8z#bGD=GuqFCI9kN!xd@GMp8cTIgxtkxz*dXxKlOlBX7*nYH z-atM_bD|?cXM9Hl9yJloNX)SC0m4KEl~kM)wBkTRDB4NTewy^CI`S-@JmB~1*dqUa z*TXuHSHtlNCqaUvLKJ&_qf(dP#TMR@0{LkI7XZx)H_JIsT}%(PK$hX~!%SMV0sMz~zsthk@!ahmzO4iHE6V$Gv1!wUgO+@6_?_>4TP>x#Id?a zKirwZUhK7#AN2XQ#uJZXL$cvY9udlc&#Ccb@%6Z92Wt(Ldz+D+_s&v2|Rf zhMqO2&SYD)xy^dV){%Li@Y*aAP~K0^qrM+3&tWe0y_iUnbwyZem@X8j$bo0P(x-JA z_8avMTB0K>iiR8M)#40);gLmptF*(wKc8hAocp%_9-i^Z`{L66K90bUpwFe-bMoFq zKB5dkazAj$m#gIKd&!+I;dHp`D()9PhC{e~UY7=b8b2c@a=Mj~m*lbxi=)sV`~RX6 zy+d?-w__9H29Kcib(dif$0{lK$GeB%AX5_&WYMB!qTHoAQ(;%~F2pEgB*Q&nM|57> z`I$A!t=i%Riu)gp6aT`W#vFGd^p4Zix!m_aP!0-S^4mq)zLG~iV*q26@-=w!B>gf1 zJovZ&S+>MRHCS1hQGjWDjrW2lLeR4IdixiYTb?oN5(1&T=l@k_D#g8YTkCsIu+|v| z6ue*O15>IWl_zkE08%_+AwjLG=1<}>hJm(YtFjU(A+iu$C<1-4zn>n8fp3%lF9p}F z$gMuLFfr-MS8u>D75XZye`7}ZVDH~!TyT!Q)VA_??{Z5+O2iN$9e3mB`y#c_+3wc* zTj=&6D6yuf%?+OM5&c_|aQux!v2mZf;%W4XUF&S4ol{6DnKZl{m2wPN!7O`OI1~(> zUjyKlNIaxguHz9yd;Z()_oq}ot6YMnU=^69U^Yv(aWNmbOl{r84$rvsxPn^ym5_IT zC=2U~x(b5YsxCFwNbd@UO?yU~|0_&;1ugmkZd=ePkCO60oRgp8jzis&KaiCAe z4ZklrYuz{e2{54-^`Y_mc21_x9mlxEXV-DvrEcx5fZ48crM&L^!dMwn>2xY-4iq_7X61XQ`-bw@27|fUfm|!xyU+P#DWVgK@KF3 zw{my3%V#xu&^p|0c)X-tR2Vki@V6zy6{=~uqn~>g!CqKLN{>i_8*V47)mx?0uQyUe z_-`e+-#pLqYQGBi5N-R&4qidC7iF^Elt{Ju-J(C?n14FM72}Q8Zbc~MZp9`N6WUdL zA94~C?Qp<0kmJ_=Nid(3OtciS&)~sqDzo=}$41u^C@m1c2zsvV;?@#WM&5F(sV0PZ z7m4UH2gIDu;Q&3C3~F6W4wKDZSWqd?aq(QWM=HTv?tDW3koNM3PRz|cP|UzJuN+iH zbbi8AGBejMF08uEDT1kfh69Meg(3X@;{Zaa8b>Blap-ns|M9SN@^4?~^HBRE(=OdC zwZiTruowXgvTH);19ak{KB5JAACK!T>x(^{cUt+Enu(JCc~Mpqv^;OU{_M6gX)*1W zx5W1J4XHR5Op7$%^RUJ3K^~oyeR{y-ay&oliTh%ARHVW|=pqLQK=B~ zgIO@xp^@H#NuJy4?`bFrs)W8CAw7nqPNj_Vo~|hl1jPF%WK1xFR%#)|z^P8YcIUpt zgvu8D5WXAnsWEM5YW%L|eLuqc(eVbJg+7@%U~C+l=IMdJaL!(5>ctiEsv<=WVy_Qsq8dvu!gQn=y8lsF%aJYMMU|1{AQ1$Z5(ncNd%2T4=8o&= zANk&Hh`914k}v7ZQpGC*ZlMC7BOCwWr_}0|L3_snFTK4i)GY3l9~gXPYa}V@bke!I zN}pmKq6pXoV!-CloTck8Mh6V4U49$K19A;wtEk)>mH!(~36S 
zvMi_u@#U7dxJ#^C4fm3cOjQk^5@b=y^ zy&emaK|BXl%5sV3o0yeo?KNNv@t7`>czhXeJ057rlFnsfwJN7%K!#^rR*Bj|*Cz1h zx2)p-8V@LLpYM0y?F!IAOLj3Vocs?xq)bhDARxkj=rPejR#%Z8ZNBgJyVSkJO=pqJ zu3yE86V3;zik^)vvlc2RF#*};`+yL4CTM$Ft0Vmp5AtMhtzh#Ik9O5^pNU|ZBOj=A zs0vs0ZK{tN3v-@D?7;i)sC1)qLww=bzA5I~v9v7t;>fTBw^IiyH+K*al2c<@X22K> zpD3R8i31r)T^cG#$E!{MhHV+MgtGsQCdAwKmJ6gzv&x@vgJ%3vZ}adEEqHN-AyH}4 zWAPh|e2Oj4NkM0B1RsRnek22LQddiGN;XM?w<$pkrb5c^yK&iy+;Kfrzct?-@7Nx$ zNDZqC2FPM`9&XRR*1Pk8^S3X3Bf%+@YTaIE9+RWd7K^u>E(xMXAwD*T|6`|xv2!^&mVIGU~z z*-4Xu!-PYL96vE799O3vo^d!HGw%U!hiPe|LQh*}9y+B&pipB-76ot(Mqscy{6v8R z&&ba)=v_`zC)UAhC2gR#Xl+wsWCa{dDics}n$AVpIzPxIyvbh-M^eT76QL_)}@(|PZCTv-UFSE#fMjl%#tHscBj=ERm0{pUR+UbNJ0EE!@U&dqydyqkjK9m)B zP9X_sXM>7iN9Urh!P%YXfeG|%1=Wu^-Q9z`ySux)ySr;}65QP-xCI&wXY<|hpK-^%kLT%(@$m$7uibmCUA=16tXZ?< zjinXVXD>fp{Yw6IM$64xpbn<%dI_?mS;$Or77#K|?iIG)4{eO5Qnyxgq3vA03Y*^H=LK zfSbI}Q~#0yl&1&<#4j(n6dpnLssdeoCsKdt7dozgjZkCh2_x)=5f$0?a?HmQz6N3W z74gehkLje*#e=ZvRHnO5utKyM_TT8o3}xt${D0Y78vpaL{lg@~+H60EL#jX*u79UE zA%Xm80HUSVkm7i!q%gb53k8@)2r9O#RE7x&>P^sTw@Bw~HtOZ9h+s4id`HyzR45J* zmYUrxmFzY9o`9)fVw)FpPW8O5y?Avr7#b~h|s138EWdn@6E4`liE%5$HIGBMgL;zKAcwq!qX>jE(Yzc;g?tVQ$vnm8mpSG= z;SZP_ni_Bm2?cyx&#G=H%HM_l{CO4%tz44eld$jwkcNa>IocCR=J2I0pEsJfje5_bhse+wQj@x6ts@_qS>yypK%_P#~c9t}yQj(K73 z^IMnb{;3W^i%`oL^M6`d=C%bN&`st*-+d@w{*^|~ojc@f^(D05ot zJyUkWJxJySK;>j7mQ2_Q@e<^7Vlyvpmw!kP7%>HQN=8ql%HGF;Ei9lvo!Z#Pjn=7V zMh=c6HfWsWe&74Ztz(~HN_kY`@B52VpWgn}s;PkYjsC{AV_pI(3)s5P@h0zux{8Fm zYB%fgijhZTE21^qVxPLyxVZ=D)=C5Uk+Td#qVbB4dBl-JAiElckLIz-nwkFNoDf-U z&hUitih!C}i?ID!iM#^8o41j-S1BDk1?LVm@MH#Topt77sR!;E_=fN@OzqE6g%O}3 z;lP3w{W!Nhxd|l$!mrzW{w!Z*3uO6iM9K($XzavKT z)6sTxgq&>zpL4bcJie!CM9)Qcs!TQdyGg}C^WW_O2ip+zO((cUi#?Shl8#%MdR+wW zjrV%l(%W5XV#TQtDzugg?eJmLD?_>GAdW)dU+z8s`Z1}o# ze@mKaXlQs1dbWR9lDc=yDWXj$$!VegEM7V*%q>hpUURx4!_!e#XF2iZ9rQ{dbWOth zA|^g`;JiyIUf+1hcN>IuR_#PelsDkTqcr?I*!6H+Cr{V!?JjpqDbS4NaX;X8#dL;r zo7Eoyy-_8qc&hiMBa`z~>iTf?wK;TELZ%u+F3)D8XTY1RT;-ikhJY3_3*-#Sf_V4n zuTsQ*ej@c)|KbbIx=>Sx!uE!e(};pAqQXX@{zKblz2*1$ zhVS%8OhX|U(mTsTlg=!!2;t8@hc94y0;oG;JB2c#NZ^uSw zD(EjU3Nk@!(pBc0{;36X|V;&rP~2__O(Av!JRI=2Rol zp@^{My+-q-zfkb8p~geDr84E#HlU&G8K(R_ee(;Z3k>J|PrECIv*b-?sk25AzIBKa zw=)8p&TIw_E5BUP8q7Kt+bIlJj?nN*h>;;9a@&^}E68;jp+>W_jD^(E;7mtTHm4@h zB$-T0>)x!$ks2ffp7YdQ2OHk zX1}JkI;c=aOM1U%qZ++A6U+im(xK)$SI*g13XjjQA_5D)Q;_?s!rHc#h|+gZQCpIEF?~w zL$GhNS!Z9kzpJ(1&_LTBa(7Cn!Jku8Cj#fqr8V%UPAe=TRxAn!%DEz_q1ujm5l|2$J3;3XFfqy68q-tw9!kao#1E{y_d!uUd02B;2PwlsuxGGPDv;PA5M!vBD=-un?yuvM$8>U^tNQ|5&8ziW~KL<@k_ z1j(*5c=ps*s(9QUL$OyVxxR8$hg3I^$7n}UC~^xeqLX2foDY3|I8dq;pZRj*ZS7!rx>E0o= z*~~{0ol7R}Uh3C71%UEzRKHOaOpX@T3onL`uXAY2#V#V_w0)mj2Df!eCaojHP=S2j z0Gx7{1}ExJsyJA_(!}{X=dy-)(%E8R7EPHuoiF#hP?!AC3Em0m?WzpRmiFSV;$1&U z=MhRL1CJc{vu4H8Cbr z>{x@SpF4wm&zIYkuz!e$N8BimL?&gaYKNY}aQgJaDTiU-*hS~EsbH4mQVou<$|233 zT#(k~w2Z;c4+1G3l@}@y!4y{57s0f#m&#g-*p*Y8$B5(%Dr`ArINGXtZCJV9Zcnhqw|UBhqu4A9^|EU$q)EpNlJnH0 z_Xq5uj`MRIkPtT<%7pu3igS!rS4ooN90ff**He20fdl`DUPdGZ^T)UAZ-z&cAQ3ZT zdg~Pm56z!#hAA&Wf2v_Db8-)-V9a>AAr`8A2?x71k^y+$Rkt4`sghNcXO98}kc{GCkQ{zpe zoD#s?1Uss9S~sH;-7~1M3-F`(Rm{%CKA-`$HoqE2oSALP0x%u6{&3|=O@j4a?m0J7 z!Ux7Uax9J0*Q3R*_QTcuV?VH$tpxJ4_wn)W~7}O~!PQH~V zbZhsQwwB!xImNE0qG7i0!h7(CNmXcuy60i62IG=`8B4BItxVl{18n3v(cAX zQd;5MB)<4sF4;_HY5m+%2vb;X$BB6Om0YQV4R1|3|IbrV;hPy+*}ga>n^(TuH@kJ$ zoPQymBDD6o)e19r_XLpJ zYpHgou94d(Q!P5(@@rD!EN-$3ieQjM!eCw>F=C?G)0up6lBVH5@4rDztWlw!nVUD0zOCs}va{t2~ z;f~GR&~GeMD(?C+EUEADc`9Wjx^)Yc65pJp?*9Clu8GFzgkca3FdX28AM#Y-?i}lu zM2He5aAeANi{*35b@SFq8YOL!V9^KwG@%6gxJU{m@ zBffimq_-2WufiU7NW&*@a3)YgL^mX#EY<~DLgZy`m>p5u^im^NA#s;z7L<$>cIw^c zz6bE#hVu_HBpJuyE4>Vdbh#ZZM*(-BK`cq&CNa 
z6Ii6$&kxA0l(4cQq_k}~l0N3w;;-8MkMNx4ONS1~naGm^oL6{(e}6E;Yq(B*Y_-5X z|B?H`I}(*!^oXezX@I)3lXCG6LeyKefY3MXB$?)u5(L^(pBBtd2^$x znMZNvL&}#Legq)AQYqC*Uh$a=T4P== zR+x^y4G3ll1d)2~M-Qdf{ma?-9@(!Z!~Jrj2oZEQ)>0e;#MAS6BXJ>-sSK&}B#rAQ z&ZkM*rd~-n{iP{bWBMp&)Q%9u`IgjK}6_d5|qPOx_J7!h4hD{FgjTSA(!u=eF` zDbjzFzJpMDr7=aF$A5s*wfrgRna%d>>biI5y;1Mjs?}i-Ob{9VB2elfEFSzlA(N>$ zgo@Simd>_^T=a^a0DD?B;v!y^LN#^(A~N(?e9RL?qEi%UXC(HMk{k<}5>uJ(Mo)Fm z+6?}c;5qdm^<%ar{LUEBI48=l`}eYzIWd6>6ptTymJ0>#qLBMbVljnzSl>bJ)s_)C zE2XZnK~B9dljdv4{zm9P!7t_cVpjVt1;fnglMhKR^^-RlI;)3V0wVDUmIcA|1>Uq= zPHj$&6K_tX036?NQhNebj!SFRP#S9k?NKpepZSNQ413JdvY*X%7A$V0RdZ>_h|!bah9&LWJNW3?v&;> z6nArqajQ`*3r#|yw$c~2I(t0xw0LLh2Gb8=CO>|#b%-E4!yrc~;+*2#Iiqh_p-Ph) zy~8^cK<6uQ`IXk+*RMVqZ$I|fU>tRlhLON)C4DdDLErC>jdsoz(alURm%F^&>9@@u%lvw_V-Gbvnlz zg=|+163p(Ocn6$gwv1Cq7Dx9gKr-XQE!pije7ddF@AaQ}V^fJp@XT8~!6DaSoV3&3 zyIZFXnFQrmyyVWHU~XnmGJ}@p=9c>t1dWC;gHrZRQQXTP%|G!`QioI9M1LUv*jtf1 z!0;61Jtptne_F0R;$fT;qDh~V(1J9#oIn%Dh2qvDY(01I1poFB%c(_Rv3bW7i16Ri zt+)sNFnNmP_OtsNBo)00_)`CAM|c|N`fMAWlVDcyo{;}Gqpsvci(L^fk4TSuRa?2mhBUZ4L z9^~F`Ftl&{8x%!lK=$QHxOV6HT@@BF5|NWMUM*+j2MwjPMjbAe0j4iQawl~*nwfhJ zU9r=SG9aLx?ECWX-t(FS@RscNHh5yxA=C!+0i*|s_)4Vnqkk?Fjygjxnc)YSa;Pxd`H96gx-V^t9L$JS zh_|<4tZSD4X@Yp>4deB3r29_2VZ4FD5s8^q35j{wo+vb((>mevL6Y8cH+Z$Dho!_@ zVvcQG)?h@|zy$b-OMm1t-FL1zNOn@AJC0E`y>Yx)*v#HYzs=3g=Jcg?CLY34Sn2W1 zU1{@Z|j?qVgJb`NFU!oU~itgSL5 zhu|=wf(}F$-3OHVOyn18+&a5YF1hKl&|3DyG))V+T>MRM47q$Xq>|NVNHzLqQ(>z7 zHJMHY&fQoA7fO%>9WIN@b@FVK`v=ONj@{vzM%Bje?Q~go^JLo31)LqG@ci5R<#Q%{ zi>IAn;->Cu`I;yRxoWdnIyJW9#6&3wsFMOTG$`S1fSK&V_|5K5+0Ijfqd=snvrIb+ zjVz?HB*-F#TR0DAvJJ~$^IO|Vg*!P36uWJdQwUm^2tT0iOz=dE3I)gw3rHNn`R@k$ z0aa?`m1RxJAPZ%RavwNfbCf5`ov873pO; zBtaE(y9#?j{69d#c^^Wgu?5M1PoeOuU<8o`_`xZE66lc5tI_UW9M`EE9na|OGb$=g z@9FkPgY@=%{3{q5KmxoSPV;*!2|Aj;W}MlHCM6^!Ac7?D3ua9W$TdZl)_ewGL<2V` zES0yvp$D=UOKkU`2fd)7`JV(v{05!m4}{7bZhD)@nUv7z^>meMba#|%6n4mcaiWF; zrWcr3?1+c~ToNX{q(%h;7^aC)q+RC?EJ~f*iG?7^3@{a$CXnRjt4Ll7cY5WBU>_>* zcHXi!={y$sr{lTObH2}l)QO72N%r(Vs!@w3KVkTvbcK*-*~3Y$U%7prX`{X7}O!k_C3@{CDOxDA*9djUhq^>yYUILnm;p zDwG4>hXe%YPSr&6Pf*!adOsAl$A|I8Ri)?IZ&Y&hLFe>xA>3{4lPC$mGcx ze}r~YK`rR-l$px{%f28{|80XBy-YSzEE*HaAGjg`9*YWGIIwte5EUJ0VejkV4WfZs zfK3VmCP82i$+gadUWS4wCgb?NM6wY>$D+U!*5tOxLW7F}#`VRO3yK3zHUuQU5X>+9 zzUbuQz)b#+EFyyc4$$!p1*WClhvF&+su<{3n9C9drkiMB-=RPzpN$hsKm~$&w^t^k z3bglIBZX9UewT6rCJ(0wsz9Yi=c9mP--U8%^CXl1h!c}>NC8YP8!K`y8hAQQZu8$r z(eQwA{-jF5alj%Y!{=2X`iBA5wjdSGgHk%hi0K2W5hu!7O|7}U>D{K&1p3knl2C!fTdsI~56?I|Q}$MCyjHNP zE+vnYMMi@C(CqO1Z;z%j_o!r7Tz@{^9!Wc3fGs>qKH_N9+pcPK@wKUO``jd*M{ay6 zF^X<4qOb=I>GqxYp@m`kH=0hZw);JU9gik65e|N(;Y6RTzKUn1QYXww?ufYeh+hO{ z4q}vvILmE_!NEb2l7NEz0QV%w|Ns314A?RPKCsF7=2Zm$|6l&!0|EaS#t(C{0I&FhR(|7@(fX&PcFIFbj- zDZfyvMU73*Q~VFB7-)OYXhbH8RS9A9ddzL961YEkB=l`o@6W%zXDz#P%u(UJad4ot zJqJU)<~+LV;uf8Q{W{^MgLCWey2+rh#etbcB@VfzCDSFVAt~D?312zB<&z%*s_!kP zQRYhZUYtt#UMq0-$~q3n{?MT@x>2QrcxBkXli9K9k{qkkEn)!=;QtECtd~3MbsohZ zAt2Otyb)?Y0P|&U#nC!dvp$O^^x>WsSPv{d5Z=o9biAP(ZWgmHk}}WSkX+1A`4C$Z zr+-^#C|YQC@C!sZ=I{?W#hj#F3dv>_xFA74$t1&(Mu#-&4<|6xhVb(Zb=)#A?wY{i zN5Supyy3+0ugsRquduO1t%Q{haDsaO&(UVzDRZIC?0=tZ&XE!q?AKLfs@-IUw&xyB~rB$JqU@EH>3G0)fwgDS<_jq0kc&a6o9+zXwcpOB8 z-4j&irOCxzfZ!rJZyE}HqY^ z-0%&vtW+-dD=g~Auj%Q}@ zPYOhPbZO1yk~V{Ik5AVf6s``Tk>PohVn(WXr8g8u9k z{$+QY%^th!`KtB(r*CGH+Z*!Or!7cTYXuLyqhBs6)SBShMDrZZdp?~$bU(Q64#IcF z)aupNcvI~MN42`Hg`19Ne)?t#AX&pjsugCrt(wgi84eGb&-F~fQ!GnPS&=CEIp2I< zg1md$<@U9c;&W^)=F37nlcP_+W_|jW3ZyrG>!w$dmEK1SrVBX`D1nNl;EqpBd>0&zPEl#?QC(HTn z{iK%7JI(z1*!&{2WzV?H>%fB5@t*q`!*-z{z1@Dgxn1qofm)ApY+%~xdgs!(R^4)y zW+RGkE|axpGt;?dD+@y=$5^q;&c7sTsN~EFZ<2&1>t(ohYl!-+@S$-DrF=p6xk@p$ 
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/reference/images/search/learning-to-rank-judgment-list.png b/docs/reference/images/search/learning-to-rank-judgment-list.png
new file mode 100644
index 0000000000000000000000000000000000000000..3f0c212df321b679490c267a6b2a53f2f78b48a8
GIT binary patch
literal 54420

[... base85-encoded binary image data omitted (54420-byte PNG) ...]
zYeh`}Y_vD?{^lA0dfHv?;+>{HkE~Gm(c-ft3!nhvSduv9lWcww^5au_CI$f+@!I9s zP4J|8*xYORoy*Hp&gwTejHQD|0->?J*Aqw>fNYo1$rg*abc4GwDRCEP>s5$unEwvq z0zbS08f(AhI{&!&jD$t2=u7=lqT&VqQUOlZvo~J(zMY^K`*E>E0mG9~1`jHb75#%Ral{Jj<1eJ^xdxESFit{(KpOnAwWm+!~x8 zGFm-TZM9)I#~TnN!ugOy+R`+U&tckAm4;FX90Aj)GO%g#IAMtjXqqDDFIh131R~ z=#;vjo&XdDT93>;w`>qws5B95RiLmYIBAgxM^u#-ozh&b)q0K-DJ)HJhGb5 zqx_X)@sCRs3k|~kh4VDZh#miaq?I;PcXC*p@VMQmTzVi7{q4>_Rr60Cqv)c^wwDU> zonQC4E!0#y?@$_**)PQdc&G50Zzr>PJ*a^hNUj^WyvDDf2XJ0AQvYQTBWPV17D+_t7{}y*U^u1BJEHqKW z>L^e6jwrrMRO6?0t599xQL(4Itx(^wC4fzt_($)Y6j!u>rrs|dK&P^U>lU&Gf5SIh zeO_NsTq}Q0<$d#F0iyP5OgufULYKbk)-6f2Q$AzQD9agmKnQFIAAF3Wg zpx>G!CSLRDJ{2?VDEDq(QB7Y9!OwgU@-T6JJzpV3Ra3@&<&QHCJm~g(j^6OH%IX(e z-b$GqX=E*Ygblz+ltxU?E{d?zUa$rC<9zd^dVBz>|xHdFplzBxesRjG0kqTMv>;hv)RCfvZe?r@&|VxkN|Sz|Y;?tFryX54E= z1Ow0K`(v``T`7zm$v7#e@x^X(nle}Qpvx1=F6+%lj4i&C`&~Qb?GWJiatJ^n%~XP& zE>g0vq3V7CD+3CiC(#)H4K3)i@k{2!tSRT_t8~Onv1InlUnY)R*NEXxP3MXD@+1NM zU0XuM73mH1HiIE{F0T9NkT+1^zjuV1&Ncv0#2N~-Z!4Be{Y03jyHDkn<3)Z(Oi;*M z`dc94wTq;@X4t~6{Rh(chbmi+0FbcOwX+=&){&L#+0R%hws3~b#ohz}+0>drCW}30 z$aTxmudohN((~fVs}~>9*et8g7zrdqQ{sGC9h_miw(&9W)cRtw zCK1GO3ER~3`5pAi@+{|TCg=0T@5evVxHF0u^r;=@D!C>~G!>kSS=B&h^85;GHjnA= zpK2zU&%Hu(hGmakR7LQS_ic)b`k(enwm2~t-o!^`$GpFCpkFEf>~vBo-0nvy`01Qs zk|nw|Higo9=##=y^-ZxVxC>kU;em=QjbHCMB_$RKs=(?eY;uUQJR#h-lZGvWXHpKC z^rWW>otkh<>cKly!w0!$?CV7RU^eZz1#r3Cw;^6dT2&!l>$Qy*0bx?mV$Xp&`(nts zY}2eaQLgjZkhjdXKT3Wv`Gr@as57OWu~Qq!Mj!O%wBy=(_O&hmVwW4Ul^0l1=T)Hw zi(HHFInH#9+h*FmGaN5)9(*0~ju(|~SuT}Zcugtm<*;}43Vv9Xfi|)@4#s_{x<3nm zKt~LU^9P)--amTPBVMlC_8~w5sFh!OoDo@%E;`oO8)ocA9G&kcRwS5>cI$1eeRRh0 z_Ip3)0-rUX@S&S;FcrzWA47@(DURVB^*@AUep~y7*@L&|5~&e*Etq45FZAj+_@lhT z$D~%7<2gi@))DKRxBV~r7M0PHuBFpQuAMY#%^7L8doFI}ir4m!cT8)*C%WXq_F3=G zfn2iZnU0?QoG3`9_@UT(#Y+SW5xFi<{F{FklcN1+`Yfaw4bt&M{CiR%=V#Hd&+UY+ z^r~c=?JJgcW0O?mQ(@Vs&rhVyKcz3%XCt>&*wy=U7WcbrO-9AF{ws)-AfbSObhk)Kst8D{ zNDC-PcQ*^^kdp51?iP>+>5%U3?l@zu_3Yn%_O;JB*Z;gYZ(&|*jXCBRV~zR!-1j|F zwqTEucul%^)7L0JV)DZ@@0MP^>lBc_JFblWE#Ia%4-9`?wd*b1zDcmKt5-6cHzFiU z9N8b{5@zo`mC0ruSxhyfr}L%-UCzJaBgRJ83VPMICma#MPsjO<$=uqQ#s=Q9lezuc zZ-}^R`fPEbbIE!wgM$e8Y8nZ*EpEZ%w2ue42G4F~)T#o=P?yTfujc@N5J7efK`B{e zf^C552AnrN^bez(t>99I;g;<~Xf`W5*H5n7 z{c0&OLoY6(H05V9**g7$NGFwue}~vPpU=tVNNmX-MvoRJ940Go3_|F4o`^IY;^r`f zMxx$j#JLgQt{>n08BeDhYA{@>4ditEZQW%u<*j46$Xt>0ff^DKL;YoC<~~Y}OjdbO ztuxwgx>MTvc@WJBDv)$YJ1mx~l-swdV)EKOnfvv%hUJ+4IOo0N!(Ldpmo#!B8Jd1 zWoE3#7T1y8J4^|HWRmZS6MGn6zj{hAZR_8slXs$w8pnfgKR`EDp1u?x4ih{g{F%ae}qD|hNe(F`MJ%DF`$h+NS#lrvVSQPSMf@bd>V=IpO@Z9 z1D+y{x1g+%X>#YV`wWjQYK-#-oj>tNU7WPh=a&wfC=244%Z0$2@UbpZ2K@fuiZsF2 zk}+V2a6D`9AR;%_t2dst?*94G`k0thEV5B-qnE@XdYoplpypS0uZLuWeLV81WL?ze z2JS_ac4kJjl_=TCk0LyS0nfLQC=N~@(7WeSTC0u*FNHTr(awX08?$z(JCkKf{7*JS zLq{*h*VN%da$K=JsM?ufNuT~Q1L`Bn{hOb#UEFg8e)l$y4+FPG(`iqJU4c4`GiadY zY8y$ovEp)}BdL7&g+E#q`mAMGqG)SlJCFT(IWbO@3F1=RmATo#XkVp3=->}ODv{ak z?A}M85ZWHF+umC>(azGCUG<_9U34I`9&Eqb7Yk#ccKrbA2A2q%%Qw<44^?(*4o6aK zCR;7|M7a`1smNPTgdkqEcy5dJhW|O_;u{xwSN4dg>j48{XhvUr?*H`3JE%)kk@uVlt5a2 zX4)Ogm1j?N%)p{n(Z`v3p8)()G@k9)gje7d~8%!Uq_DKjto(8Gc8LZaR zWOCZN&yC}R%Z=BX8jK|Llb_4@F3tKX?w8#wqM()McV5n}ACspE7_`Dm%;di&CG zp$IjzU!i?KtaT6oibEYeMc2-KNbCYf9(TlvF;yk!a5jThxdu#&sfVN69Z{8a&OmFy zzdL3!rdXL%*<;vt)q3w9oene$Ky!wyV1BOD7EPya(elK)#{|_?ikW@yhvL3fyW!OJ zI|A)Gwgi5zx9L_K{<>_|>|7sw!;%x1g?6Zy)s4dTI5_jQz3g0iD*% z2L>);(EF6$G_MF95G7it7`ouy6@<@~X0f7Qr+g=boG-Q`&Ue<)2A#z;6XDgi3^PN> z=m+~+8-1CGL>42-7Gh236HG3nskY-DLF&-)X05$(yN(JG>}y5Oq0^l2Va(s#XnaNY ztuA#IWmc=ZIPAH$e{(1C*=C>W&N42@2`6O&pJ&Xkr^*>iE(LL1$$X>3lQ&ZtEv|Z~ z)f>UGGyTy>cg5TM_TM@QWSJxdtj^su)ZfI?tQ*MP?5C|3T?)#~72U2>U4c3&crmIM 
z5q$Eua`F$uq;xfFhSaNl@t&UGjp-lqt2!qqUH(hZSNJe-EF8^@!eYK3Er!UhH_w8_zYc?Kn9#3b=IiFs%QD9X5qc!i_bf!L%EX(y$s^}LP~*Z z9X{}6r?`7wpjpQl!K6N9c@I&05?IGZe?0~7E`O@k3g;kZUw1?$fthdR*Ky$pLD@&O znZHyk#Qi^-LJwOl%y7*4Wn+*VH`eU!uV(I3n4hb>uT#}IV9v(2k)H{XE^aAM`vk?fM~Z>(t!+07-9 z<09^C<`oUm>eT>2&F=(nZ8XYxJP3xJa6s0kc#x|&RQiwrit>fnGAKu=kY1`w(zeNq zh{k98QGHb6!3GIE4vMpg7e`%PwTleIeF)!`(d=ntYo-NKiOzHuB9Z>7)Z_duBUeOz zw?_)AXU~+0c1Ap=I2U1cC$Rvuql3*|xUhSXx*s_39cSOtlcQoUK9QZLg_l^K2|l z!pLbS9gxHtK6Ayw<67}gg}Ru#YuvIJF(Aj`FW*1KE~a+lt!Q;mkbr$K5yG2fgqb#+tWc{{HfHJ6^RA36G}qS1}xCj0J&1*1la?L6ac;bVE9-{4nuG0#K}V? zaA2zF{%(&N;XEyjGn4?8+8aO}_Rpb4)^N-bMEo&>7e?OXrzt)c3SAHC+@C)r$9@2K z&6AK2>aXx|^wjX%Gio?G?5e%Zdw4wv;TG|e4+bB0V!WF{1S4-WTgl9cG%%>9Cli7D zz(Y9GGvPP+-Q{erPi9(w zP7|ulY{b{>=iFkLH$bnk2_e3$LC(F)$e%we!}(zQKN4(wBS9ccd3wmB|B*xE{O+Fn zoR64D=<9!@d~#I~Ml<;H4d4Ixgnf$d;ds~U)*B*bHYJ^gYXs-P0qsp0?9$92Q^xt# zy?#uxrwmP^pPq<7+W<9|aI_GThZuur@$GTP$q(3o;~8q^Lmh`R zXyhlhbY6f5Gele9V84S#m(=brfE*E+KPTapg`**SGIMS3j;j79YcM1Xe~2^$mCq=D zVYz^h-(Q&KqGYhi3~mbJeJDWMFKBadmBE)b#f}l+!GvK1x*`-a@bNo^FR`=LLMA_T zo82G`KI-OcR&peasV1uuomhs$#&6J~s4hE{siI5B{)+vJIrJp}x4;B{_1c|^^ z45#_#)Cb@`{q%k?0bLJxXv0$2Ygur?$VQ5vmAp6X;~2CZv+M8F6jmVZ#13|-eyhW=!S55vVXvCj_!pgoJP`tBj(E)ZDMwvZ?_z@kL@PmGWUDBRGP5D2p1td=Fs?$Qyw02!hgYZ*4s+i&H-AYSjNaxT|rpZM`pr%#$dQ! zKpR=IWU+}%{?B}NAsgrm7lX>CC#uk;p(z?4PXhjpk0pq~ItFw-Z&5p5H@ScTbY>FL zO#|!Lhz10kV7SuZLz>8MB(MO|-eW99Lhu2fMuP0Wtp#5IrO!in6<|G^l$$so=m;Lb zSCnu>q~Bl(@lNy5_JiR$6tv}S+lG+5#eBz(R;WII^+F5R(raxiI;jZKDlEK{#1+`mpgkHz&O|*@Ke<;adT5 zqgFwi=ARMA(2NqBn$7NQC<4|$Yc?vXnY6|rZl;-UJ@ohgT9h2Ri`)@zuLpuAs3&%YEn1k7nVgC3E8YpvF=y56O;Bqx}^h6 zHkmDCA`v()LFNHu+u)|O##&`8|JEmv$P+%(kV5V3?A7;=;?{&b#Jhd!L#dG(L&c1@ zt}BZrXELo%xA!RM-t7C3OZbrhH_(LetF-U&jP7@T=lVVl6YvfHoqHC9{IH}m_HGEN zn@OYe3mP{3B4gm&Z*EEw3*S7l+Tmc(D1K5IIlEq^`1915n2(FnLGal(awuN-XGg!Br zT*y4Q1Tgg2Ie|8_$l9#FHt5B-&M%TafnRWd)W!8?2Pb!;I0NbP!NiM^1`%Q)E0~8% zXY9FyYQ_9;ujTxL(yvc*0fs+B0OG?6l{dJLh}eUGltg_>^B)#ZVNgs}ftt-@iRyZ` z-JcMq30V@JY$!AhTr=Sx?EeS3;D=AiYTE5^!FhZJFRUUtRW15P3*m^R{QvvM3+yFNLavtC|jAFOA^QjZ_xYp>j!6lp-lh;ryl;0{8cbu621 zm)u7MFCpM%mMWr1Q4`Viy2Qtz)lCC>gYfm`=k_0xI&R}x%Gr_KWDt6ys&T??(E_>5 zq02GM&@;G*#XO!^(=_bx7dRw`CXQqdE49nSQ25m5f4()SJqmKaq!`w8y=S4;ps>X~ zNSEZo>ctb>ezg2pG+Q>15tI*dL0LLqxi0kfs!LCGmaxquA4=eRC1md`#fVq8V)KfA zuDH!d(J!hlR9>MxB28^DYha2Kw+KsI6eWwem~rY?j}$3O1TS6y-HFC|HoDyORvkLK z*f(YMgs1=ZGL5?MRJ*vGYAJUzro?Lf&zsLd&xh2}HK+j5@9o%l)wiQW6 zV`M+KDT~}PXmCrfX8p&xbWlzUNa5WJnNmiBzt4qz(Ie$(5WO<}>SMg5`#cG~r05}I z9?Ej#;*MeHN4#%-b?`a}Q`HHcYuueyBSq}{k+U3#8GT5SY26C##p<}Jvg{JJM!YyP z76Ynlgro6Pg5zlx&)t#icv!yJ#=_vwN!ty)u{mD(M1mBltFg*OpBt0SKMsfDDeAA! z#ajn%1)oQjkt8E+D}nMMFvZRETQl>w1^AJsa(qqCu&RqZcJ|6L2|Jj&mKfH3Y?q2z zC2&3O!dWd>mHtSylkE#p)h;#Uo<+wjz=B>(v_TKt+<-%4X^CtN&xkWbIQ5}llqUyW z&=A&=nHzjJ^+h|0AdG5Me_)97jRMB}`_!hi<*y9E&oLqQV`PU|C0lpmX%{Nl?k}d%!_E z#IhL39NGF9)V*sIWoP?&8)wXR(+V3<4h%m@^%w3iH|igBY17IOn4R*7MPNZ*KIJiX zdzwNG2kxQ1noZIPFO{w&Gm~b^Uz-?ys`7>|%``xuLuA3KQX}?*l1puuNGLgr8!dkN zJK!*|O+$0$EL>eMYaQ{kEntXhk!83oRpw?*)0rh!-8f2y_1JS-IyFyUNc(fDLkmQ? 
zoT@;vsOEsv#J*Zflq@s;rKNsSwO##frt@$*ohOhwDc>nTgEo?;)Gg(GTt&otbiXiv zx+Fvrvb*d_+(B2-*LH%;cA3pxY<*~+G9A>_ceR@1FTV2vj|lHV*z0IA6Z`nU&FAw|NqABudHbW9Jl!= zYn<3zt{Z8+_$2d+ekA!8rD4Wa4TtGNY7#6%eMpl994!5=ww1~it(8Zh)cq}blFQyU zfeb?<_ReKvn#*22!N-o5@*S$&ft(8Wh=0+6?%bT`ZD$}JI3(dV3^<+HxWN(%S1KVYXqQayU^>@hQ+~s-${P81 z>AyMrb$bq+!)t*E3#@aqHgk>oZ(ToQHb--Hrx;|UzMW&qIAS~>DlvR=u)w!dTAdA2 zMCx&?)K;r@9V}PEuk1xhSUkP1qf>6A4SrDFKlr}cuapcReC*Km`EdWIED7R^=L;Sj zbb*;~s^)YheK6P1{$cT||AWPQ?hD^ti$JAZRda&m6qN?EA%;p$vZhCpMc-QtuKENm zHFq}qKg8)`f~c$gR*tae{D)i4X|iQKUy3{r$o9t{3B-z zV`^hMpR(OyAq!$YkL*p*eOZER*P1^8^h1RS^Kw|EsJF)hQv5us&5gE8Oq|Z!-y9Y} zO?25;!ZJ&F;k31Y|1@Mc@2DgpPRW33Z=Vk$!Kjc@YmDD|BY6I`iRvi(VPlu^TtU%v zZ&>pvfiQXrHB9*gyug>PV@?C8lc$rWXohXqQOtCqOKj?+#N{Ek_8^kXU?{u!P{6cK zb8^1uI^xUN@?hj`x6Du3B%!eVOaWW>q=$l9@7Wx$2YqxdN^m~|V3;Etw&Wi7cuTWF z8LPhDGUZtby1OPF-gG1M6@+eJZ1XJviKc#GV7s@0b^UZ#CL=7`gy(v2<4Q$zaAdxQRJ(baT2|Y)-bryR^`qSKQUAs#WIrV+xOO7L|ukS z>CiZj#8Y<+v?+2=bf))jkN5ANZNyPP0{?^V=l^Fh5klQa9ep zIe2Y67V{*zs#m?iKRi+sa8D9e68=ED)2o=5d6HB3)NIa_kpF__r6m&BGG{!zf+0r` zgCXj;cY{@qcr@U#P9q6OZ|^I zu?_}Lc9_((CV0YKR}&YFxA^%~Jrf`u4EEl~< z`AQS&uGuIZd@Ev*_4N5`O)-ooGEV-<2Elz)zK|JT`BnS8;_JqmXMdYreNpeKAW!M=d9orYy`l@LXT{Xr!bFJb8=oQ=L5+s1udD!;# zK5>_4Vcz#8(PlH}H0@EGzsgJ%Y3JnZtTjS|YX65U|B=fv8kb8-nJ3AH5ED=#Xv|#j z6+97fw5<8fLdfwg8Rx!J$0&-M&z}NSo`NNQte&nGD=jousFcKjsPXjhbW^m9-mdUj zJtG0Cs~=@C6Z}R-{x`UvnB_?1x!x>>#-ZVA3;yNQ_+2ifHT^`j>}y#iC}n%neglPR zHgwmex9XwZo!8erMLDL!1m6V3v3Bw>zjpEqUGHU$yp_gHXH#sk-Rw9oI_T9&&wSf{ zR7@(20Go<2B@%M!PNV-MDd3J*V)iVzsu;?r0fhyJ#n-m+j$Gpn+>t6L)i5~%e-A0z zXaIh5gT~qwjQ{3JypwSlp8_iE$g8|hj^2kLXflRFFBPP%WdvYQU3I-xKEQ*ODIV(! zDSYxGMwh*51sOnVxlhUH`y-h|1^MGTGPxD2w1ZtOxix^SCT73}WQ@?C2PQbx1ny@l zoSOU`eWd>X0pL#~yB_#g{G1`48tX~%9$hsEy;jg&obOIr>l^F}6IKSVPxU`a`tXui zL}vKQ*-6_So1k)p%}#>@au=5#qT&7OgXQm5({wO8g=liv6T|T-^COFcmpK+!MX!3j z`yBl*&*w=i7Ta7T4NWWTrd}gmIIa9Iz*tgmZ z@2!1JrZTeDyk-jXfi;V1zWA(w{e6#rV+l5j1>~yG)pZW_Yf9X_i zEMRf8AR1>XxG9y6c8l0+qZhgF6C82!+?7F|t;4_pm)d1xr1F76&iN1M&W##qg5K<- z41_M`|Aba+E!=;ca%ziz7V6+7@=rwRw^sr#iHWaKXJ9)qy0Jq1H%~%lGwWu3pvd4^ ziqF{unyc%(8=z>KG$7>AO?R)FPS?XZC^|5S9QD-3;n;tV2z#FYq4m(r(CBx+oaV_Z zHp0hZKI6TQA0J1Et_ne4^9&k&b)3A@EPafZ1T1m%z?n$$XVaECK>*j&;q-sW*9&9fQx zv^gV!^iwWsIc-e>A>RLrpdI=uZVr9&*KJ0?rG7jGY!?r@(!^cpFB{jTQuxsgNfwH# zw_j{dR5V<>XOeYj-of9io4Nn%rA5Pw4E0)f?Yq-nX1+?9zs0_A1_;+13{p4isAR6-xrF8Yp2D+Di%=tp}tL+a`NdOj<%m%G~ZOXZdZH`v=$+K#SJqV z%Mg1<-b9Gr+?+ibd;6zkX)XY#Yb?NN$8kqM=yu36lpV9kpf>rd$W2M}jUHdQsbrO3 z6xH;j*fpYPKC8rMUH)<_eLD~JWHBz-O*`@P5XixHSIpy4f^AyL@*XE_H5^X_)>yx8 zr7|+j$=yS+Guf?Ht5Q*$$ed(_*g(qNAw82cALJflH5YvhGUHcm?yt!+IigYz-T7s` zJAxhB*GnJUFd!=X2cLmVXf{h+oKY$FEvG~%ZNh&ue6jy9e1rdchA%7Qn$c(Qwf@On zF)vfBTyBpv`(yKzyE>Vfz#TUnb0*oeW)KsDNdw7(RejJ-olh$M=JY3rel2P>6F*U* zd%0kR*Zm>Nwm^C3Yao_UP;D&So9Ea0Cb8jIWx$F@dvtVLqR>NV$xIq5$?C#Y z{`^xJNjOWBcJ^CPoPdW?3#;)_jKpK;JO3o~CmA|*WZ8urKSe z@NfTYt-oVoM>^VP4_mq6{ER%qUysh2m<6Wd)Fm*S2L7H%xTy%NMJ zuiKX;Cn}F>voIl1BmZqpKCi?#(^{$LN z_6`r!V`&1WA602R(;e9(83E$)%y}|yUEBhvi#NkD@W2CWEFX3Lb`g5nc%c9g+QLiC z@{lyqH^Xq{FkDb=@=pK-THB-lp!$N@PyYh-3Z&y}0th@ep+=8;Z}Rs-1}!P89V>F* z_q3m|yxtG^p;Wn-9%DtIhOT@2RkhJpff2k*t=vOumxk~=DPCzk*IRh8WLo12_T3c( z48UC$aTTvRsI4>RY-J@Ocn6#ouRwAmfee$?M%_NoJkoT~u(|!{;CN(zlI+FNDw-X7 z(8UkfE13j0`NM-fxy1COwkdghH3zG;NRgURvq7Q!`QWVV>#?oQ5^d|lk>GF|Dcu9^ zGrroBRpLPkILYh5Z=+t^zuKmdgtcVx=QSq$j_sxBw zn`^1d-K)oM-z2NMoG66TDak?uFSNRl4M_gO=X0mgPV>xuWmv8C5929)mUZxKT*Yw- ze&LH&q@{A+g8as>yg=;(!##W0%5CK$Jia(TI;mx0TAmIx+^h_IIKk<;)yQMH9yG##?eK~`9Kh*tv{UL}*eL1J4raQD zhQTEvZrywSk02OsI3K*40Nq8NZA~mphA~ub0`xUJI74lFk)!hHT-$Jxq~a7n$DGD( zyz(KKEtKX`O|$$SqVYdH)bVb+2^$Hi?gZR*Fiuq0yM~o6&l44Tc`o( 
zU$lZ005%Ddxwa~3c8}!*zHMUJ@hh$UF>XEYq&8U zNE~Rq!DT@8z=X;RJ>G#@1{w(fO*hk~@jYgcfM)&QhFjn7|3q}VPAH#DPX0yeo`qom zGDrk~X)~!`b z?A1&fjOIc{)f_E?BoRssG1a_Q{TX2FR7;|Lk| zz;TM$Sqtm)%Bk?0q5fj0FgC1dSe_~_YG~(aW->nRS=azaqZ3YfC@6rY&3TBu39W}8 ze6Tk`A0deZk)|<9D|CgLy7N}tKbS)A(&MYr2xT+uqvW2Oe)VpuQen%eSDAZ|38kwL9oN2cQiu~X9`DipZjo2 z|Lk9?fu=QZq>YQ|7f!5y{gHee36bgi;Y=dic%=B&5@oT`9*&6Pc`yQs?MLVl8Cvu7 zkC1So`3U!jSWDn5dL%fi!W9nUlL~fjE1St?%&11N!2{Vxpo2Y*y>zbkm3nZKG~w_? zY3jbXW-MOdAt9Jt{TQ1it1y#2`~B3~KDda0kUx0@0!GRL8u`2{6tqrt_H#iM33L5p zVpGg;MS_lg=m$jTIC<4VLmr|9*0d-0#VwFwGmv=y3kuR7xXHT@O zbk+;2JT^IC0vBnrYz5lFFePSPO5}efSa=}S+n;MUk=*M&Ljo7k{aK!Z&v`ZZw6BVA zWrJjGCzA$PXYBnb1X0ryp~|8^AGF}vCc_h79wPmQ%bA!NiMV8Al6yx@0qX%z~ZoJ@P&UAuBWJY@IDbd3Fz4m5*(MA zL;K=$(YTlg?4W7Efm`1*VeLmqOY!f6d(<7`kZO>DkzI|<QyAr{<_P%itz7tSvjW}^1$p^xaf0)f6{uJy% znh^tq*L~2f7$G4@NYB7gAvWXp5ZE-J%-@Y4DVB^F_8X{&|{_^ml)^S5TcwvA#Lw19A|6hq zu!WBv*~w0RBG7!!H@sMgeUHuM<8Ln-?*?;<8Z*p(-FO$)ToQFBY$4Y^x6)msNI|ZI zy{FA_kBsSEU)5NBYIH#11LS`={{x9*rF4$xL)8jf!Ctj$0qdIrp0%`+7G&!9D8~gS zGUvAj9BYFy9F3l`Tqf`1x}#WtHsH@MB3@b4TMwVc)Ci-o!O*mhjpZD(^?o2actH~& z54PZfEjV};-flqEq$-8{h)jy#))ik;vF3Vsq*yjkKk9t7er^}?i(T5Ec+s18#%>;4 zCXOR%_Np=XMfvUNlxfCgMebl&u&`vz3!QFrQj~hwK`*XY6GwU!i9MfkX^f_G4P0>Y z`@3%Jd4_cRGNK~$G1McO)-cO0oB!CCb%xwdUmS6QeYa|sSnn)vqt6ZKu$t5Uc#&`4JQxp zpxEe=k6(jEvQ@wHRhr_jGCy((g>8#SODXL(3`TMvCkWq(XOFSOjie3Hkjhm*P!w92 zJGqDgCuf|~^#UVLoZpzKy@$rr1lJ{2#bto`&(o|oGNJ~0d1l`^SguJ6Sbg4*fU|WK zj?0_dHD>j_$lXuzhjE#f3R^^^xIktRk*nw!Sy;DfczGE&T&{JZs#@+`fv_7(!!~sC zSKWx$n`93*59aDBrE6Ce(Qolm%`T$-hIJAOYY)*E>4qm!+%>6i?#}WZ0`yO>+fp8u z=`}`cI62_x)a?I0=lG2WPEDHa1nxVog1J3N-_VKahHTqLrQKby<0&y?v!Hq{(#hV0 z%B~TyVN}v-0g-%xVMN97`6(}KjS(~*_?BxHH4l-*pxzlOq){lGQ5O0Nj(?f{C?Xb= zrNr}#usivEGr#_&XQ-c^HTX zQ|e${U|(9~>MCrPC>fr|h=rn)A81_M&ja11*mcfN3(~2L4|D?2QEtN=w9T-1*{sBbPJwP3({SVu{b%f>`XzPe@aUY7I;S!K+yBP z?!bU!gY^5;flOW4_3=^60B7jm&n^-9SU~9!I)HpOGgo88V{$jIJt62{ zcPv1m`)NSvY{sc-VdM{DgeI`&;A_q^V51Ef4MQGE7|QP zJ+O>K3gOti1bz=`r#V48`4UR3k3e^{v1mN8Y+*bu2f%%n>VAi%u+d0u*|R3Hoi2)p zVG2e0S$35W^2`K5+C8PJS9rxcQZI#f$XqcVOyS>|yvg{ZyR_M6eW4T8ee;eOMC7l(75_jQH zcc`Vzt9);aF168uw4U`%RglSVf3nl6oU+v}GOtU^lc{Set??!eYq{pSW1kWeDyWG? zve|6(M!Ka;K_Gg4^zMNP&$iv)M+Mo2UY1|`%j{zWOdhdle0}Ddt+5aCH-_J{KUkod zD9g)Yo;e%7>Hqi~q5Dr4Z{UNYsj0cFT^H}pbdjqwjNUFbtS>FYu;06?iFk-_Tc{zo zF^y(So9gEm?vV9$v~tsyOyvOd43Mh&Lkg_JsQIQY$UceoV!0E;8Hvfa4Yi%r9kTJM z$SjjRbm@n0qXYF?!)}FU_+t|j1eQb(@bx1$UY2>V^~S%wXnU;UyiGUnDQuaS4>iY| zHPRu8&sjG&cTl=G+U!)d*YB*H+GL@yTX znongES+A9^D;d1=_btQ&zT8*gb`G5o zjk)W{1s13={-6c(;!6i9qUF@JZ z*qP#GmW;B!<~NR~%mofMR5#4EbG!WZ^et1`0y8{mt2^4zea)0{e@6NU_Hi zv`eScF`lp0mn-MzSG|ku@DS(-e)^VRJ+gO|Raj=qHk9(`V;^v&2G(s}QIOYRPb3S| zXpy?$>gf6PF&2o*{DkV->+6U^W-zc@vP8F0bJ<(bJqtzDwp>hI|Lbl0~m}L@= z{lXj9xTdouS3&*{6GL)DeQFI$SyxG;#_m6p zxrxG(zTiRlZpa?XdIC%9(HE<|t9-J}K zHGeQHEC7;~=+BjkoC3vu#8Jv{gHWND+CK&8x$R%s#q~8{XmeDWDig*!_zWM!rGLaT z>@=esEYXRCFdh`I^M(ILgPKy&8E@oI>an*ZenG!LOtfk{3U@O&rwRRfQnl z8y2>Cq84}5!u_Nuhg{;oHfEy9EugxK2(r+`e8z+Bsu38acRDC! 
z*7$4kxPvBXZogfZh|@Vfl9PJa$TyjlmCU-!O-3 zbF8-~>|pn)kU#Oxc5PuaO&oKB7(bGa)!-`5NN4iLEz#ZL>-dhonGJ9%@+vLy^onm* zM&Rk-TtM&awpbnMT7z7ZA1|hOpo{Zydd_hVTO$$ass4t;$!W7oEuA1xX_zNz>EgOgvifx|et~;%d|THi;goQL>}D z2sw!O0p!e8B9-Um6CMXs#I3hL@Tn<=|*5oC~4vk1dEEesT@y$vX&oV&+-l!{w}g zCR>nmSf-=hg=ioESD6zBw&%@MY)q>~K6hE{^tDP-G8_2#h*9e8{?XQEN6)9ol?u}_ zw@tr`(-s$?`NN6TOy~z^BBdSoTfoMynw@K+(8Y1tdoPF0=7Sa#Z<8l-Gb-}dVFN;J z80(@mq)bGUGZcsVREI-CgV(mnPd{BxW)`F@I~*-%>KtVnL@iFyf+a02;;c;g_ByFr zDWj3g-?qq6r)7)N_?TMaXcUiaa8ya1aB}U0Le*}QreE|(Ddv3@LUWPFcbOhkUYx9h zPZ~ERe`Xf&eJwQ4&pqH1a`L2wgul&seWNHR4O2g#Mpslqm(Z~Qgabqn1vc_Oy0zz>E5?EDfo!ukA-ve)97JC+g24oiWz@r$8oz!-8JPVewol+ zvuiZ^S_=o9X=DUhsMZ-LK8q{A3J@I0S64#gpL9{A9WkjdIbl(y$UI(l6utb0&lCa9 zG~vl)zi0oO%f4JWf2oweHmC6}joRJAgpTt&# z?L6bihv=h=2<=}$&OQ!vt1rJSGvHs5C>b=Ge?G~W>9`_SUidcU?Rgj{0^RlC=2gru zDY&W|;%hPfr4&x}@y|?k3ADoYN z4A=#&Y-^8y?&nDssWgUq2gfk8IXN6Wt6jL?KdUV0q{m9CAB8QlYk$R6}nROLDfz?k=FJgo)b-R zhxAi!t9HtS*f&HUZg8S4;$Es&g0H-(5SHcIZY}fqUTvA1GPcFral)x%vr7lYJ((ir z4Cz`Mr_Y{`75u_rQ^W%C2TzSFIjd$~5~gU+)|u5>w(HR&%O+Mbxlj9fJv}_YHxSGH z?rtz8A}1-uC)n%pwPpZuyWQL!b}h5j>Yq>KcPy&-9~y6b>q^$&*bRjyuY(Ck&KUbHp8|wSXG@khVbs2a|Oh!zb`Q7D>G)h2l15*5< zIMbe*HWuJr$A#2fk-`Ar2_N2G86hNyqA_c}y7ZVYexV%D(l#G@t9$3xNtc)b9FH!u6 zpdci}Tq%HcM{Vf-+00|d(bJX024FBCnylYZkMZWg#k-yY(FGvQ?@MPMNVxly-`Nt* zulH!NrkuyjDNeUn&G_A8+5IgP_rnK8MJjKi)E)g6Ce8U>FMYD0i@)&0{a7vb3h>4u z6(*oi%Bzs`K4|w^4uSpdknI@AqlbV;8AHD0WOFizG2BH`qlMVa)X4{g!BGSsge9?R zk@)$b&xtFcldb0P|MicNAIqPjAKsUr#Cxbqq$n9dSMrqszR8&Y#}{ODR}c|ba~di8 zBx09|&M+q~lJQ&ooSnG+6&++1MA5rQkToR3v&#N#s@UKmCQ+miCJWTK{`(9cy?51W zapbR-SgBHL!)e5Tjk9Ze>j!q3A_IDQkl8$Qx2Vtka}$*pFKf?bAID}xCqARVC9nO{ zK}>1*&7yH@z&iAmkBHs2bOC~(I6X?(j}iypAdU>r-Mm`K^GVX=`+NXq6KS(5JckQDA;J_bN1vVb=2J+7)Xad!6&RM=?{ZZ*m)Srad*XdR~{=l}~rTlbm9Re%ja4~-4`o8k(q0CmHi=!9j`{s5@&bl zL=eO7L#3JSsdXOeWZ-7DwU*DE6a3{@C(G_0mhU9&5Mm#Rh~G?Ksk-Y2%o%L#?~uSt zlTEf}#ts3On1pyG)UJHpH#_BW5$HNOYCdC=rt4}6URmN<+@CfkvM>$o*P2qOM(SO7 zCWM7W+#y)S8(yNiR4oJz7;NFGeg{d|mptTkf8y zTCab#*ZvY26kGuWp_^mx_Rh#yNrKcwnj|3Qo38ED=RP`jDQPe27dX>enS8J|hgn1j zxL9^J_I_bqxHKN78By6E$yM}A%=?^J zSbbgU>u_-X^MRD2H}dxcy!#EK{Vr$IGMHP6QF+I?Yb9n_2y{20YEOD@^6wsMFTXKA zF;YrXlp%`#J&!_I)KC+l+pA7Z_GLr zs+!;z!tc7Gc&Y?+|9XR!!6FLpQxv0syjY*(id`I1 zjtg`=XN_Mnw>I9Ms$G5%5Y#D&;MbGw4&&F$#>Z5({Xu{JIEPXAgwJ*n$;Ii?sXLM- z&py_P1R7q5y36}D_j{ORGnt+OX4eJ74diSmYtHp50eI6NuKWr00LMof@DckjywUOL zK-r#wgvh$yV1{DrD+SQKt}*R=4NX-0iCdRLK93LiVJ$4>*#dn-8VD_V${wEkkG0%X5?mQd-(+yI?02y=SA>BeqQ9Uc*8>QZ84SV zf*If*kg2~r1u%o_4!+3EXPBweRVkXLIKoXBdF)uf+6Ne11-~(qDJnp3kBo>=rWEd6 z9pexSDgh|~?S1GBubUXaXJvJ!eTEqdg|88jwBVQvV<4mz^r`sN0Hro|F^E|nc@f@8 zj{xV^_i&<+;P}7!2!W^K>FqkLQ8Fv}7g<1u`yK#{2D`@*b8!YzSQ>U?2j=F%W(%31 z>8nqGw_;~~e_m!p46}zC$$0tK!0n-?rLy~*h!Bj#-a)NB@N^vebzO5|blbI*by5pX zT1-G{x;8X59RZq}-U2@B5i$9E8`P>!X72OqNE_~`Hl7aYCI!uRs-{%JE(SDgL^2l2 z(V%m$LkfH2J{&c_s{L+61j2m+8Q$YyqXM{1gpVnk4zKAjxCK$x!{PSc)JP8t$W54V z%yxPY`qe|rz^+mr+OKw#Xcd-(Fb1Tqo9w25a{KhTM_NrHFod6^PdLjPK%tY1wkl_< zENnzgLCv95p!s5`Yp-?x7ns7`?pnGCWH4kE` zNf=4V0d%@YIOQb+L)%_rZz87QX!~q}{vDUQFml(Vex4F=qc#}6gp8;`?-4SNS5OSM z&~x7xKg9vll4`M80^1yfA>=Uq!5){)z9mp@CR4_G)$(s+Iie^y{_Op#nD)Q_=q?8S zsE;r1^zT3ZKU_&(>`vyv>an`nRiQ+3jd&*!L{}&q1i<=B*J>i9kFSAE7rmRF>KW{1 zrzI4q1qNVKN)rVoj)4wL^p}=TP!TP_%xe$CAW(h>>z9WMluRI*6MD`=W98|ap7~&n zV%*KA)}UXKo!rWHplYo-6{lqDV0hNK|sBL6RUrKv1%hg9u1Y-2?#<5s{oFXON8KjAW3U zlSs}Pn&#B%exCPz_CEXj&inU_vB&U-qg{5_s#R61YF_icCr27O9Lm@H-ka_fMhqTI ze4z3akZ8@R2Shm6Ark$bBCp^hdWWqZEG1flq3HQt>nAPsv^rMw&q;9bZ7wrfV^r#j zniq{+G3xVpj<|;`ml1oZ8hsBEpoEdmZ!{L4p-d1OF1oDME9ik6S8!48%fUmJ+jmVgHC*&EB^E>85o)dFCa0jdLWfm<6Jn+bqW1rF<*CUj_7DHgGVSRJN7 
z!k<`cZXR%tnX{i%#OQ8=do)~{9W%tFTd*o?(_(xP@EX?gEbk%vO47ue#l*nEDtB|l z8Z+;%-olb{!K^M*MTrUKzy>6Qr;n@_Ty!#6IAw}1cQN&y(tP<5y7|<+PwE2vV7L=l zy>6If3;CMd6zCpncz7s`8PFHyrUX>Yfwlir#I0Z6Vk=;LtM*Vaq@nAcz~YEX#pqbk z*EJ?J_}>H|H|+xF#|xhKz;z)wA38!Xs%#a08=GgCwf$xXAhKIBj$@Zfu(3qIJc!3F zRYgxc)N`#13(gpS_IU1OD`q0ZULaE+2`w0gCnM!FrqSr`e?*bBB#i0)GTAQ{7Z6tp z_xl|3p=6O}6;w_-bP=kUOVfx0eGwb(r?ojqR8B~K-P_|XSoS`@l1aok%+lszVRK-T zl5kC*fOYaH*%B&t@e&lfFuyHl7!_Jyj~LA>z?OpN89B)#@W^=< zy^=HUpx-&><;gceJs40Qgm?O?sPddy9C0W=otQDHz#2Bbg^2uxL9|La_6r|7nu zu8}MV`Nx|VMjpP%)lu3dV<%3n-T&jp?J(jInNJj9y(%lN z3nMgciSdy?KmD;g;0OZ3CZ~8iFUte(oVh$}n`>@}b65b8F3@U6cN9a3>9h`~1~+5i z+wO=L8wPhH!NIlF$&}A@H27dch@>LrFU0J`ekiZWBW;j-v- z`w1YcAICBJdiH51#3w&N9E71JAVg`Neq zerp{%E)*pzl8Zg(*Rt#(vwS=M)*Ej<)s&84lV_Jo)_Ns|yb~zw%=ybG*5%2a|rob+NUdDjVlK^!7u@ zv)`}}l$Gmq_qp8eK6NCqN1iiXB|tiM%5ANl7mzz=SdY}#{&lir zSmm>9uIt-fXHcv>}vN;Yy|FRmghWKeNTVFyD|?`9sGNCA%Q^X7}FgGv&w`O)*4kmAT(+Z zZA+1)k_-1xMcvAqGCnLa1VtYmmBU$mPu0o`?^2&XJ>qrr>g+%Y%V2M5^jj@bTQ| z8<~K)$o2dxj2Qk^bA~6u!+i47JB@4flTwVVNz)q)sj* zI_#m;YQBN0UsgJ*=C*!8M8$aN98Au%=8oZBzd66~sXU*}YAxhU)-C%##AK>a(uBjt zN$e3%KK$}vmGe8*cx}FCsU`_&kW7}a0sjDsG4vbv+(i}CCDpiR-?w{WH#~N7;JSwZ zCsUoWcxH`Qr&l8ieEPZF6l_+Y*p67BI&tS_ne|VNW)pY61JbHQS+Hf4x-K=7k;z=&wY1& z`Fm=?qcN7InxWRoHB$b7?4(rYpTaJ7<~Obz@6^>&BJK8%u^#pw5R0lSwVlN@4SX(J zto6}F*lI1kEH@L_S9P*!2+St$Ux?6Q-t{*z7o@qWw`1aHplecU1`ctoa_xK zqZK-11%%J&;t8?dQ&a;}my4rR%F#J9l3#-f`=a-F8s@nnEb^yD@I7 zC#uGsL@{v@Q-9uDz7J3Yi}I(stkWppkl~Fb2HYFcr*`YTAG6dSkn`S#btRz5a)IC* z3ejy0S?2k!rg|-_pib#X$^A@W30`wPbEn{KyK{RCAK#?SsiiXFlS~f3qRVHG5Sy&} zIY=SS8+wSWY?5fu2$8l(-(ELBZ=A>9yrIbf>Ms^S5K+-)V7Pna_c>l{mG{lZY~e~! znbtmgyf`X~u0; zjAxHu%MW0B2)BI%2jV%2HZ_japwK6QK8wsS%2FC(A&m$JHl|@B0eT~UThTi+&tq4% z*>tx#6=DTtXejO&2hZK!+uDXeMx(FTsWX(m(YjMcWyxJ9T8;d*7~40D;75@Gn@+im|FLT{d8@t;Hx6XSvpTm z(`Obpq$~vwzXrHlN8R33Xew&gPTZ~46 z(=z;Ie@$bo+>E9A$38X1K)RMnf2+Ckyigoirl4?KYl85L{2#MjjD*{aw206h|N8fU zNZ-_HD(#OrcVHUOv^og4333t|`!0pYCc6}K%0%wIgw@VakpbQ~AJhY|0yN2K6<>vI z5s(x5wl6t2rM}52%)U4`uaG|XT%w&rJ=6H^ajoa(e#fE`abVzVUR?g=_3guHgwNLq zHXW6!{fd{BH>X80yWB2m%h3Wx_mk1Z!cb86#Q3HD)W0}2eyLQUFqsbkdYpUB08+D_ z?75dU62MV-=V42WT?ubS3sug>2Ipz$vJPljb#w5EXpgFLlF30qbWl*%_Ash~CyU~H zrfS}H8>#-n?}<6wd^i;B-kE;Kr31{2S2BB19jm;(KW)r*CU%aw9w=?Z0mig|xq&5p zm*@OMB~A`{9k$n1u@##$eN_Vsbalp+eF~L)6gRJBzv8=@{n{eoM}JFz4SSh+<>`q{5k1uV0V>0lIx@=4%DPT7ssSzW2skWvAm-x9BbA?JITaL|9cCxbM3tTBmuKf3E_pEUou2gpk^`bM}n|%#w=cJi_PHJKDRmYxn%_I zKB>}B!Ux1_UVCXj|76^L@55=9-321G-@@oLu_)1tA$K?&SfR8KfJA{6ol--C zgT}8)g|H#h5tWa|a26ity=TtUhm#L=5`Bex9=jfda-_|Bmp}cb1dA(3VF^;l;vqXa zO6ikW@;u!jV3?(4(`tHkAnGk`8u7m71$(Sdo(L{IYTSlik>?{7`9$1H*X-tVS3R8q zvF?|b>hptlHp`$5KJS>O`^wl4oq*mq!p2hX$Ui`+TNQBNuRv0caO;at;Ug zCqc@5rXlBx4KGxv;xISWV?OvP;cK7^(K@F3>7u7vSugWGF>!8uRn0R$;amnY@S8DF z|DnP|PJy524`>F7uJWzIQ$JE&Y3IQ*o4Hbc+sj}-(}6EFul+OUzwW8( z?a<9J+eiPj7%wxIFUe91KP`A;O(W!(Uh5xdN8~8*ONsYe%j5Dgyp@=4c(tJFs0sr; zsgErUvc&15Gxo8v0jLL`uAPy0v?3-l$Zo^Dkw?2@q! zJ1DejWzhSb-XpJAz3NQFyT*Wd`u_mpWEB4d;s+1@FF<_s(fHL`o{_U=gv79MZDjg6 z_a>Rmp9sC>%#S=qxA!g7Kp44yQ96S0`z$54VpAsW!}%+&0Zmt2mnZq}9Py~2&d^Sp z(psBGb`iI7#G=%^9pdMDxClFe{bw9C+M4!dFMGC#Hg_4y&$uEriE%cr4FX*d;G*)fauU-soKPta|O&(_)GEQ*~&J!w@Z!73)JWjXT+zWl^!%2T`01 znM#Ly;u>y!z^Hz?gs=2{_0$?r;@>7U2@*ix-CXsHOoP}V_87+2``eeNyT2z5LWN>{L?U}Pq>H6G@1OdyZl&;bD_cZn|$x5 z;D_$ARv_&wbQg$`MXkiPD6Nm)L7tFs%B~#L-DpmSj7ZArXCv45^}E)Vm5R*-26y`^ zdOuMmNI%SHHs|$?P~kU? 
z$L00fv10DcaUW(D-S*te6WFvqx;XN5D|DF^D*;*KZ}Td3d{ln3Tti8-*5$LCc8yJY zF-jlI_^VOq6^qu)n$>ZbNtwNx@I`gBTBd!G@Ab1Ke^s-E=>5dA?DutxWtWV(_-(MO zZ#CY4`}gwvb-QQP?Q52yGzTeWMP_|FgFC&IF}lH%7YEz0=-Ij%xW2#ko)3Bu5__pR z2mg%tT~RY>S6)5G=5=d&#p!cXJo~ji7?Y{7MgG08JM0ecF-aZKvx{hZ`LV+HPtFGh zL>;hiMT(R`(z14c1t@-D%XX}Sw z9)OK@s4#MFz$Q`6s^sDctv6``lLX-GgNnCn2AehMj)&4Kj~b%RT|y>=78xeNTcTd1an14lgIwy)cJ$oryyKI&8TqF=nMqD5`PSM*)m2Y zaorGZd%<`M_9I)g(4?Jq>eBiC9vHlpeGogY?_ShL1|0lY?Z!^@+=|e7N3c(3P|N-^ zR948xDhQLBZY+^y`_T5qM}Y5r$&A|KMJk=>Ya+t)v<)Kre6vF{W~M=Q{=kpL3liDo z=epqL1JqvFlJuljJCBcZTTJOoL|c35#f`1*^E~CeQloZ>-lS9Yi>}S_f-mO)HC!cv z%|dL-d%@1FAf;K_;)#d^lU)Iu6$_5Lqbzf}r*aR7 z5)wFULSMTmINQWhu26E+#8z5`Gb;<_!y-LZ?FV6fVD#5!- zo%mOc~ac|nCc4i*jjUf7Ne(bAH_V7PEXhTWJ+@2ENwkaY$ z<(cMUZ~xl7#vKPjFy?)UepTsF_pjq=ybq%0_1Iu?GAX}jL%YaspL{>1nR|gPD8X#F zkf1`oNnzBL9Ee2vOvSfk?*2a8MHA)>;j@<%$ z)Dy1KEsNh~wtxDE%BEBU?xJk^^jKGGY=;*MiPtel;^q9Nk^sSjHKk2Nxx{#CS3Y~s z7Da`4$LGQ+&evBc{c}FtmMAT~2MQ9YdFkm$*@29&VomwftWBD4Jdr-~aVu&W@h|ID zNl!H?P=7XvlNsfC&cuHvWxa@Sm@_kTJ-*0oIq|3>|CzWuS<2ArLbL79EYUt+ukQxX zX?lS91_sqXzNm2=eNxh#RjAPUB5dJbu-=e)=ort`Td)VD*fJ353=~%^)gnY(dG}go zK(_C`--5&J>1E}ztGP^iQxiLpVU!cqHZ`~ET=zz{uZ->l+3PBgun-Gya$mrkyAKo- z(pg&|2&e*rfJR@9cGFd-CVoKaT}qg+*LbMU(eO0aU^vwU>< zV+c@^S?x;IpX2z&r{huag+?9BO(^?BpO{BM@9}CWu-{qD zUv6h%PmfKa9&;D{WzWC(5Tf`9Who1N3~l7x=sPXIgV|Cp{5;nb4eTt}P1!(I9B4vzrgrosIC=YN^^?kSf- zMf~x$-UidjN1olkjRAUPGTJ*|067cwOqPc!_IC zjp%}!&|oIvmW;L$5g;HuRKbM!BS6(`&Io+D{|?@#ZXwkRR90jQ=dz$!mEOH~`GQ=KpzkA^R!H5(K3_ecu1N5 z+(&;`Wt(6_G60Nf=BLaGKPw)_NAwBw4fWZXc zczBSb>zvn7V|N8%eB%pjB$!=6^VerTYKzy&f#$QA(dRp%!6EHK@f2c=yYj_l+&nXG zWFJqOm=px(T<%z9M4FtGXKL2rnas)z*cNJ363eZ5aJ`y2}E|}30j33Z^iaW7*Sc~96 z#+8YGLEGLVzEopb24ToJN3z6YN~rhIYILA7Rt*4Z^m26X(~^tcyUL7im-h<6hAHrl zqIM8t6QjEhntx9a8go#~*6)8t0`(b$CiEf)ivmHy!H`{slLvU-7Y*N#^P&?u$SDZa z-qE7JYtkf{ZV(@J>z+k&t}Y}-&ssn#TcNq_>leih#^^^heuZjO23XL4`hdqMUPlVe z-$xR!SQ1b4UR^@G61hNx_m5KV_&2feQDRY)ftUXIAqDvGn+DbTfBEo#zS6VqGW<%# zmFR57VO{$zLL{R=e7T?iNX_|uG;2=s1gf6E{2=+0H5z#n%Uo_17sF&9P*c50iv^v$ z@Kc!%zexdPGVpO~zq*5&=cyC~Jm+C=t%4FU1 zm&jxEMM1Z^(+A(fP6oc4hUbIMb2n1#;XzlP>xm*Eb->gWP<)AHWDYnr1%aElhb#E1 zNsAb|0eNT(K=+c`QXXT{EqhlAR~9nq#v+dJ68i|gOcjTsITEvJym{lQmq##q?n#S| zJY@qiTXW}RZjvX!%7Xivd3g&ndQ@tG>gGq7udCrLTcyN9*j4*K6US6HmqlM9WJVOa zejEL~TwCmFavmW2A|k!Zm@ZtPeFYWYF_ubp12A`m%iASK0GeK|ogNu^T$n-Ja`nMJ zEyxvd!p1L$%o5IccWjB!gSe=qj~62rwz{neVc;VJ>|SdE{1lK-!rfe_9P5c*LU4sg zAT2fI9T8*O&^G|#Z(lAsEKfslzafc!79p6IL!qRNI~@jp;}=zy25-p>N8^%S!~%;f zv+}D(Om2|SE1a9l80!ny9$$DC2eMCTNKy+a2b%w)!zEb<-TcP48L5q!=8w=}J)}Y1 zvc?nJ6#*F?D+FA}rC-rY$k2dkWEf@%`O3cY5vm@qtN(-;I}-qF1rgiONK7h~9q$OX z=XdmV!~vHXRiGpV?4J*?alS%T8Lrc@D`7GxWM`Y2#W4mS&saCR9U5pS&BzBXyuWo~-*$7yQU}NOr7wFjuXfSKY;>EY-pkEb+#B@N~1$gAKgrC!V z(a$n-_OptxB4!#;4$Kew!CWRhGT?O38uHqBm=~&c03VKSA14($Z>0rQPYF(rJ8!o~ z@Db~{4~E6R{Q|vp<9X8;S-{#~MbPJQ0b2Wkg#T?4`t8gcu30kO6PJ9_CDbdh-IZV* zxYAo>B$wHyKc)AVG&~eF6tmkm^7^8F4X<`X?&h0xja{-nPy(4q783lr0F~=NjyN?) 
zcTAs+C3gT0Mqx9scYFLLn?o}6(E?^Sbr6#!05{j)n+b)rNVv{y6%-pRnj}7DjzC0H z3}s&HS%_CWox-D49_w_r`_uU_(SaKyIbUDqP2AZ=i`;isHKK(Lu393_=buRw8h6P( z{iB#l$Z7Pk_H}u~4Rntlz8^(p{_dGekcpBjF$~n=$yM675Mex=pL`M{5TUDC$Z4mM z7t7b%eu-v7uo*@W|okP}XI(b+|3)`=F#%+YXLe^#+_r<{U6^E$Fp^!%%4@g{o| zYjlV--(m;LVZk+dY{g>(;bO|-wI{BJ)c86osgE^Qh7#?CBV0Mxf9x)rjjbIQ8#Yr& zoN00AQD)Ez`?laYwX)fq0G6_R?Af>!W2Hoyvwg6d4KZ<|*6;JQ=mo>|C(wH%fzxm@ zX|`Tq)SuX58#@_KBuBcVU?TJc6}9e-EC_+*hiAmmGmB+5kkTCVaSk0&ntK(k13@s@ zA{JYZX7!HW=UO2_8b3$cLTew8&WBg-ahnhNcPE_ReZCs5u%f(tJ&17vJ3D?0QE^IW zKAPy4BXh7uuA0u=gz)O$e`X~4F`C`}FJ9`I}n|?k-^|WMx4;9f2oUrKr5dJ?XKEtmT zqU6)dq@kx(+pP(RkK1_b%fK&t=V;7mcWKNbFZ}fF6gpd_YiI{C38hZ_#FgF3e_8ao zMLw2&dn+;(P%v4L$o%xC=a{viVn8S>Z7=|=lEU#YbOz$0LbTJxoa@LF#F`WhGnK>E zCZ*O?5?n&M?XptLqgZgvikNWR^@Y0iZE5)V>K|xSsw4EaTxsFeGl(ohn-z?#JhD6f zjP3muFBnbFwvuN0{BxjC9CClPnYVXF4Ey!HFXY}UU|HD^K?$u6LWag0@gWL!jo2DGG`%-e4tmHrvVr zMHj9ih0mDAxuZN^AkNSDwqHrGoCTk;tx9P;xMA$^1@(5p7ef6+{~xGdwABl-l5;#z zWUDpWRcLq&27_$Kz5^~uVkSiCOVK3h>WulMwZMUPiUc|3kXdey-neNWU03Q<)z6*$ z9;`cAKKjULVZCfpZXw>~Ykc+|>%XMpPx@qNxZ~Q4-Nxhv2DKJ;7BXhyF3Gd(1L!X^ z-gGkxB$KC$oE~%_goP=4LW0OsD|oW|8d6!3yMGnA?F*jdldK@ySfzIzS4W}?wVe{p zqWZ4Z6sQT$(#E3RPd~lrh>ob7yZtfIta2lAe*iZ%Qz9AtoBBTyZZ=OL4#>36uND__ zbXWAN6*S&z{cqg=s*ut6@+O?5D7f?gg#C}qOh6XQQW_;k(X)FoO|ISGIJ$Bh97ltX zY-m?mMPR5su3Q%*cd*?6^}1whYxcuqGBTVjUZ&1*0qehEvt}8j=)Yy-WEP%O#HkC6 zv6tRhC-k{^z%%+VOHciw}Yx%r}QPLk#SIAPY{L;IUjDz+}yH|qHq+xOE{iEZI^ zkfMEDKF$-+I(3BWcbO(VPZvN7kzLcp2Z^)wXM3LKxzJPzQHxm4i$^>Y-`n z>)J|8td!6-KH-m2Jg*;F{!40Jq6DX-`ct!vNQ$GcYomA5o*vx8K@{RtBdSAe%%wq+ z=BJN{ASlPlswmg_yLp#y?h%%^yM%I|q}ezkrgr(^`1K6QI~&&BG7#RJ0=jm;rca5EhgV;dQ&RmUBu}e4>Z(7Q^=?YRT2}gPQ1R%q zB=7~T+c8r~pW~(iZYb7nrYIghV^J&nyZg<4}WJnuqDQM zB~+3F*=5I8+NqfuE`3gC#~sUeEbobmV6sttenajQAOQpOodfr$n*K zr1WUB-;GbV286inzt7SWI?^4ny8v324~9i+lS--!_7l%pDYf2w!gapMboe+TMPAt*H-dwo?H; zx7*C_yJf&cQ`f(~hNufqGQvMJQ;y_ng)jx!V*b;L5XJj7d{68i0ZG?bKj81Q< zXITbFKTmfdz1S?OA7+KJ$#h@&z^7^%#;00keWzAo=G3M(J}%?UGGoc_Qa6dMaj!wu zC_&QnSvv78zq|82^U!-`+|&f_rG{MIL&!rRKGl}1T%?`CAKoox_!sg5*_3+(5lu$v zbGy_>F{`Mp7cxF#@(6Dd#jTFZ4CRRjJqv|F{(Q}8D$Br^#bdVmH6{c3Ry>=3OUU(sA%g!5TCnlA<=;_aU_RPEH%t(z}IzjNfA zc^o}QTY}>H3!R@x#E+n{;=j;27aE=GIOo|^Qw-$1KB*DDX5@WoAi1J`)_5}id3})h z59M!~Ze!>hQ7 zclP?JVL@7McVx!QFxlA5V<*9;KY`UkQ`t?!Lxq`P(IziY!hGICIwDjz) zXd>=sRq!%DdTqBk*`oGoe`1f`dsuR5q#_DcJn`qP4lrc7;k8=%Wl4}RZDl}I*y^}S zs-r~bqtupC0{_>`llFd9xRihE{g*e@h|FO!z&ePSFwkTwir>CexYp?GvVd#o7y_}j zh4uV*eFLW@E2S+td+M?+S<}~0vd(BK`xUM3C$s@1NRGsP9eTz&?(32uwrq!VLvM7qOnsiRRJ6341(Ml?-F*^r*x3Z06UAY@q;se~N*p0cN;&=J9=(Bo)n^`ygg zYnN&Pv@Ar2?h1jtgZ?$-gu<#Z#~_Pn<{gOrJHt-rd(Qn`)ffm^#;WB=ne+3Un2NuU zm~6G>O%7D3K8$)%e3~PlG43%lTwj-F1UVNx341Y*&$2F^v~c#<;8WGTmFZr#H#|^u zKPa(sqg>>Te+fmIqB#v5ZmyN2eey~X5{5T0%Ew7PRnH-yE>ziisEOiKxW4<^g0?%x zXT>N&H;H;@q71@^sAEuH#IkIPDqL@{qU-m~?>(VRvY=uEcM3JgU#>oAA!{r!6#|9& z@KaG$H=yh&)+yI8!4@;%MfC2?_57_#pcXfKZ+fT!l@A2!W0|6zH@mtyb4$`ZSx!$=^ zR977&_=iE4_j@oJ zv9A9Hu@{}1d)cdT+wGuAEEc@6TQb?3CMWD7eH&^lLpjMDH`1S?Vuskm2u(6|0-c_@ z&bl^En;6~a1+abm%E-l0XzqUpg>nBq_vbi@j+iuJb##ao(An$tsK4^TZUusOio?&Ng09yC= zSilgN`aBLT+mfaP+tw>#na@G~$8tin;HqqrS8d*rXJS`=4}JU#{w{hl6BU?nM(N`~ zT$NZxmZP3Hm{M+2^u@6pn6_@Qc%ymTL>2IbQ1zPbe8~Nl)_sSmWOYntJT;G6Q+e?6 z!tWZmdRp`#vx=X(!h*nA3*>Cl^E{i8SR9;$@<#uwF#N;vzJByn>TX(qXF0jit4_kh zZSRmqna{r6llNLavXU7{7YB`rWc+1>HfGjZbLWU!Vl&z+4veKhznLJ(R>UHt_$wQ2 z0kBzn0EZ*}SfV}l57Mv{BLGk5y5V3R5eIgqP{Qq>-#f|d521ae`54`qj9>UOm;^B` zA>+|;=Gni*;J3-tf9>=;63-ccbD4ay-$62=WWo!W8#*hKtAk1Z(&TBg8}g)QbQ(_V zpLBy!sjcy$A~m_@zPix2bB>qWQJm1RW<7LPONRs)TY9vT3Mn2#t`Gtb**J1mw5#Qu zS3m|QZjHHM(P;V}%viu1l7rv4I5Z7}tY0>#f#K8b_QIVDIJIHi)lm_uHWZ)9a1i{Q 
zDB(P4!R~TUe-X8icT;DqECp|)(y!2@hdutoW@;+S@GaxwS!VDeDnlG`*l!5b^=4ho zjZyiZ!EvpW-$t>;VFc&;sztcrEw}8;l8S%f;~l{wA`Ct*Qq2=e!~GoMmEfMC)jE`v zurq15-8{M+aE^)~d#EP?xo-WiiRO8_czbGNkA{0<#>BvPKRt<9gf4&f_^_<{j79`z zT`t@L+@Wx(@ zr%~?S%EO4gdDUs=x0#UM;io%#AY^3s54HC*3Q?HBTt;NtaA;J^53G7x#z(aG9hnsy z6W`Ae%d{l4LPwXf(eCuxQ}G;43OIb(x*7QV9J7g4|5-OGi?=?`ZZ#l9QoeYY*72}% z!1@Q=;ZCFf10nxB97+Z)Ek&SyPMHZx^#uL>&73rW=iVf8sB%E-+HMsylTvz{PT|3t z2p#$3UwZiU`RTKtEzyYlc1SERh63heYMsxz(xMjvoUc4T`?EX=*Kf1_lhXHms~fxE zo&peBAs&yo*$0DXd=>aH|NV>+m)DRj;bj@qUzD#sofFNO{;`3D&0{TmFQ{R0e} zfgGF?i8P1|Tbk{=8{62G5InT`-(;isVm}rMG1dN&q0tN0s zk_54{5|_fgt2_N?z>~389@yn!p5)mo>(Pvdu z#_OO3_|Ulz2S~R`@L|u?s{~q{Ut*OLTV2GQx~{e?UD1_4pXcln_x`2brl`oc&3Wv& zI9y~RVRc1U`}@EeH)&1&vn6pkQ21M+N6Yyctc%leK1Y4~HcOF=E)C?Gm-S%}HgtU6 zOqtIbR-}_@R!xJ+RKuh=K zgYV+M1kA>qNBK87F*=MspJ-yM3C_4!jKS4Vjg&uch2Kx$YL{a_~mQ(AYsw@4rtqtmwC z{wa!$j)>o_YhW4&vQ((CVsvIes+Mw%-uoT~ed5SxKb&M%mlI)9fB#$Q4u{O1&iRkY z75XoFUg+Sw63IRGqW9qEu7CQ8gYyP}v=Y*XoI#J#r2j#kH3bgFV0?>^5&T44N>HIV z9WVF^pf(Q^K}V9y{J7rjV?@C2#;<~S@K8$}-iZ~SfIkqWjOz58Lc1&-1B&NOT^MFi zRD#swJ4!yc{6yvgA)8a)y?2Dr>+`UXNBJ-mqYF1$QvN~!x|iT46*g7`5N}p=(qUzz z#hX~R7r6QUBZ)e2ef_(R_W!ni01~|Y|E=$_?;gRX5uxytg&giCHs^P}siQ@}BFRGx zQ+Hk_$;Z|KOsp}5TgTpFJ78k9aYN%^#ah(rLBLjHH*& zS>rc0$&`+_vHpREFL-_ctE--Rsx}|xWW{Q;>^A#wKiYsf#RmWj9&>t|$tkHy}`6Y7*^ zVY&G(4Oa&9vdqwhX$qaWcVHhORae3_uJCn+Z+nt%s6kaR3%SCo?DulRi}Fa)3}X(d zrI{TWno(xN1}+DKE^7A&Lphie=; zAFjcNmo2cWP#O#Hnm&~K(9k4Lg}qfvU~L4ShzoJ)puB#MOoEE(UG(M=)g$FMAP)FY z@1-={Bxsx#LmR>wn6noB!AB3jNHHSvdB25!XM={DA+Q;aVgMg6RE74yHo!fNgz9mi z-}h(G&);PYnwgL(L1784$gwIR5x$oZ#7QfVV?RdQ3=mDL9>jSH7x{*!5Dx##YsLpg ztv)ue4NtLKdKN4=aOPlQzp(D>)*v)N=>+V3@z|GgehepPONA$iW9XxorkeLgiGT*3 zT(C%FB~wzKHP2Ov_A`dVqp<*2zsul`3~CduX_%eBQ!%lO+I7SA%JrA_oR=hZ2(vy- z?{~$*GMe?jJ(UjT?0{?Q`1%GmZY7TA6}1t64!RwJ9V+vO3-5M4Q&s&!jB#T8AnA_c zR%|^def$#3Nvmxr$HxI}`AfZTt!my7Jc|9;)3(z}*pb{h+kyO@Z*gUx%UPoV#D@ZE zU0aYk2-siSuRahufBTN~OZdV|c#3^^S_Bba58v0V(yb)ANqNF76m{riAG*FGe`V6+ zJ$O&}GX2WbA8j!%8)$Y|nNs!1AaFUw2T z81wC+nydpe6o=3)g59KR&dxQSVPDohmNpg@)7`O#bk3aYqLp0o@#$;_`K@R+X^0}X zTD|a9(&+~jkxnhKJjDz{PNAh&01cg0-bM~zL>q|dN1Y=E99>mu_)6f(-tP$Avz?Q7@QE|A2Jp^)iKrK z(9uX?fPormNWn_sBwd>Gcm#7~HmADx4fCV&=jDjyiK`{GnYDxN)h=h6y_##a8MUl- zdm7<%aoB3u4%jy_EIlMW1U*JQ6R#h>e)}3y$W_#&!L6FBzE|#Pk!gYPCQ^UI>}Y(v zH^y8;*KH!yV$Hm|zqoxjq_LKF-ZSjx!K1)7_W)^>9h7;3WCHTglTb35hcX8;(K2Va zN}}+Y;cweyO{2omij)ne)4IhRlUvZNYv%ZWzEg$ zg6c~6GU|%wWb6p*eEcHxlJmUi(EODspUZLLjSIw}o4_F;lUl+e;d_V0B zp+8ou(O+C34G?h&a>d36X_uaFJ_#Yum+EIwR_#`EB_+ZjCiJ4ZrKOBhY{Qj$L#ad& zY^yetJQK?!tBqjoyVn6)MJ2Bz$EHL2z!NrOUi*l@B|k7NQ+cYOI!7be73Wq#1ZUj4I{GB##VyEXJNi zF-)9wqqeJtrIs2aM$cdR($vqSJtW$QmK{vcecE?xh~hB ztqN(9@zUC@`*PyBg=$4&B|f!p9_k?Tw*M+*2w%gl(sp7E<$B>vNGruX<@w$%#e}pn ze})(4?PTSY)lqTZ{E^GW1Y#kQBvKQ$EfV;~;7D;w7DMAnwLKOm_Rdt`EY0+Ed7nPy zi(dBk?e9K2N7d}ngHnf5b9<_FOJ?~VreiYSWj6Yo%F@bWv>w)1?Ov*52yOp|?pYqM)RDzFiIzW|Itfx5J zca8$~X?t%bzl5siPEQLZ1O#p_aGB12TNDYfCOqI9Rpn3fYQo!KYR=RsdeKbs39Q(0YC#gK?r7_wW%38FK7dYXoYG?vnN|s&Db`LP4j}UW-P-XIJ?gCweGPVXP1p|XLW9* zZbUOW_RV|Hdtdh3Mgy#g40-Xq8k%rM67Pf$BsLWLa#@?>Z^sAMCY`G0>+d?s_l?6I z)jR-s4V>R>z1g)pJQ{wMoIb4!S!5<_BCFP>bANHJaBX|7JVz|)rPRdc;&CWDz;7Mg z5scFGsUhB3<6P!u*5~YW57POvBgIqvit+ZvUF2o%`&8Ui?q*>xTs)myth37P#Et6l zJ)e0u%3%tMuLKCjW|#uc5Gg5O_D`XpmQ|sszC9My(wPns&J+N(;x;?lRa zWY9IR)-z;qwzRqL2a3m;3plhiwAUqZwzROa<8tOD{n3I8IKDp(A|?6J#NM2jR8?Az zM9A9Kkc5qak%5tv50Qj~gvZvvh)Y3O^w;IUH(pW`dwUx$5Xi~NiNT4P!P?ds^n#O< z6U4{_Vq&5PTF~3MSlR13(_7h*{p{p-Kf;D~`nIMv_NLZWB=`O5>RCJ3^OBO@5A^5t zbDoCIrvDnr%I?=~0XGP`{{{4dff4klZ{Sj%`?Fkfrp|^IYQm;~C7}TEcUkmo@;{W;MuM2rV_xJuk 
zWbsqbKh6S@=0oHG{aH0W#Pig^YhWP>Ooe5YfKNcp?q4wX%1HI|6F7#B>M$PbYKDRm zfRYdvRC0#io<(RtKDp~WK&4meW{8NOGff}U$yR!)hEe95=rMm*Os?Trwx?EZfb&E? z<8VYWTUb4s6`h{uX3dVJlhZiHt-sc|*e!X2hseRG=_q-xmuGFpt~2@U!gJ)t{8g(j z6bw9x_kZw5APF&xOaLQSC1peu3K|wo;Q#z*9|0|GG2}n#{C<422^d(D@0W*mhyU4x zpn&GisQ=O2f$qOnLP00gG+ih0{D=1o1&ufN;6I(oTY3R+ieeByW!Qfp$@?Y(-u`F* zVN-7bO(e9mk6S$hU1a~EECI3e{)bKfEB1e<5C0YWzuQ~?)%Jh0b^mK*{>^6kzs$A^ ze11IW?zQkCiQ|G><*MgrY{mzCf&KmFf(a+^hXw6t2AVshi3lkx_9g!KP*7!q%RWf)Tm z0mZ`olTWFH8b2p(1C5k)y@NV+*7hZh`MChb*Do;t5S1_W{*4cr?f2z9-SHf}KR*kY zq5YQds0|g)*B8KqNEpZ{eeR`%7IyFb4?f%V|9#KSWcyQddn@N%^9`m+uhIYD4j%M{ zps`XzasdntS#_ZwXoRF47Je}6DJ%EOzre}DI~)=LyDvI=GHAb6n?V2?hNZMI57tC) zOTzr;m|1ZEM!?K868n1!n)~-Fb1v13K>AZ;EpSwT!>_`LhLX16HAVi>rw=9ui9n>@ z=r;Uc=*S02+Z(D6&g&zm;4j!z0|f^^`1Lg7N!rIE%A%jE9j*tAp%9!v`1j)5d_kIo zGYQ0Mx_!rQ{b&AY;u#T@5CQ07h(Va^x?}>BI88#qQZ22pbx2FJ$V$&n{EoZ8Db;XhVK2&Yw+(7JIK# zhh=udza|y#59`dZeA7-Uu+;d%?WZQSU_e_%@#tpD{|#xsD|kamYic{n;QgA^a5&U} zUqiDhtVsjA5$ew_ok?3lA}9-40{;y9YgcOCZ|$C*eu-b0O#tu7LT&gNP7qYcS;sc? z&n{(QYvd1oqecIv)IY`XK?+dLw=PqhroSd7F7#uu_5QoqF#lcb|HW$mcW3_)$N%#? zTORN3^lH!c`&V!Txt&p0+zX+oq7vMn_HhWwCKI!fP~&g==q*qJ*ycB=F+Z)20F=)t z#l3X}!$T)91`i}%sLah=hPqcAF<+e4?-}l?==7B8gkDuQT$lAMPn#c!rxe(ASyp34 z2T0D+x`4ZubOv5^BU(7?2VL(qqSA5qMQtE%41UhfPJ#jK7)cSLO=i?UC-mD@ z2yEVa&dXv1u|F6F7I9ybO0F%@1%^eU#?!*rsN+@2Go9_ZNa4PPF+U`%@aeGv$qOb_ zsmtSycPeGe`2t~0?+uQ|Evv4a*G`lp7ziiYumfJgqKVr9ekr?FsssA(;5pn3aBEr9 zQf)qeJTBk8XSx$55mJrc*@shzPS@PBB$&Q3;a}0B){T zJ_F@%XaMg^24DjM^37kM6bfcd_8uERFw#n2j1w^>RPE@MQ`BW#NzRdxJv#YlrC$}O z3V6n^2Gn)X&*2E%si`|ma z?aafnoZ^sl$HC}Jp+p3&*y%TRqo6zh$O*T{=;5QMaf(TMuO`9S4IuzpoQ$vWXSIH- zkGHZnRLGbwP!I6aqm!`yfF}il-V|TYtLMF9nW9q=sd=mOeBwlr&3f);?Q%6;+OZBt ze1r~9oaTF)z|GO*w69YO&gThGztKNW!9hzqNa9fA1|VZ-*g$|CcCQF!ONfHKm4)B$ zCHf<**MK6t#*qBi4*G#lzz?Al(3X-@JsA2R4oJY*9Qv(PwHG-r*AQV=Wv*Qnw6&Wg zje4~gXz3lS5pelRoCJ`K6kh^jm824DMulNn+lC)ZDu2Vk1*}?zoj&dFd;2X#v=RVc zlqSgii7>J#e&`GTR%cI&h{Q1IW>GnB}^XW6^nYDL)w9?p zw!E;|X1hPA%XtFXi+G{`u$E3=w3f?QrpxQ5vdfJBxTYU48K-yWk?yxE+WI=p5`ca9 zR>Y1YZA-r#oI_R-Yp)Z-hyYp3&d`P%dEK^}Xt8Gc9-#Yt;k>lHi1p3iscPZ4-6=54D( zi?Y?TJ1r*UKL~IDhQ%*anD8G<11|=wBZuVZAC>PfY>aS^Gpk&?9!3>vmdm^@ z)He22Ly2n-1*6XBDBz72>%wXUa*PjQog`L8HeNMVjk*+Ww+%N6nd822jA%F*=g!a# zYLO6nf*?B)mHS!wz|RS0f{@Er>2;BdG&Y^K?o@wa>{-Ihp+R)uVNGdjp&Cx&2Du)WW2Q1OFQIU151s|uUrDgUE zi{^{;X{oKR3;kq{u<4@_6pj6`uVVISUBZG1M#UWWW+?|)tgSUVi}(A#S9kf!4?v1C z9z%-(8;ZbG^+(M4zW^!pd%%tsy!qRz2p}Q>L5`aZ49l|)DV3s6==u}^Q7IY(%ni*^ zHAkU#a4+H@XMIk?IY;HT+p$Z!)j5I*o*%Wyy_(D|tA~grw3F%uRv9$S>iP0t?pPX| zGz;tOu{%2K(JM|BtFn08isbYdboOgiZS}Pd+I-!d(2f;s>xe68bKRNCujG**nsy7& z@Xm2AO^oDWn{`P}P|kKJ7uEHPYUP}qbvuU(nSaKx>#Y}v)+R0Y9 z5Hp0h?MZbKEk4>U?uk{w?z6q!F5UhB1N#^durvN2`}jwkg@8r>DjDJU+fqVX-^XGh z=5GpMO<-7qFKmch%}dhtE|?T*7D7UyBg2gdaroI`;4HZ)MKKE*@kiuZD&CR>>ju~+ zirh}@d7aCrNSf^Ggc{|nMn-2VZW&c2a>1Ec&<0?d4G1Pd62G1U!iG~Y(}t`ZWseWdwm9rX%CMU0XT$NjCt}$AioU> z2{qtTBoIo8egg%L=KJVxE)`1p!$EqRShm@Y@k_~MoFg5tWXx}=0~1G*L!|6@=?U*5 z=*_X$kYUEx!}+4kDTJ|7UL+3f_r8mdT#g6=u?1a&cj``J&iKwJn+f`YQK&P8dVFQN z5C&oWwJUHwjVGrdTk>VB07{%SLzaC5`AP$uA(*OyJ>W=i+1V@*F+Xq(ex7 zvZrR9gUQN#j}!Hfr#RKm66q)n6iaMQ4Qg+v z8wAZAJxay!s&_G!uxrv9d86Hy1><#(H(5N!uUnLFT|AmiAe`mKN6|o-JHoh>4nPW` z@58@&gFhDg9(O(t>ZyJBZ;{-#H5^}fpn+zd%jupGLe2> z?(xP5rxlC!kU~78L(I~pjmq)vkRVMIy?%bv*_L&dXJl2(p*W(MGMJQ5owd^9iKRCM z%l=4%2m92;##+oN8MVno*~ig*`LSk+!j~k|Z`K^?vqZ-NZ>z|Xt2Q%S#fVHPh~hnls#{iGPM~rk{E~?5sx7j zpLi4~pnC6r+J(t~tIZ#1051gipQqwV|MEYX_x`7oQb1oHpzf}+9bY0PvVOwCqO*Db zB1sC=<>A24C6!kXAaf-s3o;x&<`i^* z!3msIS7K?&jyxUxjxA%R+tB%BbM*0yW<$jYQLN|H4(i5aCU|@H%wf>eL68@_H{tol 
zNH+Ui59w67A@b*u(lxs>%d1C%hO6&R1(d)t4%E&7-1(NEKVZMVwP@nKyTq z+)nlQv2!%fVfYC-p3l3VCEp3#t@bXuiEe=QZT#0>5`Dj&V1dSaw3O9Kh?1xl$h!Q( z)x?Whp+sPit9p*wj7MIAtJ;!CNr$-O43Ba2#kw9lT?m`a_lZbRM*U2l>z11)Mj5%x zJNhKfBe4DYP^V)}t|n)BPq$w=U1BYq3ClT?6V5*Pa&@VcppYy?Kd98@bO`U!c`9G-Ot3SKSgXZr-zJ znH^oUU@Jy;RM)Y^0{f>0COJ;k&7fr0Q@a`a@#Pl^U*QMhB0VRob`q$rym(0Pd|j%{ z-FoDN=(!wY3Avr(6JZ-;7?c(KkGXNghtFOOtfj>EX;j0FG2?9a)01x}xn>NdMgy@_ zd;rezXI*dr?KnIQGY*S4i(xNgSKPDrYRhg~4SJQqc!vGh&$}C0YW1u%!(_DuY#3a=yy_?#wbsH-H(vK+7-D;Y7Ee9)h| z#=Q?y`bg)uK%#DAsa8*-YETPHTxM-+k0XL^M5uLfq`i$gbL+E2u6A#i9w*`IYK~k9 z73UXEKz%wr#GEb&D=tUMH?5T^*H(Y7s9k}2m{f#*efQ-i<>Vv^Q`6PzBUB$J(yAT^ zi0!teP0@e5+g}+5A7?CaBxt;w#Ceb?>ZsT4L4c;N`^BMcrpnr2b7sfZNzmC5pUr-_ zQUPSLb>X<7UE$L`%>kIpR)a+y!rM?nYnx;qLZV07i+v|s)}z-Oxzdh7n`_CMdeIHrl03l3Xt6=6_S zoa)29qkr)=i~4U*_Fa>N^`XfvkB9o4^Ooh*`ReP7FYC3C-JasJ8Hf5AJL9+1Z_kor zQWlo3BRa-5AUBrvhf9VlQw*O@IEYkC@~;glJI_U=RHlgxdngbhcoR?5!Yo%<1)cc|97MqoREy#3s8#iC2z|}TE##SQ* ziCiLT8xhuolw>shgQLemw)^1}PT5+4$Klw-e7Dp59%=y`a!kW*H>npIdFdTm=VDx| z8Do~h&GcBd@(2LgL}XI9$jyV%Iss6+GPsx-bFzpj{3LDYS;VRgKE6!(cg$8Gly)dv zXh+bk)K%%)6`d+PJM~wtT1@f9-m!du0bsQn!T=QIEK4z_ohjqTpwZD=qvqEqG{fo2 zijY-N={TC8ST#5F;cmO!FeJRlft@&g!~0#b@tKuLuh#%_6qF+u;E`bYIDLi1!}6F* zxmw;lcdW^_(KE<83+VBJz7Ot%NTj{H8ksga? zk~RD2Ze9g-bv9FyT5$i0lHVbc+`{b6o>Xqi4W04n_pkDz)~9}SE0|fxKJB#HpX8Tv zrir$rO53s~=|wCjvfEoPJIYfVJDAf3sW~&B`&MD?+M032*o4Kq!Y83Z=NiYRC*yp* zoOEWuG?zDbgEc9nK#w-bTMYu8;Wc(O(BQLKzMXaKi>SQ^zdFf|v#vdE#O~Yb{1MTt zpOs_!Efe4@%eI1ehpVLYgzsA5%@&&d#9>$xPKmiKytSAG4X4Xx^UKy;mk~5wBHs#e zkJOZgINBolOeP2I<^pk3)yCX1tjYA#Qvhv@oU*V192|=hR@X9Hu>5;2)?lxv)2KqM zZ>Q-xQ-WSvGby$*BtV5g!i_&YO>n5huM{btdBEAG9Y@up<1#k$^>8}hk~#lE*ig#F z1R&4k&|~ze0qaA@YKru?^*P5Dr-x3^kxhwU_;_QmmYXW(A?SZB8#R0u8|-?eYT*nJ zF)mkLduj=F>lr~=dKY4uyFRnSZ)oV9^?SHQQIDHdLw8q6MV50~+10x<={f9Hb@E5@ zvR8ndl*dP=>6=om`^+)=Gl59kJ@(V6OE`tiGs&7K(;r2qXvT>$XktKQjdc$2)$t#; z4m7Fs%093Iskj&MM-Rb;5Rw5@r(F+no5>W_M<$hf1nf{)R-y3{4~}~q0Wd%zol0+qP4}YUVWSKYvr^!^tJQg?Gs56|f_1lpC zaH*IMB;_e-Ba^<^<8Zg#8s6bPz1Lo=4I)eqt2~g?nBHU>fT`B6-+f7SeU<99>|-lC znIc(8Oy~JC1T>jaDab)kXm&)F;)F8=m>gv2go-1p74b%F@veE=Pf1eMeD4i(yhX!0sePAL&}1CfGCIGDpt4x$Mpbb5Di&^M zZ1@<}4z-Px3}Kuov>Qm8<&`$Q7yWiAoftWz64Y@*p1!MCd;&jMb6CO@55xrI7~y?? zClm!*K;d{WELv{J;YE8^XT*xO#xebg3=_+>T(y0|mAudiQ3!(^H}p>T04ht)C$E?g zKN|%ILxGfZC-kj8G`-_$I z_TnD3a-(P|Q0=738o2o`=24GIc@ennA#0w7&~9C0jX{@twC3SFoB4#b?W;Bj9A#1~ zXz^4a^N~urD{m(eUsa(FdQ9!CL>-*UcE1Q@pHzU^W)i7vwKBvpfNChX@$#POT^VC~9x_NEg|IhOvAr$7$-0ZXSR=dN2nnyd6N=eJ8`q?L^0 z4I!1%B!7S|0qQ!0tW{9P^nFJ0(x&c~WEC4*clkWF{v>vzIKK0`(oJd!9wYP{gP`?;MPRzOsA7Z0Vp;vFVP1iK#shKdn{f@E7f>_+Qxp@cAL6<#(!snOqV4L|JC*l8{CNTKrF5u z_GBTt>3B&BOQA|n+3r^`rN?aKW^I6HUdrtji27zSGRCq(;7+haZ}HXLRZn5o5*)vL zeY!qaJaK}a`JGB^+!#54%VAio?(*JVb=u{-NqsAUZlu$`jbt(!eJ12D^}e6grjhgR zs8lB_=OAbo;QZNxBHYL3ER0?ete}?oSuJ3vZH!jMkGf~ru2ayFp;ZBhLswD$Z^#D? 
z8-d0#4~_T7Q^ z$}wd#qj2E7V^B6vEnq<;i{A_bSr>#C(+3+2rbA6`_aw1o7%z=s5042fw^gJR10Mtu zRLlv`c7>Wp3BjpMBaYnky>8N0YD`WEUN^;{Q}Lzoe$z|SCDHTSXAAQS&A7;_>&WS+ z777;!Wf_gun(jj)f@9;>u2PpMctilseuc~ZDsWa|RP(qQPtSJQHX z-PUZ!`eT>Ac&>4r$1iAMLk-n4)eTKeAE#!f2Mtn_za&Y&s?hLsgE($PaI{!OgXt>P zB7_3MBq7Bg4`KXOI_ zN-%580!KHf{N0KOeS~2ecPI_cyGmU#tU=kKka>Yh+aL<$=jk%iJ&xM<=^#FVkzx&N zm%aC4^s!8(8gBVYxAON6Lh7f3I0;=bX~Y!ogHBk~r|2qEHmA>>&-1i2z3d%{Ib6mD zY-H^_32Rwzik5W<@u^fMHeuVQ2kG6(gajMef#>pCs)tX2w~>W0G3`~!@y=TY#!MgY z&y^`}30lSP1m)7*adkj+7l&G8{I~Xl6U<%LB0cXzWrZ+2TulPO7qc2uq6*YC604Js@iA7qqfX(tPw{!T3boJbOg>Q9 z3=>JF6e83 z(_RnXVcK4T3d^le=EP*ulPlCu(uwnIIP~BZB5Hc4S+(AE>d$Ee{BJc+Jx@X!st%+t zhMo7~A|2z|JeQBE<~fGy26*|03(Z_C=kiisxx7uuo}qJo``zR|6a~U?Ufbh1CC|@3 zUYzzhlfa{xyWD|6L>fS@*ss_K7h2^$@ODxC_>V*$I(*UqRNvv)R&cYttPRifvm1=; z;~R3z31a?(f>G1S;REX(C+ucvieT3{M-32CZeIc=s9(#VKIj4@6OJF;>_urj+|2KF zF{aYAhQX!ju?r-}PcDk1dsVdJlZ^Dsc8I;MsZmu5bs?{(#E8OgKh9cAh$yU|GCFkl z%G(}E`;nUVE2!KR*Sif6aoVhxEPqSUvZAri+}j(E348{3Nxq(nLKN}7!z&<+C^{$v zq;K4nq!x(HfO&kj8{U_p&T}&{e|S>AOh&yVv4*`=Dr>!_Jh8Q}nY7+IKKE>n86yC$ov+kB+1LO?l>(||s z!I7@SOxxv}(Q1hJI?tOrYP2^-QVHj>n_hhWs;zJTaZn=lQ=ujYZ3V~XN)#9NY#7(( z)vJMvQS0W5Vh@x|_L8mQzO74KofSkx8`+E_9e zYmG7s1EQ`=Yt?UjU~GkM-9=H&W4VYDJsEbcPVt9^xENlD#(vXx-^HmviAgj58j8*W`t=ZhZ17vk3*?cCFjcU1lwufU``@pbm%fV}9(6c5*aN1E!m9K53353pyI;Kae0=tWy3^}2)x{OIKe6E|Nq) zACEez_~Gnp^BGs$0~JJlQEQIeP(q^PFv6Q`E$(!`rGb~fAX_ms8FEW6o9iGzWeE++ zUjr;e6paRJUays4wGKdi{m4ZLC5w*3`jrWBqdKtdvolw)v;AdCmo`z!Kh33WKUZ#4L+`$0P^akm2f2Ki>4oe7s z2}EhPcFS>zV@v!%!@5cf?Dy%RNd;wr_Je?j9{Fq~2kmw_-IXpy{5W1IeDwJ2)}LyT zV!GHx2ir}DwRg=`z-I0*E}z}SrX+8gT}**Wni)X21Hwr&Cl|5+8=hqtkWw|!t%sE+ zk+08r+Vj?BW$dJ5pjEO{E^~U_uCF^RIy!*3Vc?r*GBUbpEhK9|y$Y8Qo=*KV&~iX= zwd&Gvs~fAQE4jmj)JUL9GZ$=?DaH{J!RWB68P5qiKl@OX&EE-TNVP`Lc)n)5gsmUf zsy)jk?n#E0AL(fn#+<_1bhCR^m5Si{77m6f&ej0ktcY#@)Xot} z46p@xk8=*V9XESjBpgH2bv(Uho}%mnX+>G4xXas4?rclrtE(s=4}Fn3577+F3j~M& zEDd3xFa%-2w!RA`5Bs1{k?Z!p&I9ixq3-qxf|D`8enh@wUp`ZwFfcyJ#Sid^Oc>>B zKpb56tZumC-xxQ4qY$uA$5+AK5e4s4LnPPIEhK6t?}jJYkI6-`4dlBG``?jwnJTE` z&Bm~mY*ew3Z_nMC1XSz{({?SPA`h-VEJdt!Fr9z-8K3uRJ&PSt%M}?d*K2aUuRH2R zn>+j0gxw-Td-4MQdPl`y>^F!nJs;Ax`+4kM5eoW}6?XWq^kWDHYVi*~?Y zzF5@nYfqcN@js2cuTRj4zn!gkmA#-n@SX-jXYz(RcZgObD{_SWmKDC-G2c%1yOq}i z@P&b)ngvgiQz9Pa=qCzMf%c?(rfb#>wG6pD&f%rb$x}Bx!MI1e<$KCq^aRI1HgzSF zFm<{K=fT<0YJt?sJo6o1BME(YEmG{mVnjD>Vl&j8*!8X1HY>7Rkh-}RuNbo8bJi)| zM~u~Puhin09OE#VT_Gd$$nhA~EBbdHPVQ>j>x2Alm{4hET0W@p z?<;$KktO0~S&s!j%yK>T(5`B#=PfatJTeZr*)+HkOjU3!;^s|U=BL{Bls9AdvJBqu z!>K>>^=L}RI?;e3gs?T z`mH*BI~b{ndStHiq3$m{}{jgrTVqH#8^Mj35+CM7Kd?O%9;2SU~!TLx>B zB{eackC(fRd51UQ0JTgItIEd&r|$|~EHiiN#1Zw;(3= z?e^Xb!usO#!u!yU7eh~5Wx=tW$$L;CE(F?*49V=XPl>k8Aghg+y~D{aCoX6}Q8}N6 z#pq{6rM+huWuuMc2Innvs~IcGZJCa#i=np zqGveTe21+vYNM}W7Pabi1yVAH2bvkF%XSHL%xNSIypw8bi(>^&be$}(>4wb658bMe z20dVzZgPA(<@F5-y)m^Sv6f*+qZ=sk8l-qfLAYgoJ&M!T)U?{+S`KM4#OU0GQ|y+x zgB~n(BUVAJhYCpvpa`jSlgv%jJ<=4?IeD0GI0CCv?Y+uH2q{sY?Evnz((iyss$?TS zx8Re4!*ui9c3&df9J`gpoA%JiW$hjZ@=rE7SC{oJ&jSLLP(-RUHIk$%#d)caGa7j9=hM zOyJ4wZCzzB)j|1&J72`zm=Kd&kq2L;yy!g7IQ>4@=h1WNFN>3*Qb-7Agc~g*wt|(@z#N}AlKi}dB!l_rI2*K0a zcqWdIyoNMo#XW)6w@nQ?f1rfp>MhzVJXa({tMLTPS8i{t?LqVvcsgSIeK>uTbovN9 zrqP)CKt&1iI*YlgEZ%s5;p?C-+5tbk*7a$4XgnMu=_1e53#+Ni{yT#a4QuYXmPBb2 z_-wg!{AE+JQTfjX2oO})a;x=%Q<{t=7zT|Cvj{Clvu^3m@X@yu+Z}t%ka4s=fA*@U z`n7!yJdVg%_9nBVu`jLXiW6(|@02GX4o>T%T88U3Lbv{{aYKx4_WIl7>~pT2CpjY4 z-sVaL%fw|g6{g54Kt$?lQVP=jtRLs^IFyT(TnFFS^|an0=Y_vH9mP%f#DSuYTK$}Z zGrjaTezl9z=grFLofS(XT4l}`wD+}V#Q}LnW5Hf@SkxvvWz05hQ57Rr-;Z6J%N*bD zoDj|IsofVQ_ypjvG~cv^8T4P19|Co@@4JO?Dv-vQvWde#cf`eBGUIX3YnQ5vh9o{w 
zT~3{dy1l2V*qhY_16Iw(N&}V##=Z`{UkTN#zJxFt&t0wS-&gkTus=Dbu=)~Iy208# zLkz}g0w@cdTh`Ubf7?KD>U-^cd+Ao5a@8vV`X=pX{VF0ICXicp?Iqx3} zoq)5(5M^4feYi)PAMabonk}+q*Ct!*_b|e&$I4?JO#<)($3dz0k*L$tP(yH$DhUvw zcD-#JLCgiUj~89|R#+}xXLkWv4;VM5KoJvxfxg{%Nn03}XkrlUfTQV}G4SM1#;>fC zSK{icLZUcxw;zmXM{LIf%IZ|O@`>M6>I3$pfvqF&+L4Z_(+>2$*t#3!N4>v_Z!qLY zp9BS*7`+E}Mb_8Sij9c4Wjnkh_Za{Izh~asV2*awN^-kl+^TKdwCg87%DG5I)mn$f zt5j`t4mB8vVg1nF13<}7DyfgUZW=QNE~TQ2@C?xF()Q4+pRKV7oVS+TRt+HaRV7sK zQE>C$%m6Icc(Hu@`ee)H#ReFy{V|Y_6$rAyfMG%R-r)EwcHB%RCfvz9;w>4|8y}+6 zRJ)QeqoVPo5S@ikN!e}keESIGe#Pam>7;+2@>ukoEXbfIK?HGLwPmCwns*1{o8SKnAk_Y;UeU+7rte4Z5WoA zpO=~4Ob*jGNHrA#GJ_RwlQ|^+&ejgU6EH&_^gSh7GT0ao3b`nmof+?e>btLk!|0gj zGzO{CNd<%Wr=^!fQ6jD{`cd#xIgg?RQCDVOJ}}alcDt|?1)V<%ZvNO*d!p3|x-`83 zD$%lM$WRj!@w}YwWH#B%c7Tx4^zfCIsb;gxn@`razW^)!iaOfm;MMs@ofVZO7va8# zr){_EIKK5x7E9)|06m=PS^OO{&I};qh5=FzqCeIORCQiv_ZEaSPk7dCA$d{J9zQeb zOd05^+996Y6$8sQa*cWDz`PD~s%oZu5KOe0oP&j=9aUhratubH6v#w%-=p#@I0#;x zNZW7i7Ab$O61sg`Wp4!3MBUD9SE^)0oXtFQ30_mbuZOz1x^%ia>p4_5Hyl~5eqY6m z)B7yF*YJon|IzB@;6?f)$!=n=l$dC9Z99)tzB3DBYMs?*<)-rmkb!r%uZ+Ki3#ORP z@tGvH_~BxRcJ;AiA<`Z8ZE>m;%Fmw$HL44W@l)xn>MkWLhZCRPJuetliCDpKevwVV}oV;UZ!4SlLJItCT4xTm4c&IzP`IAZQA)g5*EC-f&I zJcwNOJ-gz`65aO?D9c>7s>*3CZmro{MS?15cq7OhSjv^uz`N&r?mWKMix?`{QJHS|K&Ff!sukJ#E;u9~m8?SqZ z37s!o!jtA{=Wk1N%qT11%NTKvjZ1|Jj3UU~UJr#bpb&68*TEw*p;NCKCiWjn{i3GW zAvxZw5}!EL#B~wlb^Oew*w`3inL)o?D_3E=Xf5VV;ZAGzle7r`3_M#WsT}+M{hu

    nO~ zqBUeTwp+-J#<5ijD+ji>n-V|)70Y0*7-g5uMs|}W3ZBlWW}R*2@<8gqFn-|cL69m~ zt9gk1vycDFZ3<+KpY>Oixo80+tb?bOlu5hE)8Byl@hit%-eGo2h;-A%p441==NtCk zkC1`Zv?gAI-mj*&-ijt}LYI`18&^{PKovDVP!VjcN>U%qnv(ZC8N!CDo~@I!$NDBu zzlF-?DMNL@$+XD?3VjGVJk2>|!dwyZ^s+tw42jas>j48Z^;K|tAJLT* zGlh5#ZSg)>r@=is7>~cNnSkr4W19RnmC$1#s^~j`|CWJfZbHWiQ!fAQwc?YIxes$w zrzv|uV;;eCK9X~G)wR=iu1ybEKHXZZ`vmyoS4IZ-8to9wvk%}kQ_?)TPT9X?mI@o% z8hCHw1-9^XOFUj@7DpXo`ctfQUrl-1p`j5nP})(<>bd z=~~;Pic+X7tEDdJX{B4~f@jsg@`ZL2g5dVx6f>4VT^Rsy<@|{0%|rmw38=8g+o=`( z|JZxWu&Cc|Z5UCM5HJX7kVZuFfDut@=s;${`yy?2(er^=mVx31c0w!re83c4!cDjc zOXjT~&j-vdz}aHYdW;S0NM(Kw`mDi;T;op5&b%6K%64uq>YRpUqsd9e{4{ekifVS) zQF(nLG`;DNO%gZihpMBr4>M{dT49{8MjPTtNEEzqoqvRhye^^CJ zr}^!><=)4A0fU@HSk+!dGS&d+1R0x`2S#}uGU#6t%6Fwa%k2;*b&JASVOco|bCMcm zt$i)jhDHwq{4L9!=A9xu2*SvgICjeYn#tXh;jT~31I&rbxCF6RgQ5!}3F$`8YIv-k zK8~a5O0#qO%C4J-Ah!PX*RbV!@IFB#XJRLjaESruSpN6aecMiR%q zbYK$aFa{WlpDq!~zhos>e z*~!7jWBnZh19^(I{XX00InFw|h8UiF!t791T^)5U^Wx#WGgW^HJK_;ofpMw{plR`( z)3lO1pzyBPPialC!2UwyNJCj>80d|EFP8VGwI^=OmfQB*+EG?GEVo@YW%!~qi8JPU zOb5C|f?>fKQNX7AzpInbuv&mWYS0Z}Y zR{{BL&;z^O-sz`o)5C%n@wDP|sma(^g-L{Q^jb60oGx<-JdC?)(U7=fTwqgAG8>)h zo3YZ6;n5K8TVlzZ78B1RY0dkow*hy8zH@JtNFyQ75HzEM*z!U!y59bwCMzX_j5d2c z&gS9oVSwMwmvq%%8Lb;S39+Z`mLsN$$q6BSXmsi$%tKze+)HsUFS~(}y3^z{Aynf8 zzxp1r`o?~?qg)+^OK}1r8lyPZ`cmYvO^0dfz3M)3f@zh(XUyTb1#NiM?+jgjQJ#Xi zDfP1N82R8Fak*{X@v^KyksFUwkQyZjIdN==?e^}Eb1Z}bHWrz`kW5Y@pasd}9*@7) zDFRTT%N0NNl@3|AfbI!(J?{QOZEX#1kH*2VF<)7OpQXd)a^@S-m!CRC^v&fBj8_$>_G6iFU;-K;q<2cH@OKs-jn|R>(g-?!6qsQ_%2*IhI#$ zr`DRc3R}=XO*FfhxSvUOo~PZtf6UK{guAkEO!KGJ=$x5w?*oA8K@GLRQRq;>M2$P7 zvWrz&n)1*Pd(JWlw`Fjh#vz_PYm;!vi=ZH2{>7oFir z$fYyK7jLLB5sQ%V%&4r4h?+nyrF z8bU`0tw%a|(r>%6kdg$vK>H8kh=#)otDw_b@sE|>v@{`|-J7G1MpM?%GuDyYuX{_E zW`{p;y78Qup>pM)5cqHkxXVo+I-&--Z=X}KD9ix@d4bq=q&T*?bP@H1!yXh~k}#Z( z*JR;7Z@sTsoga-8)lGZI*TLta!8l|_FyP_tu%ipnK@Q&dDVbHz2{bZ%dlb=<3N3i zanjZ^y?MAzSp`)ad#&txEG>8FChrC3my@rRDl;E`xYYLrH;%t7N`)VQQMY7HUBM(R zZ#vp}InfaWe8s$PV&dPeCmV50khRG{snQI_{dg_3%?GpYu4?j>X&V!)9k1A%A`t@< zm+@jxUnIxj)UE=hJ-(1N>UxaMQlWVVpD-z8cJ3~R#~6A^+*$wX6yigbGVMCI%dz8S zW02kH;7;}N)uWSfeeuIl()TtR$#dEIIrhlMq0Xh^lYZ#~z;}VEL75sP6-rkGM_vOk zJ-n*711%DCSAK)_X~KCat34XtlDMA3vYa#uYg*#-pRnYo?#cdO$6D&kV_MbV+U$2C zgvLI!XZ>#g{s<=~J=VNcMDCZ-xGE;g@3BJ1bitC3IOyrACONZiJKSh87{{_N#R|J>dH3fB+HF<47Fy%{ zfdV*ke?}{V-0O#%$iy_tT%7Kj7zH&OMx;Z@W$A*OzZxxx(*i(P$&Qhg^s46W@ z+~TkhW0Q_yOWyyP(}qN(@JV9VP|xbRfPX^qwru}t={2LL#q`~HyheaOH2lH(BlS+_>C4{^B?DDCg;#u z#_@9G9_pL{b~}@hxbDL@+Ex)uP7_?6B;8uhR+Dk#Tk+D`5 zX)X>NwN))-K_tPIxO=bXf<2b{GUL9)wS8CbZcEfmkZJtpzYXPiO{^I|Q2Wh%V=O~6 zsdhGFQBZMjbIzh}QiuZVs3Jj&?x`XpITtDh_X;GByt2##MTR8S=z5D&?tL!J1qUz) zRhPWTU1%BE8Ww}UYRO<@!k2&6z9%Y`XsqF9wWNz9WX00QuA2XLE>skHUQazq=F~;b z|BFJu5@F-E@%YtXtZ28eq-TuQ6&bxdD^as-Rm~?GGU%!~RLgbP6;8P)e};(JA>wP} z6$-0HtaVGT)zv=l;;Bw)h+Et+l2>lKu;| zl*aNQ_IZR>%0OoGTg5g{|1F#U)jd)CVpy0LHO2Nsrbpi2vb%+m{nn7Tau|!1@QsCC zftB9dfK>3PkzEY%_5tkVNj_xJRio@}1fVf6GZCIr?Spj~a_|k!Iuf$ZytZyDK1h$B z(k&1cq&UbN_lXjNkv!=@$7J>+5X(O#9V5052gwQ%MQ^{q1ggHS8Kif!K zm8k#PxPr=6J;6AZx{RDrCTDjwYthq!jc_M@d3&Q&p078cD8juK?d^bEmZmb8NxPx3 zq8SRP*d#sXxr0NdO52b%X1Yh1_>t4}p|FX2Q3&feor-t3CXlWLrW#)_9XKDu zgX?&$*gSJd`R)4lw#^AqIV;YKzejE#Ove}1w{2Jo56Rh%Hr{*~e--9cGp(#{?Vcnc zH@AvU-yesRc;L*g_Yb%SM86*`p;wK`|6 zWw|6x392F-p)MfjDjl+w0h%TJicPen)T8h+S9O?)XQ9cBT8S63rTW^GVq|71`X#~^zuSNMXg9(m>D?IZ3zoDX zho+RENWD5Ewtf3=dWG+^O_p*n-6=ZTQXN7@VfJ|X_d1PeIZWv+=*Q(m*QRcr$6yr4{27*Oi0UuacF4;FfvaU??XR!Wd=4dJ1+2+MxDd!oJmBv1 z3cOaU*1cAeZ}h;ji4VeVbxzps3+37TVrfmh&QrHy`7FyjH0ZD&=4o+pGCZ6bkciB< z6oZPrh`pM8hl^AVE>5lW<}Pcy`ZHPpDTlatFpu18=427Pg4V!Hf78fkJX}F@Vwi5+ 
z^_sugH;WEkIf)q9TxWPNQO7RR+(1KsR~ouekBoOCXo1YiyhD*o2UME6RFJL}>(3X^ z6lQtK^I`WIT9FP(d{B2J4@tZv;iPoV6=+$)%Rcht|~vMkjpWC({(5hlLrs*qqskcCEW2G zM3X&VD>98($QG22`|UfZ-W^VPz^MMDjT$c}?=3kBW>Cu9w^yBg-Dpmpf*#GfdA+czX(9i!c<<2l(GO!_sS0V3ruAdbNE#y~ z;fe?h{$xb&ruKeO-9i;Gt2CH!(8Z;m!x{72s`pklkj9uZ%O+4+d$=90hilsrcii{GklX2G@u1^)p ztO6U4KrSyjxf*6Clz^2 zeIhO9PJ@7-{KYJZKLKoKlNyOlvCI}ROjbQ*#v306E1cFEG%h?#KGL)7_Sn4mH_yWF z(|0RHk&i6Sc&4)DCN5SguB;|k0X=uFDhAA}XtT>aa&zNecDBtY)X=7%1SuHvFAYH{ z=JAodL&CuEj_8&Pl5;$ry(qC*PUfGV!&kJ4qe`sG@?X zA%%`hD^qXD4#=MjIPbcy5Te)LAP4#!rgGVjh<(SkLNJ zG)omfkgZx9=wRY#^&Gu|DuFJHh$(sP@PUp_d22a~Wnqtgm<*WiqlK3ZE~GA}RHI=h~2RYty_Vi6jrJFP54mpM&4uMZ+2@=zZX^A#Bv+(B$1RlCf<;@kD^f zq(rZ0XPCpd{uV4wEvA-6okJI2rQl^u|FRA)Q!knaU$s$-)}T%hGYJ}=#n}e+G%?$I zT#*r?r)G)$UsYp!aHaYKXG!6?M_$L$W_{L1<3kM6D+4-AQC@vciQ1q1On7RF9yv4< zXWocRa`PXMquuMTN_P=7UyvGHcEn6%QYkb(5~~I)ReEwY{kxX7<`;Xnx~BUWuM z8awPKfNZ)%Yq^VTalRKl?b^N7_ia{6XCmW*t*Ntx%|8tbpXV&B$E(G3#>lc^ti^s` zPR14vr+yqJJ)24VJX9J>pv;n$J2}e$(!m@&YaCr3du!f;+>HxF$?f?xGQrvWiVB!{ zA$0NZwC`l0(-jPp%z{zLI7nGd-PxwR=_r4<(&@3ia7bss5%X*j6L2hfpcO_XHf_;B zeBoHikdEbW%f{y491EjQA8;4nd#~vxdlfy#0qpgW>40N5$ztP9R+r#wz?5ggE!0bB z4%h|12ty7U9((bS7kK=1V~AxjH)7PAtel>Zy{akj+zhqV@ir#v^*!xi+wW%ZSnD1kr+nMLSRF$QMxv zJ$Sb3ag42A0k0o^;Ox9C-?eC^rqpCc*AD+s(p`_w zNvGQiE$^uzGaXSdbhFvuC3~ig;~fDHGcEd)Qn5Gw%vZXd!4$k7=sl*m87DcN*spO zW6lbn*1NT;?CyNZlKn7NyUS4q2t(osF4E7!B`*A%TFtWKC6>4UXM1?V<1TY;I*0>) z*Y^l%zWM6&9$|P&&d8e%SV<4bg3xy3}JCR{e z@UWed|G}ZPnF_QVk433jYvuxnFu-Egt(8YMccF>q+jG%#*d*!AUv$B4qY^hB3GYTu z4dU}Pzh5;$r=kXt;dpLm|7utKl%6~rhTz&T2D-0Ld)>gpxSI<0-1IrN6Ey_B=D&n_ zFNHENPl}!LF6wIr^t-PEC|?_31m40cNh$dF2(32`<$Yd&Ort|4PQ}PlQW8JsIJbw< zocp{>hm)$Ks^eZ}yz~6p@dPU>*RX8`Mwda8$0YOldpAp7@+53M)&|4UF8ZP8d&9&e zuk5Er-h4*)_rBzm=U|msOXw)*$qBAqHoi!#c`!F1Pv_P#FU7Ty*k3vSVjOqF-?7Qa z#uPH~4bsBY3}zDSO?w26`kDF0M~+lGGkWK={w9H;(}3kiH{ucEU=TingX;XqU8uv~ z!F{)`Hud;|t$0k{VYcmvn zdZ0xL79PI3elCg>1~dx~MWw0OB4uO3*^HT$BGoKOG_$@@vscP;9TSrBr2k5Tk$d2Pq^TR(C?DStZ;T2@+xyC?Aeh7+ z694vJE5^_NUnp30-twO0hw1N+S%m)u0eM=4_GIcC=w^FABs>>?vEtW8LfQ$Dh$_L{ zZ(PjVk^H(wf8A~J+XMnIA&ElBV;3#3sQUsOj!0B-nIr3RPdekqbgKmMd z6=l6*%3u=w{f*uU(=Ig%$g%}<3Igu^fF#jqr{NA8FL;UOi1!j1Mu*qC8 zNp*2BTi2B!PH=tQH&N)yb!~>>_e=jYPmw#l<=CtL>@~fKHMFnI@Kd>?s^MlzOO<9L z#`&+lD1RLw%qZ%eYwlTp_KnyF#X9XBO02}6eOl)azH$qU_XmWC|9%@W1JrU)!oZcc z_8VeXOzbv3+qRzniUD~Fudw7wJ0o04f~FKJVlP1|8%=z=-@L_ZQS{@5&R+L*YyXnkLmv1bqti!m6h$_Q^l1IE&f^l zdVVb7m%vkq#0>wi?7pU=K(W-nKViqe6nBN4>-=QHS5e&+TMGo!zj3{sc%lp}yQln7 z+DbJ;$9ZV@V`eEsz$6%XD&nv%oRtqL{-pD#HaH(DS1j)w`ngmL9(uFM5WU0}HvLG6 zZJmnI-g)IOaa{t-T(7*pwZ?>7oS&*91H2Mty%x+ufGyz1b;Cy=%J*F8#x*bFJ~b(oN@p7_uBeP zh2Zv}Li9i3ixP4LSmyTLoBu{moIIxCH`-$}$=I+J1n=w5fB*iM%+XDNDrmfB`Tc>z z6H$TZI5+|dzuicBzDeFE7=7arBXu58qQl~rDu>h3N@-|!xuJe`tuCUpv5V-BBAh!} z9|v%b>GveP&ix?pz#7Q-R>S+>eOdP&2l&GVC3zw!E$JU0eX(ix-yAnGN}l{TQF1_v zPLB6sgQm&dDi+c!|M)lz17J&kAYyg?=S8fey!K04ya3bsH?5+McUURqFW!MfF_MK< z0A|ud`rqH-;;Y>NPkS(V|Id*Gh`n|c^a|V4l68%tNVuq9bQmmJAd1h7pB03)#*~PjA*S_N4nT2hZjTE*UZvh ziz%XyIcQS2?7hLgO@5fV-SHu*V&bEXsNf9-oL_GbY5U*5{S}j2U%P_EuAoAXs}(!U zdwLwxiNPlDl19Q7k)stdESFWfYXjNTM=BGret%5VA9BE>9oRQ1dthP1_+~qW=l^VK z{-uLH7Rb%xmd_01eOKPWHu9H^u<5N`lRtIf3$fX zE*dkI99bztxH&TFR3k_u7DeiS5p{Hf0k4F+-6lo8;~QrsFabHSk7^1+R%w6 z30c#S+08Fj7#M@H6E0$Rk-9%6i{)Ed7n7^Dw;n51AIQ_B_c}d@Sy8O3Q5kopU{p+f zSl#;_p|+h5zj0JScC=S3oBg49Fv(43wwCD!BJGmT6Qn^Z7eC^(7J%^~mM1=$$bU%t zD4!CYmYGSFdF(lk zj8s^%obm08Wv;ORzvCQxc1Xgp+A<7QhRN1I9@Q#N11jH#IXMz~Y~PylNz2%iHmr_) z4nwD?(Q1ci8T>t)ks37oU?P5_)Rm#ShG9p!_dc=R!?tTm$!w?z^#D-eGDtM>yN9nY9e=hn_&OUqiSQUB)yLn@_luWCzx1J`gE_q zLf*dVtn~d-|8CSf<)rY;IU#z@ax2H(><2+cA$ndhts+r-AK3IeCmO=!9d_5N=E?0w 
[GIT binary patch: base85-encoded literal data omitted (not human-readable; apply the original patch to reproduce the binary content)]
zi+vZIOTJWBQ+3#v3-it?NiAoVlq3=$DU5LkyziH9DE?3^;E1$JrB|oncyFys-Q0dZEPp zp}Z1*9Ze*y-fRA=Rnvr#+u-=ZY+GuxW(k@5N9~%_RDJ!5{ri)L>oD`jw~_~v<%?6j zM(A+!iab6xcmaL_=^IhfFEGY~FMGI}55tYjnGz|r3^j1pe49O|*PMyyT@QO0pOh<_ zZD3uGYRRto<{(LyD3G3dsmk{?f0%3^IrJI~Z7~DBTv}fBQPHUyHF=>rz20H2=9E~x z`!&>US0-;&@`%kCYLpn@PxA2ygmkkg;pDGYMh6sLgOPXzCxhdKqqke@SUNM`hNf%E zsx@TN_$JxN%G=3Ug}^7oksQ1ua8<bC;+1)7OIzYF)Jd!9XDnxOgS3%i+2 zN31rR`k%i@*i{{=FdX?5P&G>Nl{256@U5JfRD@n5eYhK-zZ$fRKI%g7q_tm3CJM{$ z&)g;Y6w-v#B^Zfcs*(dQ_2-PdJ=1@mkyk39EDI5d!6d($ub~z9ziM(h17g_1NYc!- z10%`##2P2pp$kRt^9l0X3|c7FUC~R%4zCth)`~5J#&uj_FrjFZJat^03c)r z3H`3}TMeMj2H|6haTv@iXlz}Yx|*+Fn`q2 z)Vjr+2Kmtc^H!NXGe1_9D#PrMhglrqha@kK@WWEs#E2v5ZbPfE z#9kpCO&3^V4}-n8_S+^NKFT3UR!%l5P1P?x#N$DT4@>^Y+1^05Ss;CDN;tR-56`g) z>+Xa|*KN6_#mOzU{aVLzDH5~p_F*Vh(5so`P+<$rIpmbS{M*FA$Qg+GinFr^3mABz5f-0+0D(m zTfr1ExykQ*$^CAU4}`qc9%lM!rxLG`re_ys z6zc9D2V6>3g5_y<>R7LRT?P@q(r|=sUU9tlW4>HJa@2OFk3DBYSCz-m1&*x|X$C`CU=BSPI3DFq z>?YL*P)>kvGmg5-*T*W1eFoF0CH=j5qMOO;mo`Ux@Ml{)^3-2(yke{OJ8drGs?FJX z%5baO>l+$e@5j0iVaNPc<0igqnEIdo%yVu3lp^x%riU!~#plEi@!ft|a(|Q1+eiD3 zJ$`>@%fXWOieU#%o;`7TlKuJplf$CnINzp<8Fp+~wMB+qK*dy|tO*KHdBtrw9X3%F z@cVx=TyeYg5n*k&z#1n#1By$6Yp z$JHR~)~R`|YhOJ|g4l||$R1qKYnB%73AXCA_$< zD~ip`fP-YkT0z$8OUwo2i1w}R+5I7Lbu-^zv3dKnlPXVFId{RDKhA9y)|s5!FB0ig zsnb!W=3((f&s<-R@f;iC`}n(lGV*JVg5&Q~H6~WXHot3)9p2bB)oI0x!XTtBKJ5Q@T2JjuHy}4NPDFozFXMAlYMv5YOc_^-+BE^z$M0mneAemGse*J^^9iL)U@mig78%5z04>c zg}Dyp!Vl|t@@fxM<&y{7Ew>oVe5vQ3bGT%Jnu@tq)E#>Fg=PiU_r{un?1@tSjtPST z{`pQVOdRR;fR?5{Az0*|R{nMUVYjBdqpTo17PovT;_g^s08gzhaatuoSUUh%odbfq z-2FWHhi?(V`O3(0BMt-FKl4W?4D#mA_)Rix1>}}nV{Q_6B$F__t~jEItJci718hK! z`7>A;Z{L;bW(L5Y@1R=&I)}d7>bOzniJZ@T39{od({dH1Lg%B zOYM^KJ1WFJHdn$V^TsObkGSC8w$HSuR^4kx+G=GPj<$ttdi3VF7MGJn*0+(Mn4Y=+RZV#o8$&GYNzg0w=L)#XQkS?fHp=y!cIsT#t?Co&{e&at1_N`W zHz6s#G4Cv!r{7s({d=P`UBB8~!@beym`RT>d|V!?mR@{2BJ-Zx2vN^@Xbf8zEl*)O zq{YppIqIi=#c^Dz947=`7VQ}n7!AN^QA4C3;5)6Brk`{j+eZ1O&Wb;7_dDo56+bDB<|ynM_S*Cbn%tGcdFkvYtJA*z3#)tSX}tCVNhTId5FG=PSg~;p zHRAM;3X1vl84-#>mKw;B6aM3)@q~jyD921M`jaPr@8eCTw$c9i$G1~=^lN9k!SsvA z8sL%h6Y2`!ki$$HRnXnrBZB7WpINOFi!K0N^a72?TANJ2_=7QGb(qBvL!l4q2}wPCjax8Z4_Yq0fUz9A=k*c%uO3UtEj!=;?1VDVzFuim*nX;w z$AufYTCwf|#F~kCQ?VJ$da3*xx8daus&QSK+xrO^Xa9V{iurm=_i@1=*hZv|gbe5_ zY>xo%e#%DkH9P^}&HPcA;H7`PBG8mUGYQ1X&j@U<7Qb!>CJIbY{X*afemj6zFDEj0 zEGrl+Lke(d;IM6t8~gSH&}U(_4}SFuO=JkUUvN&p!E5g0RMu{&pxJXyfZ{K>b=eAl{=% zh@a{{wArT~FyOCe!5w4yenCstiu*OR04i7>Ra;*Yo&QH1aMrh+f7RG}&KE3ch$vS+SMOitc zxH^5#DozEQs;6hM;@(eW23BZ%cld^xDHWC4@OBm=p*iW6`AEO&DvqQBu%+(F5*Y;o zNCHEDY<34<)bSFf_n50-N3*v1lbXSGB)TTPRa%zNK-itI&bjrzBt;8SuD+7z`wNUc z{@SdXTyFNS3+4hfd8bD+KoH#Wc9oXjg?m6szS5QRJHH~!G?CFQ=_Wl@({TE=W!tvl z>|u&@Dg?22t6cf@ItCo#jf&4gkmtGT2{F*Nwmx2E*}A|DrF|TDMm=0K0{CTOvE~vc z=1L7<2MaW2w{tuP$x`qFaGu{TaIKbfv4BSk46)LNxz+$M5FG!S2OVEih6cY`muNaxEkPP-de0^ShVf7N0Y zbc}WaZ)o$YxJQ-Zdj;?zKsEXwFKCTdBY?if_&y=(ig>fZz+vfZ##N4B6Zja(P8n@1 z1b4m00dKCms$s==u4SS03wZt)DLvY#>qVY%JiAhL{?e*y zW}p==J1PmTJ_W8e)&GVIN(?Et(X#)&WW8ug*?)ZlP@ z;A%>7RRV0XQOb1_bwy_ZBS}fs>co#%EyF5ywhm}1N8dLtYEy6yXlXxXOVp1g3g?Bil-uszX6hP_CIcn9eoS(y zmaY8peGo*RAHG7f#aZC%#!-*CRw1fRo&rh{Al-O?Dk5z#x>@J9PR1W^nU3BM92ynE zKpr6n?Aad1Ap4hL{p~yi!5h?z_eA{x{rx&YsN-496}HTsEY%`7dKcL9p|{+$l&}fB zAkErhl@)ni1J3B4s7WleXD@nif{a_gK3?|7aYPQNSWJc0HxG$_98Z zfCjYtR_YVsYT)_uT#=TmP`r$Q=9gOSm!QG+0r2Ai>FiZ_&Yth3K-o=d?Dd8V|k}8NNKr5aE>aD_1MPC3%@a(q3 zVMq@IsU&pYs8#-+7d)TeFI9uOH;h3>SCq2 z%76TsAb@7-eOI}t(5wu2q9B-|z$$-#7Cc`GZ?TKIHF zBcp(SAy-vzT;&MHfev+vuFs{J3JWkCetx@!&uap;f(~i9AMgUei@&V%H1&Tx0>F!( z!qK?*?|--WU*IqxMAFkheVW#t21`1<|H>+s$yo`}p1x!}`Hb0->Xpw4=3d 
zN;H8t95%J^{n68T;k<@k`{={5P9tSc8|tV!0hjT5(GwD@SFu{jeC?*~f?(2-5v%{g z2=L|Fv_fdrUI30U*ekCBFaG>}7ch-d6IMrB2wI_~|33F01@fLm&4fsdtwCZn(C$IkTzX0!^ca;DD literal 0 HcmV?d00001 diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc new file mode 100644 index 0000000000000..fb026578bc00d --- /dev/null +++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc @@ -0,0 +1,168 @@ +[[learning-to-rank-model-training]] +=== Deploy and manage Learning To Rank models +++++ +Deploy and manage LTR models +++++ + +preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. +For more information, see {subscriptions}. + +[discrete] +[[learning-to-rank-model-training-workflow]] +==== Train and deploy a model using Eland + +Typically, the https://xgboost.readthedocs.io/en/stable/[XGBoost^] model training process uses standard Python data science tools like Pandas and scikit-learn. + + +We have developed an +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example +notebook^] available in the `elasticsearch-labs` repo. This interactive Python notebook +details an end-to-end model training and deployment workflow. + +We highly recommend using https://eland.readthedocs.io/[eland^] in your workflow, because it provides important functionalities for working with LTR in {es}. Use eland to: + +* Configure feature extraction + +* Extract features for training + +* Deploy the model in {es} + +[discrete] +[[learning-to-rank-model-training-feature-definition]] +===== Configure feature extraction in Eland + +Feature extractors are defined using templated queries. 
https://eland.readthedocs.io/[Eland^] provides the `eland.ml.ltr.QueryFeatureExtractor` to define these feature extractors directly in Python:
+
+[source,python]
+----
+from eland.ml.ltr import QueryFeatureExtractor
+
+feature_extractors=[
+    # We want to use the score of the match query for the title field as a feature:
+    QueryFeatureExtractor(
+        feature_name="title_bm25",
+        query={"match": {"title": "{{query}}"}}
+    ),
+    # We can use a script_score query to get the value
+    # of the popularity field directly as a feature:
+    QueryFeatureExtractor(
+        feature_name="popularity",
+        query={
+            "script_score": {
+                "query": {"exists": {"field": "popularity"}},
+                "script": {"source": "return doc['popularity'].value;"},
+            }
+        },
+    ),
+    # We can execute a script on the value of the query
+    # and use the return value as a feature:
+    QueryFeatureExtractor(
+        feature_name="query_length",
+        query={
+            "script_score": {
+                "query": {"match_all": {}},
+                "script": {
+                    "source": "return params['query'].splitOnToken(' ').length;",
+                    "params": {
+                        "query": "{{query}}",
+                    }
+                },
+            }
+        },
+    ),
+]
+----
+// NOTCONSOLE
+
+Once the feature extractors have been defined, they are wrapped in an `eland.ml.ltr.LTRModelConfig` object for use in later training steps:
+
+[source,python]
+----
+from eland.ml.ltr import LTRModelConfig
+
+ltr_config = LTRModelConfig(feature_extractors)
+----
+// NOTCONSOLE
+
+[discrete]
+[[learning-to-rank-model-training-feature-extraction]]
+===== Extracting features for training
+
+Building your dataset is a critical step in the training process. This involves
+extracting relevant features and adding them to your judgment list. We
+recommend using Eland's `eland.ml.ltr.FeatureLogger` helper class for this
+process.
+
+[source,python]
+----
+from eland.ml.ltr import FeatureLogger
+
+# Create a feature logger that will be used to query {es} to retrieve the features:
+feature_logger = FeatureLogger(es_client, MOVIE_INDEX, ltr_config)
+----
+// NOTCONSOLE
+
+The `FeatureLogger` provides an `extract_features` method which enables you to extract features for a list of specific documents from your judgment list. At the same time, you can pass query parameters to the feature extractors defined earlier:
+
+[source,python]
+----
+feature_logger.extract_features(
+    query_params={"query": "foo"},
+    doc_ids=["doc-1", "doc-2"]
+)
+----
+// NOTCONSOLE
+
+Our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[example notebook^] explains how to use the `FeatureLogger` to build a training dataset, by adding features to a judgment list.
+
+[discrete]
+[[learning-to-rank-model-training-feature-extraction-notes]]
+====== Notes on feature extraction
+
+* We strongly advise against implementing feature extraction on your own. It's crucial to maintain consistency in feature extraction between the training environment and inference in {es}. By using eland tooling, which is developed and tested in tandem with {es}, you can ensure that they function together consistently.
+
+* Feature extraction is performed by executing queries on the {es} server. This could put a lot of stress on your cluster, especially when your judgment list contains a lot of examples or you have many features. Our feature logger implementation is designed to minimize the number of search requests sent to the server and reduce load. However, it might be best to build your training dataset using an {es} cluster that is isolated from any user-facing, production traffic.
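
The example notebook goes on to train the actual ranking model on the dataset built this way. As a minimal illustrative sketch (not an official recipe), assume the extracted features and relevance grades have been collected into a Pandas DataFrame named `judgments_df` with a `query_id` column, a `grade` column, and the three feature columns defined above, and that you are on a recent XGBoost release whose `XGBRanker.fit` accepts a `qid` argument. Training the `ranker` that the next section deploys could then look like this:

[source,python]
----
import xgboost

# Purely illustrative: "judgments_df" and its column names are assumptions
# for this sketch, not part of the official workflow.
judgments_df = judgments_df.sort_values("query_id")  # rows must be grouped by their (numeric) query id

X = judgments_df[["title_bm25", "popularity", "query_length"]]
y = judgments_df["grade"]
qid = judgments_df["query_id"]

ranker = xgboost.XGBRanker(objective="rank:ndcg")
ranker.fit(X, y, qid=qid)
----
// NOTCONSOLE

Any of the model types listed in the next section can be produced in a similar way; the key point is that the columns of the feature matrix must line up with the feature extractors configured in `ltr_config`.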
+
+[discrete]
+[[learning-to-rank-model-deployment]]
+===== Deploy your model into {es}
+
+Once your model is trained, you will be able to deploy it in your {es} cluster. You can use Eland's `MLModel.import_ltr_model` method:
+
+[source,python]
+----
+from eland.ml import MLModel
+
+LEARNING_TO_RANK_MODEL_ID="ltr-model-xgboost"
+
+MLModel.import_ltr_model(
+    es_client=es_client,
+    model=ranker,
+    model_id=LEARNING_TO_RANK_MODEL_ID,
+    ltr_model_config=ltr_config,
+    es_if_exists="replace",
+)
+----
+// NOTCONSOLE
+
+This method will serialize the trained model and the Learning To Rank configuration (including feature extraction) in a format that {es} can understand. The model is then deployed to {es} using the <>.
+
+The following types of models are currently supported for LTR with {es}:
+
+* https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html[`DecisionTreeRegressor`^]
+* https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html[`RandomForestRegressor`^]
+* https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html[`LGBMRegressor`^]
+* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[`XGBRanker`^]
+* https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRegressor[`XGBRegressor`^]
+
+More model types will be supported in the future.
+
+[discrete]
+[[learning-to-rank-model-management]]
+==== Learning To Rank model management
+
+Once your model is deployed in {es}, you can manage it using the https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-df-trained-models-apis.html[trained model APIs].
+You're now ready to work with your LTR model as a rescorer at <>.
diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc
new file mode 100644
index 0000000000000..1d040a116ad9a
--- /dev/null
+++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc
@@ -0,0 +1,78 @@
+[[learning-to-rank-search-usage]]
+=== Search using Learning To Rank
+++++
+Search using LTR
+++++
+
+preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."]
+
+NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels.
+For more information, see {subscriptions}.
+
+[discrete]
+[[learning-to-rank-rescorer]]
+==== Learning To Rank as a rescorer
+
+Once your LTR model is trained and deployed in {es}, it can be used as a <> in the <>:
+
+[source,console]
+----
+GET my-index/_search
+{
+  "query": { <1>
+    "multi_match": {
+      "fields": ["title", "content"],
+      "query": "the quick brown fox"
+    }
+  },
+  "rescore": {
+    "learning_to_rank": {
+      "model_id": "ltr-model", <2>
+      "params": { <3>
+        "query_text": "the quick brown fox"
+      }
+    },
+    "window_size": 100 <4>
+  }
+}
+----
+// TEST[skip:TBD]
+<1> First pass query providing documents to be rescored.
+<2> The unique identifier of the trained model uploaded to {es}.
+<3> Named parameters to be passed to the query templates used for feature extraction.
+<4> The number of documents that should be examined by the rescorer on each shard.
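
If you are querying from the same Python environment used for training and deployment, the equivalent request through the official Python client would look roughly like the sketch below. This is an assumption-laden example: it reuses the `es_client` object and the hypothetical `my-index` index and `ltr-model` id from the snippets above, and it assumes a client release recent enough to accept `rescore` as a keyword argument.

[source,python]
----
response = es_client.search(
    index="my-index",
    query={
        "multi_match": {
            "fields": ["title", "content"],
            "query": "the quick brown fox",
        }
    },
    rescore={
        "learning_to_rank": {
            "model_id": "ltr-model",
            "params": {"query_text": "the quick brown fox"},
        },
        "window_size": 100,
    },
)

for hit in response["hits"]["hits"]:
    print(hit["_id"], hit["_score"])  # hits ordered by the rescored LTR score
----
// NOTCONSOLE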
+
+[discrete]
+[[learning-to-rank-rescorer-limitations]]
+===== Known limitations
+
+[discrete]
+[[learning-to-rank-rescorer-limitations-window-size]]
+====== Rescore window size
+
+Scores returned by LTR models are usually not comparable with the scores issued by the first pass query and can be lower than the non-rescored score. This can cause the non-rescored result document to be ranked higher than the rescored document. To prevent this, the `window_size` parameter is mandatory for LTR rescorers and should be greater than or equal to `from + size`.
+
+[discrete]
+[[learning-to-rank-rescorer-limitations-pagination]]
+====== Pagination
+
+When exposing pagination to users, `window_size` should remain constant as each page is progressed by passing different `from` values. Changing the `window_size` can alter the top hits, causing results to confusingly shift as the user steps through pages.
+
+[discrete]
+[[learning-to-rank-rescorer-limitations-negative-scores]]
+====== Negative scores
+
+Depending on how your model is trained, it's possible that the model will return negative scores for documents. While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer.
+
+[discrete]
+[[learning-to-rank-rescorer-limitations-field-collapsing]]
+====== Compatibility with field collapsing
+
+LTR rescorers are not compatible with the <>.
+
+[discrete]
+[[learning-to-rank-rescorer-limitations-term-statistics]]
+====== Term statistics as features
+
+We do not currently support term statistics as features; however, future releases will introduce this capability.
+
diff --git a/docs/reference/search/search-your-data/learning-to-rank.asciidoc b/docs/reference/search/search-your-data/learning-to-rank.asciidoc
new file mode 100644
index 0000000000000..08fad9db9c0f6
--- /dev/null
+++ b/docs/reference/search/search-your-data/learning-to-rank.asciidoc
@@ -0,0 +1,136 @@
+[[learning-to-rank]]
+== Learning To Rank
+
+preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."]
+
+NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels.
+For more information, see {subscriptions}.
+
+Learning To Rank (LTR) uses a trained machine learning (ML) model to build a
+ranking function for your search engine. Typically, the model is used as a
+second stage re-ranker, to improve the relevance of search results returned by a
+simpler, first stage retrieval algorithm. The LTR function takes a list of
+documents and a search context and outputs ranked documents:
+
+[[learning-to-rank-overview-diagram]]
+.Learning To Rank overview
+image::images/search/learning-to-rank-overview.png[Learning To Rank overview,align="center"]
+
+
+[discrete]
+[[learning-to-rank-search-context]]
+=== Search context
+
+In addition to the list of documents to sort, the LTR function also requires a
+search context. Typically, this search context includes at least the search
+terms provided by the user (`text_query` in the example above).
+The search context can also provide additional information used in the ranking model.
+This could be information about the user doing the search (such as demographic data, geolocation, or age); about the query (such as query length); or about the document in the context of the query (such as the score for the title field).
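
At query time, these context values are passed to {es} as named query parameters that the feature extractors can reference. The fragment below is purely illustrative; the parameter names are placeholders rather than a fixed schema, and every key must match a parameter expected by the model's feature extractors. It is shown here as the Python dictionary you would hand to the rescorer's `params` object:

[source,python]
----
# Hypothetical search context for a single query.
search_context = {
    "query_text": "quick brown fox",
    "user_age": 32,
    "user_location": "48.86,2.35",
}
----
// NOTCONSOLE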
+
+[discrete]
+[[learning-to-rank-judgement-list]]
+=== Judgment list
+The LTR model is usually trained on a judgment list, which is a set of queries and documents with a relevance grade. Judgment lists can be human or machine generated: they're commonly populated from behavioural analytics, often with human moderation. Judgment lists determine the ideal ordering of results for a given search query. The goal of LTR is to fit the model to the judgment list rankings as closely as possible for new queries and documents.
+
+The judgment list is the main input used to train the model. It consists of a dataset that contains pairs of queries and documents, along with their corresponding relevance labels.
+The relevance judgment is typically either binary (relevant/irrelevant) or a more
+granular label, such as a grade between 0 (completely irrelevant) and 4 (highly
+relevant). The example below uses a graded relevance judgment.
+
+
+[[learning-to-rank-judgment-list-example]]
+.Judgment list example
+image::images/search/learning-to-rank-judgment-list.png[Judgment list example,align="center"]
+
+[discrete]
+[[judgment-list-notes]]
+==== Notes on judgment lists
+
+While a judgment list can be created manually by humans, there are techniques available to leverage user engagement data, such as clicks or conversions, to construct judgment lists automatically.
+
+The quantity and the quality of your judgment list will greatly influence the overall performance of the LTR model. The following aspects should be considered very carefully when building your judgment list:
+
+* Most search engines can be searched using different query types. For example, in a movie search engine, users search by title but also by actor or director. It's essential to maintain a balanced number of examples for each query type in your judgment list. This prevents overfitting and allows the model to generalize effectively across all query types.
+
+* Users often provide more positive examples than negative ones. By balancing the number of positive and negative examples, you help the model learn to distinguish between relevant and irrelevant content more accurately.
+
+[discrete]
+[[learning-to-rank-feature-extraction]]
+=== Feature extraction
+
+Query and document pairs alone don't provide enough information to train the ML
+models used for LTR. The relevance scores in judgment lists depend on a number
+of properties or _features_. These features must be extracted to determine how
+the various components combine to determine document relevance. The judgment
+list plus the extracted features make up the training dataset for an LTR model.
+
+These features fall into one of three main categories:
+
+* *Document features*:
+  These features are derived directly from document properties.
+  Example: product price in an eCommerce store.
+
+* *Query features*:
+  These features are computed directly from the query submitted by the user.
+  Example: the number of words in the query.
+
+* *Query-document features*:
+  Features used to provide information about the document in the context of the query.
+  Example: the BM25 score for the `title` field.
+
+To prepare the dataset for training, the features are added to the judgment list:
+
+[[learning-to-rank-judgement-feature-extraction]]
+.Judgment list with features
+image::images/search/learning-to-rank-feature-extraction.png[Judgment list with features,align="center"]
+
+To do this in {es}, use templated queries to extract features when building the
+training dataset and during inference at query time. Here is an example of a
+templated query:
+
+[source,js]
+----
+[
+  {
+    "query_extractor": {
+      "feature_name": "title_bm25",
+      "query": { "match": { "title": "{{query}}" } }
+    }
+  }
+]
+----
+// NOTCONSOLE
+
+[discrete]
+[[learning-to-rank-models]]
+=== Models
+
+The heart of LTR is, of course, an ML model. A model is trained using the training data described above in combination with an objective. In the case of LTR, the objective is to rank result documents in an optimal way with respect to a judgment list, given some ranking metric such as https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Discounted_cumulative_gain[nDCG^] or https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision[MAP^]. The model relies solely on the features and relevance labels from the training data.
+
+The LTR space is evolving rapidly and many approaches and model types are being
+experimented with. In practice, {es} relies specifically on gradient boosted decision tree
+(https://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting[GBDT^]) models for LTR inference.
+
+Note that {es} supports model inference but the training process itself must
+happen outside of {es}, using a GBDT model. Among the most popular LTR models
+used today, https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf[LambdaMART^] provides strong ranking performance with low inference
+latencies. It relies on GBDT models and is therefore a perfect fit for LTR in
+{es}.
+
+https://xgboost.readthedocs.io/en/stable/[XGBoost^] is a well-known library that provides an https://xgboost.readthedocs.io/en/stable/tutorials/learning_to_rank.html[implementation^] of LambdaMART, making it a popular choice for LTR. We offer helpers in https://eland.readthedocs.io/[eland^] to facilitate the integration of a trained https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRanker[XGBRanker^] model as your LTR model in {es}.
+
+[TIP]
+====
+Learn more about training in <>, or check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/08-learning-to-rank.ipynb[interactive LTR notebook] available in the `elasticsearch-labs` repo.
+==== +[discrete] +[[learning-to-rank-in-the-elastic-stack]] +=== LTR in the Elastic stack + +In the next pages of this guide you will learn to: + +* <> +* <> + +include::learning-to-rank-model-training.asciidoc[] +include::learning-to-rank-search-usage.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index 8362094fab10c..bed204985296c 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -46,6 +46,7 @@ include::search-api.asciidoc[] include::search-application-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] +include::learning-to-rank.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] include::behavioral-analytics/behavioral-analytics-overview.asciidoc[] From 9448483555e4fcab3fa7362deb524c7f9a045780 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 11 Mar 2024 17:06:34 +0000 Subject: [PATCH 108/248] Fix numeric sorts in `_cat/nodes` (#106189) Some of the columns in the `GET _cat/nodes` output are numeric, but formatted in a specific way, so we represent them as strings today which makes them sort incorrectly. This commit combines the string representation with the original numeric value so these values sort correctly. Closes #48070 --- docs/changelog/106189.yaml | 6 ++ .../rest/action/cat/RestNodesAction.java | 11 ++- .../rest/action/cat/RestTable.java | 20 +++++ .../rest/action/cat/RestNodesActionTests.java | 73 +++++++++++++++++++ .../rest/action/cat/RestTableTests.java | 20 +++++ 5 files changed, 124 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/106189.yaml diff --git a/docs/changelog/106189.yaml b/docs/changelog/106189.yaml new file mode 100644 index 0000000000000..ec485f0e60efb --- /dev/null +++ b/docs/changelog/106189.yaml @@ -0,0 +1,6 @@ +pr: 106189 +summary: Fix numeric sorts in `_cat/nodes` +area: CAT APIs +type: bug +issues: + - 48070 diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 9b70776551ba6..e5e0f9ee926f3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -57,7 +57,6 @@ import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.List; -import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -375,14 +374,14 @@ Table buildTable( ByteSizeValue diskTotal = null; ByteSizeValue diskUsed = null; ByteSizeValue diskAvailable = null; - String diskUsedPercent = null; + RestTable.FormattedDouble diskUsedPercent = null; if (fsInfo != null) { diskTotal = fsInfo.getTotal().getTotal(); diskAvailable = fsInfo.getTotal().getAvailable(); diskUsed = ByteSizeValue.ofBytes(diskTotal.getBytes() - diskAvailable.getBytes()); double diskUsedRatio = diskTotal.getBytes() == 0 ? 
1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes(); - diskUsedPercent = String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio); + diskUsedPercent = RestTable.FormattedDouble.format2DecimalPlaces(100.0 * diskUsedRatio); } table.addCell(diskTotal); table.addCell(diskUsed); @@ -408,17 +407,17 @@ Table buildTable( table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[0] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[0]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[1] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[1]) ); table.addCell( hasLoadAverage == false || osStats.getCpu().getLoadAverage()[2] == -1 ? null - : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]) + : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[2]) ); table.addCell(jvmStats == null ? null : jvmStats.getUptime()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java index 6845fec4db6fe..cfe5d6d2aef39 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java @@ -496,4 +496,24 @@ public boolean isReversed() { return reverse; } } + + /** + * A formatted number, such that it sorts according to its numeric value but captures a specific string representation too + */ + record FormattedDouble(String displayValue, double numericValue) implements Comparable { + + static FormattedDouble format2DecimalPlaces(double numericValue) { + return new FormattedDouble(Strings.format("%.2f", numericValue), numericValue); + } + + @Override + public int compareTo(FormattedDouble other) { + return Double.compare(numericValue, other.numericValue); + } + + @Override + public String toString() { + return displayValue; + } + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java index 2f5293d7a44a8..7ddd63db73986 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java @@ -9,17 +9,24 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.Before; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; +import java.util.Map; import static java.util.Collections.emptySet; import 
static org.mockito.Mockito.mock; @@ -48,4 +55,70 @@ public void testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { action.buildTable(false, new FakeRestRequest(), clusterStateResponse, nodesInfoResponse, nodesStatsResponse); } + + public void testFormattedNumericSort() { + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("node-1")).add(DiscoveryNodeUtils.create("node-2"))) + .build(); + + final var nowMillis = System.currentTimeMillis(); + final var rowOrder = RestTable.getRowOrder( + action.buildTable( + false, + new FakeRestRequest(), + new ClusterStateResponse(clusterState.getClusterName(), clusterState, false), + new NodesInfoResponse(clusterState.getClusterName(), List.of(), List.of()), + new NodesStatsResponse( + clusterState.getClusterName(), + List.of( + // sorting 10 vs 9 in all relevant columns, since these sort incorrectly as strings + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-1"), 10), + getTrickySortingNodeStats(nowMillis, clusterState.nodes().get("node-2"), 9) + ), + Collections.emptyList() + ) + ), + new FakeRestRequest.Builder(xContentRegistry()).withParams( + Map.of("s", randomFrom("load_1m", "load_5m", "load_15m", "disk.used_percent")) + ).build() + ); + + final var nodesList = new ArrayList(); + for (final var node : clusterState.nodes()) { + nodesList.add(node); + } + + assertEquals("node-2", nodesList.get(rowOrder.get(0)).getId()); + assertEquals("node-1", nodesList.get(rowOrder.get(1)).getId()); + } + + private static NodeStats getTrickySortingNodeStats(long nowMillis, DiscoveryNode node, int sortValue) { + return new NodeStats( + node, + nowMillis, + null, + new OsStats( + nowMillis, + new OsStats.Cpu((short) sortValue, new double[] { sortValue, sortValue, sortValue }), + new OsStats.Mem(0, 0, 0), + new OsStats.Swap(0, 0), + null + ), + null, + null, + null, + new FsInfo(nowMillis, null, new FsInfo.Path[] { new FsInfo.Path("/foo", "/foo", 100, 100 - sortValue, 100 - sortValue) }), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java index 7a8c67177aade..1ec180fdaad77 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java @@ -259,6 +259,26 @@ public void testMultiSort() { assertEquals(Arrays.asList(1, 0, 2), rowOrder); } + public void testFormattedDouble() { + Table table = new Table(); + table.startHeaders(); + table.addCell("number"); + table.endHeaders(); + List comparisonList = Arrays.asList(10, 9, 11); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(RestTable.FormattedDouble.format2DecimalPlaces(comparisonList.get(i))); + table.endRow(); + } + restRequest.params().put("s", "number"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(1, 0, 2), rowOrder); + + restRequest.params().put("s", "number:desc"); + rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(2, 0, 1), rowOrder); + } + public void testPlainTextChunking() throws Exception { final var cells = randomArray(8, 8, String[]::new, () -> randomAlphaOfLengthBetween(1, 5)); final var expectedRow = String.join(" ", cells) + "\n"; From f05a66c3dd4d90e8998c188a02b677d3963fdc3f 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 11 Mar 2024 18:14:17 +0100 Subject: [PATCH 109/248] Migrate `cat` YAML tests from version to cluster_features (#106167) --- .../test/cat.aliases/10_basic.yml | 36 ++++++++++--------- .../test/rest/ESRestTestCase.java | 12 ++++--- .../rest/yaml/ESClientYamlSuiteTestCase.java | 6 ++++ .../rest/yaml/YamlTestLegacyFeatures.java | 29 +++++++++++++++ 4 files changed, 62 insertions(+), 21 deletions(-) create mode 100644 test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 9566f6f036c3f..9eebb281795b0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -1,8 +1,8 @@ --- "Help": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -22,7 +22,7 @@ --- "Help (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -51,8 +51,8 @@ --- "Simple alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -81,7 +81,7 @@ --- "Simple alias (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -108,8 +108,8 @@ --- "Complex alias": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -149,7 +149,7 @@ --- "Complex alias (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -269,8 +269,8 @@ --- "Column headers": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -307,7 +307,7 @@ --- "Column headers (pre 7.4.0)": - skip: - version: "7.4.0 - " + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: @@ -373,10 +373,10 @@ --- "Alias against closed index": - - skip: - version: " - 7.3.99" + - requires: + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - features: ["allowed_warnings"] + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -409,10 +409,12 @@ --- "Alias against closed index (pre 7.4.0)": - skip: - version: "7.4.0 - " - features: ["allowed_warnings"] + cluster_features: "cat_aliases_show_write_index" reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + - requires: + test_runner_features: ["allowed_warnings"] + - do: indices.create: index: test_index diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 
66af90ce14d64..307daddd17c37 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -355,16 +355,21 @@ public void initClient() throws IOException { assert nodesVersions != null; } - protected TestFeatureService createTestFeatureService( + protected List createAdditionalFeatureSpecifications() { + return List.of(); + } + + protected final TestFeatureService createTestFeatureService( Map> clusterStateFeatures, Set semanticNodeVersions ) { // Historical features information is unavailable when using legacy test plugins boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; - final List featureSpecifications; + final List featureSpecifications = new ArrayList<>(createAdditionalFeatureSpecifications()); + featureSpecifications.add(new RestTestLegacyFeatures()); if (hasHistoricalFeaturesInformation) { - featureSpecifications = List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()); + featureSpecifications.add(new ESRestTestCaseHistoricalFeatures()); } else { logger.warn( "This test is running on the legacy test framework; historical features from production code will not be available. " @@ -372,7 +377,6 @@ protected TestFeatureService createTestFeatureService( + "If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification such as {}.", RestTestLegacyFeatures.class.getCanonicalName() ); - featureSpecifications = List.of(new RestTestLegacyFeatures()); } return new ESRestTestFeatureService( diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 7d8d1175385a1..804f4eae4042d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.test.ClasspathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.TestFeatureService; @@ -194,6 +195,11 @@ public void initAndResetContext() throws Exception { restTestExecutionContext.clear(); } + @Override + protected List createAdditionalFeatureSpecifications() { + return List.of(new YamlTestLegacyFeatures()); + } + /** * Create the test execution context. Can be overwritten in sub-implementations of the test if the context needs to be modified. */ diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java new file mode 100644 index 0000000000000..0c27cea49f955 --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest.yaml; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +/** + * This class groups historical features that have been removed from the production codebase, but are still used by YAML test + * to support BwC. Rather than leaving them in the main src we group them here, so it's clear they are not used in production code anymore. + */ +public class YamlTestLegacyFeatures implements FeatureSpecification { + + private static final NodeFeature CAT_ALIASES_SHOW_WRITE_INDEX = new NodeFeature("cat_aliases_show_write_index"); + + @Override + public Map getHistoricalFeatures() { + return Map.ofEntries(Map.entry(CAT_ALIASES_SHOW_WRITE_INDEX, Version.V_7_4_0)); + } +} From 8be3fb229d8c29d7a3931ffcb5e03e0bb0476c0f Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Mar 2024 13:22:46 -0400 Subject: [PATCH 110/248] Mute failing snippet tests (#106196) --- docs/reference/sql/endpoints/rest.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 8168a1c14e1a3..258df3c8afc97 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -579,6 +579,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] // TEST[s/"wait_for_completion_timeout": "2s"/"wait_for_completion_timeout": "0"/] @@ -602,6 +603,7 @@ For CSV, TSV, and TXT responses, the API returns these values in the respective "rows": [ ] } ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"is_partial": true/"is_partial": $body.is_partial/] // TESTRESPONSE[s/"is_running": true/"is_running": $body.is_running/] @@ -628,6 +630,7 @@ complete results. "completion_status": 200 } ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] // TESTRESPONSE[s/"expiration_time_in_millis": 1611690295000/"expiration_time_in_millis": $body.expiration_time_in_millis/] @@ -660,6 +663,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] You can use the get async SQL search API's `keep_alive` parameter to later @@ -698,6 +702,7 @@ POST _sql?format=json "fetch_size": 5 } ---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TEST[setup:library] If `is_partial` and `is_running` are `false`, the search was synchronous and @@ -714,6 +719,7 @@ returned complete results. "cursor": ... 
} ---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/106158] // TESTRESPONSE[s/Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=/$body.id/] // TESTRESPONSE[s/"rows": \.\.\./"rows": $body.rows/] // TESTRESPONSE[s/"columns": \.\.\./"columns": $body.columns/] From bcecb482ca8979f4bf4103cd17af2e306b548580 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Mar 2024 15:19:07 -0400 Subject: [PATCH 111/248] =?UTF-8?q?Revert=20"Tighten=20up=20preconditions?= =?UTF-8?q?=20and=20test=20conditions=20in=20watcher=20yaml=20rest=20tes?= =?UTF-8?q?=E2=80=A6"=20(#106197)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 2bb5bb9f87b173c43c02ada52d3d16ece5b3f98d. --- .../xpack/watcher/WatcherRestTestCase.java | 43 ++++++------------- .../test/watcher/usage/10_basic.yml | 18 +++++--- 2 files changed, 23 insertions(+), 38 deletions(-) diff --git a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java index 1779fa4345a85..341e92641f641 100644 --- a/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java +++ b/x-pack/plugin/watcher/qa/common/src/main/java/org/elasticsearch/xpack/watcher/WatcherRestTestCase.java @@ -21,8 +21,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.equalTo; - /** * Parent test class for Watcher (not-YAML) based REST tests */ @@ -80,37 +78,20 @@ public final void stopWatcher() throws Exception { } public static void deleteAllWatcherData() throws IOException { - { - var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - - int totalCount = response.evaluate("count"); - List> watches = response.evaluate("watches"); - assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; - for (Map watch : watches) { - String id = (String) watch.get("_id"); - var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); - assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); - } - } + var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); + var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - { - var queryWatchesRequest = new Request("GET", "/_watcher/_query/watches"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(queryWatchesRequest)); - assertThat(response.evaluate("count"), equalTo(0)); + int totalCount = response.evaluate("count"); + List> watches = response.evaluate("watches"); + assert watches.size() == totalCount : "number of watches returned is unequal to the total number of watches"; + for (Map watch : watches) { + String id = (String) watch.get("_id"); + var deleteWatchRequest = new Request("DELETE", "/_watcher/watch/" + id); + assertOK(ESRestTestCase.adminClient().performRequest(deleteWatchRequest)); } - { - var xpackUsageRequest = new Request("GET", "/_xpack/usage"); - var response = ObjectPath.createFromResponse(ESRestTestCase.adminClient().performRequest(xpackUsageRequest)); - assertThat(response.evaluate("watcher.count.active"), equalTo(0)); - 
assertThat(response.evaluate("watcher.count.total"), equalTo(0)); - } - - { - var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); - deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); - ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); - } + var deleteWatchHistoryRequest = new Request("DELETE", ".watcher-history-*"); + deleteWatchHistoryRequest.addParameter("ignore_unavailable", "true"); + ESRestTestCase.adminClient().performRequest(deleteWatchHistoryRequest); } } diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml index b3682b05d7e68..17031abf39e02 100644 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,18 +1,21 @@ --- "Test watcher usage stats output": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/65547" - do: catch: missing watcher.delete_watch: id: "usage_stats_watch" - - do: { xpack.usage: {} } - - match: { "watcher.count.active": 0 } - - match: { "watcher.count.total": 0 } + - do: {xpack.usage: {}} + - set: { "watcher.count.active": watch_count_active } + - set: { "watcher.count.total": watch_count_total } - do: watcher.put_watch: id: "usage_stats_watch" - body: > + body: > { "trigger": { "schedule" : { "cron" : "0 0 0 1 * ? 2099" } @@ -44,9 +47,9 @@ } - match: { _id: "usage_stats_watch" } - - do: { xpack.usage: {} } - - match: { "watcher.count.active": 1 } - - match: { "watcher.count.total": 1 } + - do: {xpack.usage: {}} + - gt: { "watcher.count.active": $watch_count_active } + - gt: { "watcher.count.total": $watch_count_total } - gte: { "watcher.watch.action._all.active": 1 } - gte: { "watcher.watch.action.logging.active": 1 } - gte: { "watcher.watch.condition._all.active": 1 } @@ -57,3 +60,4 @@ - gte: { "watcher.watch.trigger.schedule.active": 1 } - gte: { "watcher.watch.trigger.schedule.cron.active": 1 } - gte: { "watcher.watch.trigger.schedule._all.active": 1 } + From 41a0210837f1727333bc93e8dffd8a57e73c0ce7 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 11 Mar 2024 19:34:09 +0000 Subject: [PATCH 112/248] Expand docs on number value conventions (#106198) Today we do not say explicitly that `integer` response fields are really arbitrarily large JSON integers and may not fit into a Java `int`. This commit expands the docs to add this information. --- docs/reference/api-conventions.asciidoc | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index b0fa51679d661..1a63af19b0a33 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -337,8 +337,22 @@ value `true`. All other values will raise an error. [discrete] === Number Values -All REST APIs support providing numbered parameters as `string` on top -of supporting the native JSON number types. +When passing a numeric parameter in a request body, you may use a `string` +containing the number instead of the native numeric type. 
For example: + +[source,console] +-------------------------------------------------- +POST /_search +{ + "size": "1000" +} +-------------------------------------------------- + +Integer-valued fields in a response body are described as `integer` (or +occasionally `long`) in this manual, but there are generally no explicit bounds +on such values. JSON, SMILE, CBOR and YAML all permit arbitrarily large integer +values. Do not assume that `integer` fields in a response body will always fit +into a 32-bit signed integer. [[byte-units]] [discrete] From eb602f428f5f7cfb101d5e890db56029ab9211f6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 11 Mar 2024 22:15:37 +0100 Subject: [PATCH 113/248] Remove some dead code in o.e.common.inject (#106183) Random find from researching things: We never annotate fields with `@Inject` so we can remove all the infrastructure around that. --- .../elasticsearch/common/inject/Binding.java | 2 +- .../common/inject/ConstructorBindingImpl.java | 4 +- .../common/inject/ConstructorInjector.java | 1 - .../elasticsearch/common/inject/Inject.java | 3 +- .../common/inject/InjectorImpl.java | 16 +--- .../common/inject/MembersInjector.java | 15 +--- .../common/inject/MembersInjectorImpl.java | 45 +--------- .../common/inject/MembersInjectorStore.java | 13 ++- .../common/inject/SingleFieldInjector.java | 60 ------------- .../common/inject/SingleMemberInjector.java | 28 ------ .../common/inject/SingleMethodInjector.java | 11 +-- .../common/inject/TypeLiteral.java | 14 --- .../inject/internal/BindingBuilder.java | 4 +- .../common/inject/internal/Errors.java | 18 ---- .../inject/internal/InstanceBindingImpl.java | 4 +- .../inject/internal/LinkedBindingImpl.java | 4 +- .../internal/LinkedProviderBindingImpl.java | 4 +- .../internal/ProviderInstanceBindingImpl.java | 4 +- .../internal/UntargettedBindingImpl.java | 4 +- .../common/inject/spi/InjectionListener.java | 35 -------- .../common/inject/spi/InjectionPoint.java | 87 ++++--------------- 21 files changed, 49 insertions(+), 327 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binding.java b/server/src/main/java/org/elasticsearch/common/inject/Binding.java index 9f519e3daca0a..b2bb645089b48 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binding.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binding.java @@ -77,6 +77,6 @@ public interface Binding extends Element { * @param visitor to call back on * @since 2.0 */ - V acceptTargetVisitor(BindingTargetVisitor visitor); + void acceptTargetVisitor(BindingTargetVisitor visitor); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java index 0c690f7ed9fa1..153c9627d736e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorBindingImpl.java @@ -54,11 +54,11 @@ public void initialize(InjectorImpl injector, Errors errors) throws ErrorsExcept } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { + public void acceptTargetVisitor(BindingTargetVisitor visitor) { if 
(factory.constructorInjector == null) { throw new IllegalStateException("not initialized"); } - return visitor.visit(); + visitor.visit(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java index 7b9f4be9c5a99..d38a75e0720d7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjector.java @@ -80,7 +80,6 @@ Object construct(Errors errors, InternalContext context, Class expectedType) constructionContext.setCurrentReference(t); membersInjector.injectMembers(t, errors, context); - membersInjector.notifyListeners(t, errors); return t; } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/Inject.java b/server/src/main/java/org/elasticsearch/common/inject/Inject.java index 0a30b7b97a2da..e56c4c21ad39e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Inject.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Inject.java @@ -21,7 +21,6 @@ import java.lang.annotation.Target; import static java.lang.annotation.ElementType.CONSTRUCTOR; -import static java.lang.annotation.ElementType.FIELD; import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.RetentionPolicy.RUNTIME; @@ -45,7 +44,7 @@ * * @author crazybob@google.com (Bob Lee) */ -@Target({ METHOD, CONSTRUCTOR, FIELD }) +@Target({ METHOD, CONSTRUCTOR }) @Retention(RUNTIME) @Documented public @interface Inject { diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 8d51894bf9907..d1086eb64ecc7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -39,7 +39,6 @@ import java.lang.annotation.Annotation; import java.lang.reflect.GenericArrayType; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; @@ -195,8 +194,8 @@ static InternalFactory> createInternalFactory(Binding provide } @Override - public V acceptTargetVisitor(BindingTargetVisitor, V> visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor, V> visitor) { + visitor.visit(); } @Override @@ -270,8 +269,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(); } @Override @@ -584,13 +583,6 @@ SingleParameterInjector createParameterInjector(final Dependency depen return new SingleParameterInjector<>(dependency, factory); } - /** - * Invokes a method. - */ - interface MethodInvoker { - Object invoke(Object target, Object... 
parameters) throws IllegalAccessException, InvocationTargetException; - } - /** * Cached constructor injectors for each type */ diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java index 0a4464a373e18..ffaee1648ab5a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjector.java @@ -25,17 +25,4 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 */ -public interface MembersInjector { - - /** - * Injects dependencies into the fields and methods of {@code instance}. Ignores the presence or - * absence of an injectable constructor. - *

    - * Whenever Guice creates an instance, it performs this injection automatically (after first - * performing constructor injection), so if you're able to let Guice create all your objects for - * you, you'll never need to use this method. - * - * @param instance to inject members on. May be {@code null}. - */ - void injectMembers(T instance); -} +public interface MembersInjector {} diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java index b32cddf9be4bc..8c190ef301651 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.spi.InjectionListener; import java.util.List; @@ -31,28 +30,12 @@ class MembersInjectorImpl implements MembersInjector { private final TypeLiteral typeLiteral; private final InjectorImpl injector; - private final List memberInjectors; - private final List> userMembersInjectors; - private final List> injectionListeners; + private final List memberInjectors; - MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { + MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, List memberInjectors) { this.injector = injector; this.typeLiteral = typeLiteral; this.memberInjectors = memberInjectors; - this.userMembersInjectors = List.of(); - this.injectionListeners = List.of(); - } - - @Override - public void injectMembers(T instance) { - Errors errors = new Errors(typeLiteral); - try { - injectAndNotify(instance, errors); - } catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - - errors.throwProvisionExceptionIfErrorsExist(); } void injectAndNotify(final T instance, final Errors errors) throws ErrorsException { @@ -64,20 +47,6 @@ void injectAndNotify(final T instance, final Errors errors) throws ErrorsExcepti injectMembers(instance, errors, context); return null; }); - - notifyListeners(instance, errors); - } - - void notifyListeners(T instance, Errors errors) throws ErrorsException { - int numErrorsBefore = errors.size(); - for (InjectionListener injectionListener : injectionListeners) { - try { - injectionListener.afterInjection(instance); - } catch (RuntimeException e) { - errors.errorNotifyingInjectionListener(injectionListener, typeLiteral, e); - } - } - errors.throwIfNewErrors(numErrorsBefore); } void injectMembers(T t, Errors errors, InternalContext context) { @@ -85,16 +54,6 @@ void injectMembers(T t, Errors errors, InternalContext context) { for (int i = 0, size = memberInjectors.size(); i < size; i++) { memberInjectors.get(i).inject(errors, context, t); } - - // optimization: use manual for/each to save allocating an iterator here - for (int i = 0, size = userMembersInjectors.size(); i < size; i++) { - MembersInjector userMembersInjector = userMembersInjectors.get(i); - try { - userMembersInjector.injectMembers(t); - } catch (RuntimeException e) { - errors.errorInUserInjector(userMembersInjector, typeLiteral, e); - } - } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java index 
9352c84db28f6..925739af25742 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -62,12 +61,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(type); + injectionPoints = InjectionPoint.forInstanceMethods(type); } catch (ConfigurationException e) { errors.merge(e.getErrorMessages()); injectionPoints = e.getPartialValue(); } - List injectors = getInjectors(injectionPoints, errors); + List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); return new MembersInjectorImpl<>(injector, type, injectors); @@ -76,14 +75,12 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro /** * Returns the injectors for the specified injection points. */ - List getInjectors(Set injectionPoints, Errors errors) { - List injectors = new ArrayList<>(); + List getInjectors(Set injectionPoints, Errors errors) { + List injectors = new ArrayList<>(); for (InjectionPoint injectionPoint : injectionPoints) { try { Errors errorsForMember = injectionPoint.isOptional() ? new Errors(injectionPoint) : errors.withSource(injectionPoint); - SingleMemberInjector injector = injectionPoint.getMember() instanceof Field - ? new SingleFieldInjector(this.injector, injectionPoint, errorsForMember) - : new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); + SingleMethodInjector injector = new SingleMethodInjector(this.injector, injectionPoint, errorsForMember); injectors.add(injector); } catch (ErrorsException ignoredForNow) { // ignored for now diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java deleted file mode 100644 index 7e8bfed724d59..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ErrorsException; -import org.elasticsearch.common.inject.internal.InternalContext; -import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Field; - -/** - * Sets an injectable field. 
- */ -class SingleFieldInjector implements SingleMemberInjector { - final Field field; - final InjectionPoint injectionPoint; - final Dependency dependency; - final InternalFactory factory; - - SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { - this.injectionPoint = injectionPoint; - this.field = (Field) injectionPoint.getMember(); - this.dependency = injectionPoint.getDependencies().get(0); - factory = injector.getInternalFactory(dependency.getKey(), errors); - } - - @Override - public void inject(Errors errors, InternalContext context, Object o) { - errors = errors.withSource(dependency); - - context.setDependency(dependency); - try { - Object value = factory.get(errors, context, dependency); - field.set(o, value); - } catch (ErrorsException e) { - errors.withSource(injectionPoint).merge(e.getErrors()); - } catch (IllegalAccessException e) { - throw new AssertionError(e); // a security manager is blocking us, we're hosed - } finally { - context.setDependency(null); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java deleted file mode 100644 index a4e25f9fd000b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMemberInjector.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.InternalContext; - -/** - * Injects a field or method of a given object. - */ -interface SingleMemberInjector { - void inject(Errors errors, InternalContext context, Object o); - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java index f6d9a2eb2c396..d36bc1e623a99 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java +++ b/server/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import org.elasticsearch.common.inject.InjectorImpl.MethodInvoker; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; @@ -28,19 +27,17 @@ /** * Invokes an injectable method. 
*/ -class SingleMethodInjector implements SingleMemberInjector { - final MethodInvoker methodInvoker; +class SingleMethodInjector { + final Method method; final SingleParameterInjector[] parameterInjectors; final InjectionPoint injectionPoint; SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { this.injectionPoint = injectionPoint; - final Method method = (Method) injectionPoint.getMember(); - methodInvoker = method::invoke; + method = (Method) injectionPoint.getMember(); parameterInjectors = injector.getParametersInjectors(injectionPoint.getDependencies(), errors); } - @Override public void inject(Errors errors, InternalContext context, Object o) { Object[] parameters; try { @@ -51,7 +48,7 @@ public void inject(Errors errors, InternalContext context, Object o) { } try { - methodInvoker.invoke(o, parameters); + method.invoke(o, parameters); } catch (IllegalAccessException e) { throw new AssertionError(e); // a security manager is blocking us, we're hosed } catch (InvocationTargetException userException) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index 72bf444d2dd3b..afc0db15e3942 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.inject.util.Types; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; import java.lang.reflect.GenericArrayType; import java.lang.reflect.Member; import java.lang.reflect.Method; @@ -249,19 +248,6 @@ public TypeLiteral getSupertype(Class supertype) { return resolve(MoreTypes.getGenericSupertype(type, rawType, supertype)); } - /** - * Returns the resolved generic type of {@code field}. - * - * @param field a field defined by this or any superclass. - * @since 2.0 - */ - public TypeLiteral getFieldType(Field field) { - if (field.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(field + " is not defined by a supertype of " + type); - } - return resolve(field.getGenericType()); - } - /** * Returns the resolved generic parameter types of {@code methodOrConstructor}. 
* diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java index 3837741bc3119..fd40879025c65 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/BindingBuilder.java @@ -60,7 +60,7 @@ public void toInstance(T instance) { Set injectionPoints; if (instance != null) { try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(instance.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(instance.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); @@ -84,7 +84,7 @@ public BindingBuilder toProvider(Provider provider) { // lookup the injection points, adding any errors to the binder's errors list Set injectionPoints; try { - injectionPoints = InjectionPoint.forInstanceMethodsAndFields(provider.getClass()); + injectionPoints = InjectionPoint.forInstanceMethods(provider.getClass()); } catch (ConfigurationException e) { for (Message message : e.getErrorMessages()) { binder.addError(message); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index 03a584d5c508b..8c44780e7b814 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -21,13 +21,11 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.MembersInjector; import org.elasticsearch.common.inject.Provider; import org.elasticsearch.common.inject.ProvisionException; import org.elasticsearch.common.inject.Scope; import org.elasticsearch.common.inject.TypeLiteral; import org.elasticsearch.common.inject.spi.Dependency; -import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.InjectionPoint; import org.elasticsearch.common.inject.spi.Message; @@ -315,14 +313,6 @@ public Errors errorInProvider(RuntimeException runtimeException) { return errorInUserCode(runtimeException, "Error in custom provider, %s", runtimeException); } - public Errors errorInUserInjector(MembersInjector listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error injecting %s using %s.%n" + " Reason: %s", type, listener, cause); - } - - public Errors errorNotifyingInjectionListener(InjectionListener listener, TypeLiteral type, RuntimeException cause) { - return errorInUserCode(cause, "Error notifying InjectionListener %s of %s.%n" + " Reason: %s", listener, type, cause); - } - public static Collection getMessagesFromThrowable(Throwable throwable) { if (throwable instanceof ProvisionException) { return ((ProvisionException) throwable).getErrorMessages(); @@ -381,14 +371,6 @@ public void throwConfigurationExceptionIfErrorsExist() { throw new ConfigurationException(getMessages()); } - public void throwProvisionExceptionIfErrorsExist() { - if (hasErrors() == false) { - return; - } - - throw new ProvisionException(getMessages()); - } - private Message merge(Message message) { List sources = new ArrayList<>(); sources.addAll(getSources()); diff --git 
a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java index 07c9dd0e4cf25..f5b36cf33b800 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/InstanceBindingImpl.java @@ -59,8 +59,8 @@ public Provider getProvider() { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java index 56e1a92c25018..135726f80e25b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedBindingImpl.java @@ -43,8 +43,8 @@ public LinkedBindingImpl(Object source, Key key, Scoping scoping, Key V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java index a27692a68882b..0bfd2ef273a74 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/LinkedProviderBindingImpl.java @@ -44,8 +44,8 @@ public LinkedProviderBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java index 676c0717896d5..792c18920a6fa 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderInstanceBindingImpl.java @@ -57,8 +57,8 @@ public ProviderInstanceBindingImpl( } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java index e5a916d4be62e..c5595d570563f 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/UntargettedBindingImpl.java @@ -32,8 +32,8 @@ public UntargettedBindingImpl(Object source, Key key, Scoping scoping) { } @Override - public V acceptTargetVisitor(BindingTargetVisitor visitor) { - return visitor.visit(this); + public void acceptTargetVisitor(BindingTargetVisitor visitor) { + visitor.visit(this); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java deleted file mode 100644 index 1f5b969559020..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2009 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.spi; - -/** - * Listens for injections into instances of type {@code I}. Useful for performing further - * injections, post-injection initialization, and more. - * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public interface InjectionListener { - - /** - * Invoked by Guice after it injects the fields and methods of instance. - * - * @param injectee instance that Guice injected dependencies into - */ - void afterInjection(I injectee); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 4e20b26d83284..df00c889fd3eb 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -29,7 +29,7 @@ import java.lang.annotation.Annotation; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Constructor; -import java.lang.reflect.Field; +import java.lang.reflect.Executable; import java.lang.reflect.Member; import java.lang.reflect.Method; import java.lang.reflect.Modifier; @@ -57,7 +57,7 @@ public final class InjectionPoint { private final boolean optional; - private final Member member; + private final Executable member; private final List> dependencies; InjectionPoint(TypeLiteral type, Method method) { @@ -75,26 +75,6 @@ public final class InjectionPoint { this.dependencies = forMember(constructor, type, constructor.getParameterAnnotations()); } - InjectionPoint(TypeLiteral type, Field field) { - this.member = field; - - Inject inject = field.getAnnotation(Inject.class); - this.optional = inject.optional(); - - Annotation[] annotations = field.getAnnotations(); - - Errors errors = new Errors(field); - Key key = null; - try { - key = Annotations.getKey(type.getFieldType(field), field, annotations, errors); - } catch (ErrorsException e) { - errors.merge(e.getErrors()); - } - errors.throwConfigurationExceptionIfErrorsExist(); - - this.dependencies = Collections.singletonList(newDependency(key, Nullability.allowsNull(annotations), -1)); - } - private List> forMember(Member member, TypeLiteral type, Annotation[][] parameterAnnotations) { Errors errors = new Errors(member); Iterator annotationsIterator = Arrays.asList(parameterAnnotations).iterator(); @@ -125,7 +105,7 @@ private Dependency newDependency(Key key, boolean allowsNull, int para /** * Returns the injected constructor, field, or method. 
*/ - public Member getMember() { + public Executable getMember() { return member; } @@ -230,13 +210,12 @@ public static InjectionPoint forConstructorOf(TypeLiteral type) { * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(TypeLiteral type) { + public static Set forInstanceMethods(TypeLiteral type) { Set result = new HashSet<>(); Errors errors = new Errors(); // TODO (crazybob): Filter out overridden members. - addInjectionPoints(type, Factory.FIELDS, false, result, errors); - addInjectionPoints(type, Factory.METHODS, false, result, errors); + addInjectionPoints(type, false, result, errors); result = unmodifiableSet(result); if (errors.hasErrors()) { @@ -246,7 +225,7 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ } /** - * Returns all instance method and field injection points on {@code type}. + * Returns all instance method injection points on {@code type}. * * @return a possibly empty set of injection points. The set has a specified iteration order. All * fields are returned and then all methods. Within the fields, supertype fields are returned @@ -256,8 +235,8 @@ public static Set forInstanceMethodsAndFields(TypeLiteral typ * ConfigurationException#getPartialValue() partial value} is a {@code Set} * of the valid injection points. */ - public static Set forInstanceMethodsAndFields(Class type) { - return forInstanceMethodsAndFields(TypeLiteral.get(type)); + public static Set forInstanceMethods(Class type) { + return forInstanceMethods(TypeLiteral.get(type)); } private static void checkForMisplacedBindingAnnotations(Member member, Errors errors) { @@ -274,18 +253,16 @@ private static void checkForMisplacedBindingAnnotations(Member member, Errors er // name. In Scala, fields always get accessor methods (that we need to ignore). See bug 242. if (member instanceof Method) { try { - if (member.getDeclaringClass().getField(member.getName()) != null) { - return; - } + member.getDeclaringClass().getField(member.getName()); + return; } catch (NoSuchFieldException ignore) {} } errors.misplacedBindingAnnotation(member, misplacedBindingAnnotation); } - private static void addInjectionPoints( + private static void addInjectionPoints( TypeLiteral type, - Factory factory, boolean statics, Collection injectionPoints, Errors errors @@ -296,20 +273,19 @@ private static void addInjectionPoints( // Add injectors for superclass first. 
TypeLiteral superType = type.getSupertype(type.getRawType().getSuperclass()); - addInjectionPoints(superType, factory, statics, injectionPoints, errors); + addInjectionPoints(superType, statics, injectionPoints, errors); // Add injectors for all members next - addInjectorsForMembers(type, factory, statics, injectionPoints, errors); + addInjectorsForMembers(type, statics, injectionPoints, errors); } - private static void addInjectorsForMembers( + private static void addInjectorsForMembers( TypeLiteral typeLiteral, - Factory factory, boolean statics, Collection injectionPoints, Errors errors ) { - for (M member : factory.getMembers(getRawType(typeLiteral.getType()))) { + for (Method member : getRawType(typeLiteral.getType()).getMethods()) { if (isStatic(member) != statics) { continue; } @@ -320,7 +296,8 @@ private static void addInjectorsForMembers } try { - injectionPoints.add(factory.create(typeLiteral, member, errors)); + checkForMisplacedBindingAnnotations(member, errors); + injectionPoints.add(new InjectionPoint(typeLiteral, member)); } catch (ConfigurationException ignorable) { if (inject.optional() == false) { errors.merge(ignorable.getErrorMessages()); @@ -333,34 +310,4 @@ private static boolean isStatic(Member member) { return Modifier.isStatic(member.getModifiers()); } - private interface Factory { - Factory FIELDS = new Factory<>() { - @Override - public Field[] getMembers(Class type) { - return type.getFields(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Field member, Errors errors) { - return new InjectionPoint(typeLiteral, member); - } - }; - - Factory METHODS = new Factory<>() { - @Override - public Method[] getMembers(Class type) { - return type.getMethods(); - } - - @Override - public InjectionPoint create(TypeLiteral typeLiteral, Method member, Errors errors) { - checkForMisplacedBindingAnnotations(member, errors); - return new InjectionPoint(typeLiteral, member); - } - }; - - M[] getMembers(Class type); - - InjectionPoint create(TypeLiteral typeLiteral, M member, Errors errors); - } } From 8ed115a6acbddbadcfa6a74e4cdd4e47f755d79f Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Mar 2024 15:22:02 -0700 Subject: [PATCH 114/248] Simplify jna loading (#105813) Previous versions of Elasticsearch had JNA as an optional dependency because the transport client did not need it. Since the JNA classes are now always available, ClassNotFoundException is not possible. This commit adjusts initializing jna to no longer call Class.forName. --- .../src/main/java/org/elasticsearch/bootstrap/Natives.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java index 4fa670b28872b..040c50b2b74e2 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.ReferenceDocs; +import java.lang.invoke.MethodHandles; import java.nio.file.Path; import java.util.Locale; @@ -33,10 +34,10 @@ private Natives() {} try { // load one of the main JNA classes to see if the classes are available. 
this does not ensure that all native // libraries are available, only the ones necessary by JNA to function - Class.forName("com.sun.jna.Native"); + MethodHandles.publicLookup().ensureInitialized(com.sun.jna.Native.class); v = true; - } catch (ClassNotFoundException e) { - logger.warn("JNA not found. native methods will be disabled.", e); + } catch (IllegalAccessException e) { + throw new AssertionError(e); } catch (UnsatisfiedLinkError e) { logger.warn( String.format( From 9fe8b96666864fe8358a37818cae47931ad3ac60 Mon Sep 17 00:00:00 2001 From: Youhei Sakurai Date: Tue, 12 Mar 2024 08:03:21 +0900 Subject: [PATCH 115/248] Handling exceptions on watcher reload (#105442) --- docs/changelog/105442.yaml | 6 ++ .../watcher/WatcherLifeCycleService.java | 4 +- .../xpack/watcher/WatcherService.java | 7 +- .../watcher/WatcherLifeCycleServiceTests.java | 99 +++++++++++++++++-- .../xpack/watcher/WatcherServiceTests.java | 29 +++++- 5 files changed, 134 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/105442.yaml diff --git a/docs/changelog/105442.yaml b/docs/changelog/105442.yaml new file mode 100644 index 0000000000000..b0af1b634d984 --- /dev/null +++ b/docs/changelog/105442.yaml @@ -0,0 +1,6 @@ +pr: 105442 +summary: Handling exceptions on watcher reload +area: Watcher +type: bug +issues: + - 69842 diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 571e8912b43b2..f6e34ccb243c8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -166,7 +166,9 @@ public void clusterChanged(ClusterChangedEvent event) { if (watcherService.validate(event.state())) { previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { - watcherService.reload(event.state(), "new local watcher shard allocation ids"); + watcherService.reload(event.state(), "new local watcher shard allocation ids", (exception) -> { + clearAllocationIds(); // will cause reload again + }); } else if (isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED), (exception) -> { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index a067b99c6bff0..5389f34212270 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -201,7 +201,7 @@ void stopExecutor() { * Reload the watcher service, does not switch the state from stopped to started, just keep going * @param state cluster state, which is needed to find out about local shards */ - void reload(ClusterState state, String reason) { + void reload(ClusterState state, String reason, Consumer exceptionConsumer) { boolean hasValidWatcherTemplates = WatcherIndexTemplateRegistry.validate(state); if (hasValidWatcherTemplates == false) { logger.warn("missing watcher index templates"); @@ -221,7 +221,10 @@ void reload(ClusterState state, String reason) { int cancelledTaskCount = executionService.clearExecutionsAndQueue(() -> {}); logger.info("reloading watcher, reason [{}], cancelled 
[{}] queued tasks", reason, cancelledTaskCount); - executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); + executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> { + logger.error("error reloading watcher", e); + exceptionConsumer.accept(e); + })); } /** diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 57ec168728171..365b072a418ef 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -258,6 +258,91 @@ public void testExceptionOnStart() { assertThat(lifeCycleService.getState().get(), equalTo(WatcherState.STARTED)); } + public void testReloadWithIdenticalRoutingTable() { + /* + * This tests that the identical routing table causes reload only once. + */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // but it shouldn't on the second event unless routing table changes + verify(watcherService, never()).reload(eq(events[1].state()), anyString(), any()); + } + + public void testReloadWithIdenticalRoutingTableAfterException() { + /* + * This tests that even the identical routing table causes reload again if some exception (for example a timeout while loading + * watches) interrupted the previous one. 
+ */ + startWatcher(); + + ClusterChangedEvent[] events = masterChangeScenario(); + assertThat(events[1].previousState(), equalTo(events[0].state())); + assertFalse(events[1].routingTableChanged()); + + // simulate exception on the first event + doAnswer(invocation -> { + Consumer exceptionConsumer = invocation.getArgument(2); + exceptionConsumer.accept(new ElasticsearchTimeoutException(new TimeoutException("Artificial timeout"))); + return null; + }).when(watcherService).reload(eq(events[0].state()), anyString(), any()); + + for (ClusterChangedEvent event : events) { + when(watcherService.validate(event.state())).thenReturn(true); + lifeCycleService.clusterChanged(event); + } + // reload should occur on the first event but it fails + verify(watcherService).reload(eq(events[0].state()), anyString(), any()); + // reload should occur again on the second event because the previous one failed + verify(watcherService).reload(eq(events[1].state()), anyString(), any()); + } + + private ClusterChangedEvent[] masterChangeScenario() { + DiscoveryNodes nodes = new DiscoveryNodes.Builder().localNodeId("node_1").add(newNode("node_1")).add(newNode("node_2")).build(); + + Index index = new Index(Watch.INDEX, "uuid"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(new ShardId(index, 0), "node_1", true, ShardRoutingState.STARTED) + ); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(Watch.INDEX) + .settings(settings(IndexVersion.current()).put(IndexMetadata.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, + // required + .numberOfShards(1) + .numberOfReplicas(0); + Metadata metadata = Metadata.builder() + .put(IndexTemplateMetadata.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetadataBuilder) + .build(); + + ClusterState emptyState = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).metadata(metadata).build(); + ClusterState stateWithMasterNode1 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_1")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + ClusterState stateWithMasterNode2 = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes.withMasterNodeId("node_2")) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + return new ClusterChangedEvent[] { + new ClusterChangedEvent("any", stateWithMasterNode1, emptyState), + new ClusterChangedEvent("any", stateWithMasterNode2, stateWithMasterNode1) }; + } + public void testNoLocalShards() { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); @@ -301,7 +386,7 @@ public void testNoLocalShards() { when(watcherService.validate(eq(clusterStateWithLocalShards))).thenReturn(true); when(watcherService.validate(eq(clusterStateWithoutLocalShards))).thenReturn(false); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithLocalShards, clusterStateWithoutLocalShards)); - verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids")); + verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids"), any()); verify(watcherService, times(1)).validate(eq(clusterStateWithLocalShards)); verifyNoMoreInteractions(watcherService); 
@@ -380,12 +465,12 @@ public void testReplicaWasAddedOrRemoved() { when(watcherService.validate(eq(firstEvent.state()))).thenReturn(true); lifeCycleService.clusterChanged(firstEvent); - verify(watcherService).reload(eq(firstEvent.state()), anyString()); + verify(watcherService).reload(eq(firstEvent.state()), anyString(), any()); reset(watcherService); when(watcherService.validate(eq(secondEvent.state()))).thenReturn(true); lifeCycleService.clusterChanged(secondEvent); - verify(watcherService).reload(eq(secondEvent.state()), anyString()); + verify(watcherService).reload(eq(secondEvent.state()), anyString(), any()); } // make sure that cluster state changes can be processed on nodes that do not hold data @@ -425,7 +510,7 @@ public void testNonDataNode() { lifeCycleService.clusterChanged(new ClusterChangedEvent("any", currentState, previousState)); verify(watcherService, times(0)).pauseExecution(any()); - verify(watcherService, times(0)).reload(any(), any()); + verify(watcherService, times(0)).reload(any(), any(), any()); } public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { @@ -452,7 +537,7 @@ public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { // first add the shard allocation ids, by going from empty cs to CS with watcher index lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, clusterStateWithoutWatcherIndex)); - verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString()); + verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString(), any()); // now remove watches index, and ensure that pausing is only called once, no matter how often called (i.e. each CS update) lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); @@ -577,7 +662,7 @@ public void testWatcherReloadsOnNodeOutageWithWatcherShard() { when(watcherService.validate(any())).thenReturn(true); ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); lifeCycleService.clusterChanged(event); - verify(watcherService).reload(eq(event.state()), anyString()); + verify(watcherService).reload(eq(event.state()), anyString(), any()); } private void startWatcher() { @@ -609,7 +694,7 @@ private void startWatcher() { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState().get(), is(WatcherState.STARTED)); - verify(watcherService, times(1)).reload(eq(state), anyString()); + verify(watcherService, times(1)).reload(eq(state), anyString(), any()); assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 19bac967c576a..24a4eede1b20d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -77,6 +77,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -349,12 +350,38 @@ void stopExecutor() {} ClusterState.Builder csBuilder = new ClusterState.Builder(new 
ClusterName("_name")); csBuilder.metadata(Metadata.builder()); - service.reload(csBuilder.build(), "whatever"); + service.reload(csBuilder.build(), "whatever", exception -> {}); verify(executionService).clearExecutionsAndQueue(any()); verify(executionService, never()).pause(any()); verify(triggerService).pauseExecution(); } + // the trigger service should not start unless watches are loaded successfully + public void testReloadingWatcherDoesNotStartTriggerServiceIfFailingToLoadWatches() { + ExecutionService executionService = mock(ExecutionService.class); + TriggerService triggerService = mock(TriggerService.class); + WatcherService service = new WatcherService( + Settings.EMPTY, + triggerService, + mock(TriggeredWatchStore.class), + executionService, + mock(WatchParser.class), + client, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) { + @Override + void stopExecutor() {} + }; + + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + Metadata metadata = spy(Metadata.builder().build()); + when(metadata.getIndicesLookup()).thenThrow(RuntimeException.class); // simulate exception in WatcherService's private loadWatches() + + service.reload(csBuilder.metadata(metadata).build(), "whatever", exception -> {}); + verify(triggerService).pauseExecution(); + verify(triggerService, never()).start(any()); + } + private static DiscoveryNode newNode() { return DiscoveryNodeUtils.create("node"); } From 6837d081f99cce8dfa39b8a0e3f4601cef52bac6 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 11 Mar 2024 16:34:32 -0700 Subject: [PATCH 116/248] Use search threadpool in ESQL (#106050) The esql and search thread pools share the same characteristics. Merging these thread pools would simplify workload tracking and autoscaling efforts. --- .../esql/enrich/EnrichLookupService.java | 5 +-- .../esql/enrich/EnrichPolicyResolver.java | 7 ++-- .../xpack/esql/plugin/ComputeService.java | 9 +++-- .../xpack/esql/plugin/EsqlPlugin.java | 17 +-------- .../esql/plugin/TransportEsqlQueryAction.java | 4 +-- .../elasticsearch/xpack/esql/CsvTests.java | 36 ++++++++----------- 6 files changed, 28 insertions(+), 50 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 8e5db20d7c849..b935632874157 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -52,6 +52,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -128,13 +129,13 @@ public EnrichLookupService( this.clusterService = clusterService; this.searchService = searchService; this.transportService = transportService; - this.executor = transportService.getThreadPool().executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH); this.bigArrays = bigArrays; this.blockFactory = blockFactory; this.localBreakerSettings = new LocalCircuitBreaker.SizeSettings(clusterService.getSettings()); transportService.registerRequestHandler( LOOKUP_ACTION_NAME, - this.executor, + 
transportService.getThreadPool().executor(EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME), in -> new LookupRequest(in, blockFactory), new TransportHandler() ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 113f8b95ca089..d4f6ea3e510c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -37,7 +37,6 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.index.EsIndex; @@ -80,7 +79,7 @@ public EnrichPolicyResolver(ClusterService clusterService, TransportService tran this.threadPool = transportService.getThreadPool(); transportService.registerRequestHandler( RESOLVE_ACTION_NAME, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME), + threadPool.executor(ThreadPool.Names.SEARCH), LookupRequest::new, new RequestHandler() ); @@ -272,7 +271,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(cluster, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } @@ -290,7 +289,7 @@ private void lookupPolicies( new ActionListenerResponseHandler<>( refs.acquire(resp -> lookupResponses.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, resp)), LookupResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) + threadPool.executor(ThreadPool.Names.SEARCH) ) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 64f393ccdf2b0..7af37a3eeb114 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -81,7 +81,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; /** @@ -116,7 +115,7 @@ public ComputeService( this.transportService = transportService; this.bigArrays = bigArrays.withCircuitBreaking(); this.blockFactory = blockFactory; - this.esqlExecutor = threadPool.executor(ESQL_THREAD_POOL_NAME); + this.esqlExecutor = threadPool.executor(ThreadPool.Names.SEARCH); transportService.registerRequestHandler(DATA_ACTION_NAME, this.esqlExecutor, DataNodeRequest::new, new DataNodeRequestHandler()); transportService.registerRequestHandler( CLUSTER_ACTION_NAME, @@ -196,7 +195,7 @@ public void execute( final List collectedProfiles = configuration.profile() ? 
Collections.synchronizedList(new ArrayList<>()) : List.of(); final var exchangeSource = new ExchangeSourceHandler( queryPragmas.exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); @@ -628,7 +627,7 @@ private void runBatch(int startBatchIndex) { final int endBatchIndex = Math.min(startBatchIndex + maxConcurrentShards, request.shardIds().size()); List shardIds = request.shardIds().subList(startBatchIndex, endBatchIndex); acquireSearchContexts(clusterAlias, shardIds, configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - assert ThreadPool.assertCurrentThreadPool(ESQL_THREAD_POOL_NAME, ESQL_WORKER_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH, ESQL_WORKER_THREAD_POOL_NAME); var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); runCompute( parentTask, @@ -734,7 +733,7 @@ void runComputeOnRemoteCluster( final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), - transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index 61f0393c80948..fded9339567bd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -70,7 +70,6 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { - public static final String ESQL_THREAD_POOL_NAME = "esql"; public static final String ESQL_WORKER_THREAD_POOL_NAME = "esql_worker"; public static final Setting QUERY_RESULT_TRUNCATION_MAX_SIZE = Setting.intSetting( @@ -112,12 +111,7 @@ public Collection createComponents(PluginServices services) { ), new EsqlIndexResolver(services.client(), EsqlDataTypeRegistry.INSTANCE) ), - new ExchangeService( - services.clusterService().getSettings(), - services.threadPool(), - EsqlPlugin.ESQL_THREAD_POOL_NAME, - blockFactory - ), + new ExchangeService(services.clusterService().getSettings(), services.threadPool(), ThreadPool.Names.SEARCH, blockFactory), blockFactory ); } @@ -186,18 +180,9 @@ public List getNamedWriteables() { ).toList(); } - @Override public List> getExecutorBuilders(Settings settings) { final int allocatedProcessors = EsExecutors.allocatedProcessors(settings); return List.of( - new FixedExecutorBuilder( - settings, - ESQL_THREAD_POOL_NAME, - allocatedProcessors, - 1000, - ESQL_THREAD_POOL_NAME, - EsExecutors.TaskTrackingConfig.DEFAULT - ), // TODO: Maybe have two types of threadpools for workers: one for CPU-bound and one for I/O-bound tasks. // And we should also reduce the number of threads of the CPU-bound threadpool to allocatedProcessors. 
new FixedExecutorBuilder( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index baaa4abe23b3d..366046d39dc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -82,7 +82,7 @@ public TransportEsqlQueryAction( super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.planExecutor = planExecutor; this.clusterService = clusterService; - this.requestExecutor = threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); + this.requestExecutor = threadPool.executor(ThreadPool.Names.SEARCH); exchangeService.registerTransportHandler(transportService); this.exchangeService = exchangeService; this.enrichPolicyResolver = new EnrichPolicyResolver(clusterService, transportService, planExecutor.indexResolver()); @@ -124,7 +124,7 @@ protected void doExecute(Task task, EsqlQueryRequest request, ActionListener listener) { - assert ThreadPool.assertCurrentThreadPool(EsqlPlugin.ESQL_THREAD_POOL_NAME); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH); if (requestIsAsync(request)) { asyncTaskManagementService.asyncExecute( request, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 20714cc5633b6..dd937c11c9642 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -96,6 +96,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -107,7 +108,6 @@ import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.CSV_DATASET_MAP; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; -import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_THREAD_POOL_NAME; import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.ql.TestUtils.classpathResources; import static org.hamcrest.Matchers.equalTo; @@ -161,6 +161,7 @@ public class CsvTests extends ESTestCase { private final Mapper mapper = new Mapper(functionRegistry); private final PhysicalPlanOptimizer physicalPlanOptimizer = new TestPhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); private ThreadPool threadPool; + private Executor executor; @ParametersFactory(argumentFormatting = "%2$s.%3$s") public static List readScriptSpec() throws Exception { @@ -174,18 +175,17 @@ public static List readScriptSpec() throws Exception { @Before public void setUp() throws Exception { super.setUp(); - int numThreads = randomBoolean() ? 1 : between(2, 16); - threadPool = new TestThreadPool( - "CsvTests", - new FixedExecutorBuilder( - Settings.EMPTY, - ESQL_THREAD_POOL_NAME, - numThreads, - 1024, - "esql", - EsExecutors.TaskTrackingConfig.DEFAULT - ) - ); + if (randomBoolean()) { + int numThreads = randomBoolean() ? 
1 : between(2, 16); + threadPool = new TestThreadPool( + "CsvTests", + new FixedExecutorBuilder(Settings.EMPTY, "esql_test", numThreads, 1024, "esql", EsExecutors.TaskTrackingConfig.DEFAULT) + ); + executor = threadPool.executor("esql_test"); + } else { + threadPool = new TestThreadPool(getTestName()); + executor = threadPool.executor(ThreadPool.Names.SEARCH); + } HeaderWarning.setThreadContext(threadPool.getThreadContext()); } @@ -343,7 +343,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { bigArrays, ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), threadPool.executor(ESQL_THREAD_POOL_NAME)); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, @@ -406,13 +406,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { DriverRunner runner = new DriverRunner(threadPool.getThreadContext()) { @Override protected void start(Driver driver, ActionListener driverListener) { - Driver.start( - threadPool.getThreadContext(), - threadPool.executor(ESQL_THREAD_POOL_NAME), - driver, - between(1, 1000), - driverListener - ); + Driver.start(threadPool.getThreadContext(), executor, driver, between(1, 1000), driverListener); } }; PlainActionFuture future = new PlainActionFuture<>(); From 8d839e3b5248e8e29dbb615778b99624f07379cc Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Mon, 11 Mar 2024 23:02:57 -0400 Subject: [PATCH 117/248] add mv_slice and mv_zip (#106147) --- .../esql/functions/mv-functions.asciidoc | 4 + .../esql/functions/mv_slice.asciidoc | 47 +++ docs/reference/esql/functions/mv_zip.asciidoc | 38 ++ .../esql/functions/signature/mv_slice.svg | 1 + .../esql/functions/signature/mv_zip.svg | 1 + .../esql/functions/types/mv_slice.asciidoc | 17 + .../esql/functions/types/mv_zip.asciidoc | 6 + .../compute/gen/EvaluatorImplementer.java | 190 +++++++++- .../src/main/resources/boolean.csv-spec | 23 ++ .../src/main/resources/floats.csv-spec | 15 + .../src/main/resources/ints.csv-spec | 145 ++++++++ .../src/main/resources/ip.csv-spec | 44 +++ .../src/main/resources/show.csv-spec | 6 +- .../src/main/resources/string.csv-spec | 44 +++ .../multivalue/MvSliceBooleanEvaluator.java | 140 +++++++ .../multivalue/MvSliceBytesRefEvaluator.java | 140 +++++++ .../multivalue/MvSliceDoubleEvaluator.java | 140 +++++++ .../multivalue/MvSliceIntEvaluator.java | 139 +++++++ .../multivalue/MvSliceLongEvaluator.java | 140 +++++++ .../scalar/multivalue/MvZipEvaluator.java | 127 +++++++ .../function/EsqlFunctionRegistry.java | 4 + .../function/scalar/multivalue/MvSlice.java | 344 +++++++++++++++++ .../function/scalar/multivalue/MvZip.java | 211 +++++++++++ .../xpack/esql/io/stream/PlanNamedTypes.java | 30 ++ .../function/AbstractFunctionTestCase.java | 2 +- .../scalar/multivalue/MvSliceTests.java | 346 ++++++++++++++++++ .../scalar/multivalue/MvZipTests.java | 120 ++++++ 27 files changed, 2447 insertions(+), 17 deletions(-) create mode 100644 docs/reference/esql/functions/mv_slice.asciidoc create mode 100644 docs/reference/esql/functions/mv_zip.asciidoc create mode 100644 docs/reference/esql/functions/signature/mv_slice.svg create mode 100644 
docs/reference/esql/functions/signature/mv_zip.svg create mode 100644 docs/reference/esql/functions/types/mv_slice.asciidoc create mode 100644 docs/reference/esql/functions/types/mv_zip.asciidoc create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index a95a3d36a9963..07d89e7879e67 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -17,7 +17,9 @@ * <> * <> * <> +* <> * <> +* <> // end::mv_list[] include::mv_avg.asciidoc[] @@ -29,4 +31,6 @@ include::mv_last.asciidoc[] include::mv_max.asciidoc[] include::mv_median.asciidoc[] include::mv_min.asciidoc[] +include::mv_slice.asciidoc[] include::mv_sum.asciidoc[] +include::mv_zip.asciidoc[] diff --git a/docs/reference/esql/functions/mv_slice.asciidoc b/docs/reference/esql/functions/mv_slice.asciidoc new file mode 100644 index 0000000000000..f4431b25232a2 --- /dev/null +++ b/docs/reference/esql/functions/mv_slice.asciidoc @@ -0,0 +1,47 @@ +[discrete] +[[esql-mv_slice]] +=== `MV_SLICE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_slice.svg[Embedded,opts=inline] + +*Parameters* + +`field`:: +Multivalue expression. If `null`, the function returns `null`. + +`start`:: +Start position. If `null`, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list. + +`end`:: +End position. Optional; if omitted, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list. + +*Description* + +Returns a subset of the multivalued field using the start and end index values. 
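The start/end rules described here, together with the `ints.csv-spec` cases added later in this patch, amount to negative-index translation plus clamping. The following stand-alone Java sketch spells those rules out; it is an illustration of the documented behaviour, not the `MvSlice` implementation, and the single-argument form `mv_slice(v, start)` corresponds to calling it with `end == start`.

[source,java]
----
import java.util.List;

public class MvSliceSemantics {
    /** Returns the sliced values, or null when the requested range is empty. */
    static <T> List<T> mvSlice(List<T> values, int start, int end) {
        if (values == null || values.isEmpty()) {
            return null;
        }
        int len = values.size();
        // Negative indices count from the end of the list: -1 is the last value.
        int from = start < 0 ? start + len : start;
        int to = end < 0 ? end + len : end;
        // Clamp to the valid range; an empty range yields null, as in the csv-spec tests.
        from = Math.max(from, 0);
        to = Math.min(to, len - 1);
        if (from > to) {
            return null;
        }
        return values.subList(from, to + 1);
    }

    public static void main(String[] args) {
        List<Integer> a = List.of(1, 2, 2, 3);
        System.out.println(mvSlice(a, 2, 3));   // [2, 3]
        System.out.println(mvSlice(a, -3, -1)); // [2, 2, 3]
        System.out.println(mvSlice(a, 4, 6));   // null (start is past the end)
    }
}
----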
+ +*Supported types* + +include::types/mv_slice.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_positive-result] +|=== + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_slice_negative-result] +|=== diff --git a/docs/reference/esql/functions/mv_zip.asciidoc b/docs/reference/esql/functions/mv_zip.asciidoc new file mode 100644 index 0000000000000..4e71e2cafb9c4 --- /dev/null +++ b/docs/reference/esql/functions/mv_zip.asciidoc @@ -0,0 +1,38 @@ +[discrete] +[[esql-mv_zip]] +=== `MV_ZIP` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_zip.svg[Embedded,opts=inline] + +*Parameters* + +`mvLeft`:: +Multivalue expression. + +`mvRight`:: +Multivalue expression. + +`delim`:: +Delimiter. Optional; if omitted, `,` is used as a default delimiter. + +*Description* + +Combines the values from two multivalued fields with a delimiter that joins them together. + +*Supported types* + +include::types/mv_zip.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=mv_zip] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=mv_zip-result] +|=== diff --git a/docs/reference/esql/functions/signature/mv_slice.svg b/docs/reference/esql/functions/signature/mv_slice.svg new file mode 100644 index 0000000000000..277566a35e47d --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_slice.svg @@ -0,0 +1 @@ +MV_SLICE(v,start,end) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_zip.svg b/docs/reference/esql/functions/signature/mv_zip.svg new file mode 100644 index 0000000000000..02c61b3c4bc5c --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_zip.svg @@ -0,0 +1 @@ +MV_ZIP(mvLeft,mvRight,delim) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc new file mode 100644 index 0000000000000..1891fed3631e9 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -0,0 +1,17 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | start | end | result +boolean | integer | integer | boolean +cartesian_point | integer | integer | cartesian_point +cartesian_shape | integer | integer | cartesian_shape +datetime | integer | integer | datetime +double | integer | integer | double +geo_point | integer | integer | geo_point +geo_shape | integer | integer | geo_shape +integer | integer | integer | integer +ip | integer | integer | ip +keyword | integer | integer | keyword +long | integer | integer | long +text | integer | integer | text +version | integer | integer | version +|=== diff --git a/docs/reference/esql/functions/types/mv_zip.asciidoc b/docs/reference/esql/functions/types/mv_zip.asciidoc new file mode 100644 index 0000000000000..6ee6c29c77264 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_zip.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +mvLeft | mvRight | delim | result +keyword | keyword | keyword | keyword +text | text | text | keyword +|=== diff --git 
a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index f283e3b59bb63..55a81cd7aaace 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -36,10 +36,15 @@ import static org.elasticsearch.compute.gen.Methods.buildFromFactory; import static org.elasticsearch.compute.gen.Methods.getMethod; import static org.elasticsearch.compute.gen.Types.BLOCK; +import static org.elasticsearch.compute.gen.Types.BOOLEAN_BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF; +import static org.elasticsearch.compute.gen.Types.BYTES_REF_BLOCK; +import static org.elasticsearch.compute.gen.Types.DOUBLE_BLOCK; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR_FACTORY; +import static org.elasticsearch.compute.gen.Types.INT_BLOCK; +import static org.elasticsearch.compute.gen.Types.LONG_BLOCK; import static org.elasticsearch.compute.gen.Types.PAGE; import static org.elasticsearch.compute.gen.Types.RELEASABLE; import static org.elasticsearch.compute.gen.Types.RELEASABLES; @@ -53,6 +58,7 @@ public class EvaluatorImplementer { private final TypeElement declarationType; private final ProcessFunction processFunction; private final ClassName implementation; + private final boolean processOutputsMultivalued; public EvaluatorImplementer( Elements elements, @@ -68,6 +74,7 @@ public EvaluatorImplementer( elements.getPackageOf(declarationType).toString(), declarationType.getSimpleName() + extraName + "Evaluator" ); + this.processOutputsMultivalued = this.processFunction.hasBlockType && (this.processFunction.builderArg != null); } public JavaFile sourceFile() { @@ -94,10 +101,17 @@ private TypeSpec type() { builder.addMethod(ctor()); builder.addMethod(eval()); - if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { - builder.addMethod(realEval(true)); + + if (processOutputsMultivalued) { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { + builder.addMethod(realEval(true)); + } + } else { + if (processFunction.args.stream().anyMatch(x -> x instanceof FixedProcessFunctionArg == false)) { + builder.addMethod(realEval(true)); + } + builder.addMethod(realEval(false)); } - builder.addMethod(realEval(false)); builder.addMethod(toStringMethod()); builder.addMethod(close()); return builder.build(); @@ -117,17 +131,21 @@ private MethodSpec ctor() { private MethodSpec eval() { MethodSpec.Builder builder = MethodSpec.methodBuilder("eval").addAnnotation(Override.class); builder.addModifiers(Modifier.PUBLIC).returns(BLOCK).addParameter(PAGE, "page"); - processFunction.args.stream().forEach(a -> a.evalToBlock(builder)); String invokeBlockEval = invokeRealEval(true); - processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); - builder.addStatement(invokeRealEval(false)); + if (processOutputsMultivalued) { + builder.addStatement(invokeBlockEval); + } else { + processFunction.args.stream().forEach(a -> a.resolveVectors(builder, invokeBlockEval)); + builder.addStatement(invokeRealEval(false)); + } processFunction.args.stream().forEach(a 
-> a.closeEvalToBlock(builder)); return builder.build(); } private String invokeRealEval(boolean blockStyle) { StringBuilder builder = new StringBuilder("return eval(page.getPositionCount()"); + String params = processFunction.args.stream() .map(a -> a.paramName(blockStyle)) .filter(a -> a != null) @@ -154,6 +172,7 @@ private MethodSpec realEval(boolean blockStyle) { builder.addParameter(a.dataType(blockStyle), a.paramName(blockStyle)); } }); + TypeName builderType = builderType(resultDataType); builder.beginControlFlow( "try($T result = driverContext.blockFactory().$L(positionCount))", @@ -166,13 +185,36 @@ private MethodSpec realEval(boolean blockStyle) { builder.beginControlFlow("position: for (int p = 0; p < positionCount; p++)"); { if (blockStyle) { - processFunction.args.stream().forEach(a -> a.skipNull(builder)); + if (processOutputsMultivalued == false) { + processFunction.args.stream().forEach(a -> a.skipNull(builder)); + } else { + builder.addStatement("boolean allBlocksAreNulls = true"); + // allow block type inputs to be null + processFunction.args.stream().forEach(a -> { + if (a instanceof StandardProcessFunctionArg as) { + as.skipNull(builder); + } else if (a instanceof BlockProcessFunctionArg ab) { + builder.beginControlFlow("if (!$N.isNull(p))", ab.paramName(blockStyle)); + { + builder.addStatement("allBlocksAreNulls = false"); + } + builder.endControlFlow(); + } + }); + + builder.beginControlFlow("if (allBlocksAreNulls)"); + { + builder.addStatement("result.appendNull()"); + builder.addStatement("continue position"); + } + builder.endControlFlow(); + } } processFunction.args.stream().forEach(a -> a.unpackValues(builder, blockStyle)); StringBuilder pattern = new StringBuilder(); List args = new ArrayList<>(); - pattern.append("$T.$N("); + pattern.append(processOutputsMultivalued ? "$T.$N(result, p, " : "$T.$N("); args.add(declarationType); args.add(processFunction.function.getSimpleName()); processFunction.args.stream().forEach(a -> { @@ -189,11 +231,12 @@ private MethodSpec realEval(boolean blockStyle) { } else { builtPattern = pattern.toString(); } - if (processFunction.warnExceptions.isEmpty() == false) { builder.beginControlFlow("try"); } + builder.addStatement(builtPattern, args.toArray()); + if (processFunction.warnExceptions.isEmpty() == false) { String catchPattern = "catch (" + processFunction.warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) @@ -403,7 +446,7 @@ private record StandardProcessFunctionArg(TypeName type, String name) implements @Override public TypeName dataType(boolean blockStyle) { if (blockStyle) { - return blockType(type); + return isBlockType() ? type : blockType(type); } return vectorType(type); } @@ -442,7 +485,7 @@ public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { @Override public void evalToBlock(MethodSpec.Builder builder) { - TypeName blockType = blockType(type); + TypeName blockType = isBlockType() ? 
type : blockType(type); builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", blockType, name, blockType, name); } @@ -474,6 +517,10 @@ public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { // nothing to do } + private boolean isBlockType() { + return EvaluatorImplementer.isBlockType(type); + } + @Override public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { if (type.equals(BYTES_REF)) { @@ -488,14 +535,21 @@ public void buildInvocation(StringBuilder pattern, List args, boolean bl return; } if (blockStyle) { - pattern.append("$L.$L($L.getFirstValueIndex(p))"); + if (isBlockType()) { + pattern.append("$L"); + } else { + pattern.append("$L.$L($L.getFirstValueIndex(p))"); + } } else { pattern.append("$L.$L(p)"); } args.add(paramName(blockStyle)); - args.add(getMethod(type)); - if (blockStyle) { - args.add(paramName(true)); + String method = isBlockType() ? null : getMethod(type); + if (method != null) { + args.add(method); + if (blockStyle) { + args.add(paramName(true)); + } } } @@ -824,12 +878,101 @@ public String closeInvocation() { } } + private record BlockProcessFunctionArg(TypeName type, String name) implements ProcessFunctionArg { + @Override + public TypeName dataType(boolean blockStyle) { + return type; + } + + @Override + public String paramName(boolean blockStyle) { + return name + (blockStyle ? "Block" : "Vector"); + } + + @Override + public void declareField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void declareFactoryField(TypeSpec.Builder builder) { + builder.addField(EXPRESSION_EVALUATOR_FACTORY, name, Modifier.PRIVATE, Modifier.FINAL); + } + + @Override + public void implementCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public void implementFactoryCtor(MethodSpec.Builder builder) { + builder.addParameter(EXPRESSION_EVALUATOR_FACTORY, name); + builder.addStatement("this.$L = $L", name, name); + } + + @Override + public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { + return name + ".get(context)"; + } + + @Override + public void evalToBlock(MethodSpec.Builder builder) { + builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", type, name, type, name); + } + + @Override + public void closeEvalToBlock(MethodSpec.Builder builder) { + builder.endControlFlow(); + } + + @Override + public void resolveVectors(MethodSpec.Builder builder, String invokeBlockEval) { + // nothing to do + } + + @Override + public void createScratch(MethodSpec.Builder builder) { + // nothing to do + } + + @Override + public void skipNull(MethodSpec.Builder builder) { + EvaluatorImplementer.skipNull(builder, paramName(true)); + } + + @Override + public void unpackValues(MethodSpec.Builder builder, boolean blockStyle) { + // nothing to do + } + + @Override + public void buildInvocation(StringBuilder pattern, List args, boolean blockStyle) { + pattern.append("$L"); + args.add(paramName(blockStyle)); + } + + @Override + public void buildToStringInvocation(StringBuilder pattern, List args, String prefix) { + pattern.append(" + $S + $L"); + args.add(prefix + name + "="); + args.add(name); + } + + @Override + public String closeInvocation() { + return name; + } + } + private static class ProcessFunction { private final ExecutableElement function; private final List args; private final BuilderProcessFunctionArg 
builderArg; private final List warnExceptions; + private boolean hasBlockType; + private ProcessFunction( Elements elements, javax.lang.model.util.Types types, @@ -839,6 +982,7 @@ private ProcessFunction( this.function = function; args = new ArrayList<>(); BuilderProcessFunctionArg builderArg = null; + hasBlockType = false; for (VariableElement v : function.getParameters()) { TypeName type = TypeName.get(v.asType()); String name = v.getSimpleName().toString(); @@ -871,6 +1015,14 @@ private ProcessFunction( args.add(new ArrayProcessFunctionArg(TypeName.get(componentType), name)); continue; } + if (isBlockType(type)) { + if (builderArg != null && args.size() == 2 && hasBlockType == false) { + args.clear(); + hasBlockType = true; + } + args.add(new BlockProcessFunctionArg(type, name)); + continue; + } args.add(new StandardProcessFunctionArg(type, name)); } this.builderArg = builderArg; @@ -885,4 +1037,12 @@ private ClassName resultDataType(boolean blockStyle) { return useBlockStyle ? blockType(TypeName.get(function.getReturnType())) : vectorType(TypeName.get(function.getReturnType())); } } + + static boolean isBlockType(TypeName type) { + return type.equals(INT_BLOCK) + || type.equals(LONG_BLOCK) + || type.equals(DOUBLE_BLOCK) + || type.equals(BOOLEAN_BLOCK) + || type.equals(BYTES_REF_BLOCK); + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index 3d9f9aa6e1c27..bda103080adc0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -232,3 +232,26 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10020 |null |null |null 10030 |3 |true |true ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +row a = [true, false, false, true] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3); + +a:boolean | a1:boolean | a2:boolean +[true, false, false, true] | false | [false, true] +; + +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(is_rehired, 0) +| keep emp_no, is_rehired, a1 +| sort emp_no +| limit 5; + +emp_no:integer | is_rehired:boolean | a1:boolean +10001 | [false,true] | false +10002 | [false,false] | false +10003 | null | null +10004 | true | true +10005 | [false,false,false,true] | false +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index f56266f868d44..0138ec1a70989 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -224,6 +224,21 @@ row a = [1.1, 2.1, 2.1] | eval da = mv_dedupe(a); [1.1, 2.1, 2.1] | [1.1, 2.1] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change, 0, 1) +| keep emp_no, salary_change, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change:double | a1:double +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,3.65,13.48] | [-0.35, 1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec 
index baf6da2cd0bde..63bc452bf5bd5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -384,6 +384,151 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a); [1, 2, 2, 3] | [1, 2, 3] ; +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_positive[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3) +// end::mv_slice_positive[] +; +// tag::mv_slice_positive-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 3] +// end::mv_slice_positive-result[] +; + +mvSliceNegativeOffset#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_slice_negative[] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, -2), a2 = mv_slice(a, -3, -1) +// end::mv_slice_negative[] +; +// tag::mv_slice_negative-result[] +a:integer | a1:integer | a2:integer +[1, 2, 2, 3] | 2 | [2, 2, 3] +// end::mv_slice_negative-result[] +; + +mvSliceSingle#[skip:-8.13.99, reason:newly added in 8.14] +row a = 1 +| eval a1 = mv_slice(a, 0); + +a:integer | a1:integer +1 | 1 +; + +mvSliceOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +row a = [1, 2, 2, 3] +| eval a1 = mv_slice(a, 4), a2 = mv_slice(a, 2, 6), a3 = mv_slice(a, 4, 6); + +a:integer | a1:integer | a2:integer | a3:integer +[1, 2, 2, 3] | null | [2, 3] | null +; + +mvSliceEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 0, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + +mvSliceEmpIntSingle#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | 1 +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntEndOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 1, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | 11 +10003 | [12, 14] | 14 +10004 | [0, 1, 3, 13] | [1, 3, 13] +10005 | [-2, 13] | 13 +; + +mvSliceEmpIntOutOfBound#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, 2, 4) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [3, 13] +10005 | [-2, 13] | null +; + +mvSliceEmpIntStartOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -2) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 | 1 | null +10002 | [-7, 11] | -7 +10003 | [12, 14] | 12 +10004 | [0, 1, 3, 13] | [0, 1, 3] +10005 | [-2, 13] | -2 +; + +mvSliceEmpIntOutOfBoundNegative#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.int, -5, -3) +| keep emp_no, salary_change.int, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.int:integer | a1:integer +10001 
| 1 | null +10002 | [-7, 11] | null +10003 | [12, 14] | null +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | null +; + +mvSliceEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.long, 0, 1) +| keep emp_no, salary_change.long, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.long:long | a1:long +10001 | 1 | 1 +10002 | [-7, 11] | [-7, 11] +10003 | [12, 14] | [12, 14] +10004 | [0, 1, 3, 13] | [0, 1] +10005 | [-2, 13] | [-2, 13] +; + autoBucket // tag::auto_bucket[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 0b2ce54d5fd22..54256b3420c82 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -277,3 +277,47 @@ lo0 |fe81::cae2:65ff:fece:feb9 eth0 |127.0.0.3 eth0 |fe80::cae2:65ff:fece:fec1 ; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvSlice#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| where host == "epsilon" +| eval a1 = mv_slice(ip1, 0, 1) +| keep host, ip1, a1 +| sort host, ip1 +| limit 5; + +host:keyword | ip1:ip | a1:ip +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +from hosts +| eval zip = mv_zip(to_string(description), to_string(ip0), "@@") +| keep host, description, ip0, zip +| sort host desc, ip0 +| limit 5 +; + +host:keyword | description:text | ip0:ip | zip:keyword +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +gamma | gamma k8s server | fe80::cae2:65ff:fece:feb9 | gamma k8s server@@fe80::cae2:65ff:fece:feb9 +epsilon | epsilon gw instance | [fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] | [epsilon gw instance@@fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] +epsilon | [epsilon host, epsilon2 host] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [epsilon host@@fe81::cae2:65ff:fece:feb9, epsilon2 host@@fe82::cae2:65ff:fece:fec0] +epsilon | null | null | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 3f2d87c6d7a08..d38dce49020c4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -54,7 +54,9 @@ mv_last |"boolean|cartesian_point|cartesian_shape|date|double|g mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" 
|"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the maximum value." | false | false | false mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false +mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" |[v, start, end] | "[boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, integer, integer]" | "[A multivalued field, start index, end index (included)]" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" | "Returns a subset of the multivalued field using the start and end index values." | [false, false, true] | false | false mv_sum |"double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the sum of all of the values." | false | false | false +mv_zip |"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" |[mvLeft, mvRight, delim] | ["keyword|text", "keyword|text", "keyword|text"] | [A multivalued field, A multivalued field, delimiter] | "keyword" | "Combines the values from two multivalued fields with a delimiter that joins them together." | [false, false, true] | false | false now |date now() | null |null | null |date | "Returns current date and time." | null | false | false percentile |"double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" |[field, percentile] |["double|integer|long, double|integer|long"] |["", ""] |"double|integer|long" | "The value at which a certain percentage of observed values occur." | [false, false] | false | true pi |double pi() | null | null | null |double | "The ratio of a circle’s circumference to its diameter." 
| null | false | false @@ -153,7 +155,9 @@ double e() "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" "double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" +"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" date now() "double|integer|long percentile(field:double|integer|long, percentile:double|integer|long)" double pi() @@ -224,5 +228,5 @@ countFunctions#[skip:-8.13.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -92 | 92 | 92 +94 | 94 | 94 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index bdbcfb3cb49e9..e6c73f9054c51 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -696,6 +696,50 @@ ROW a=[10, 9, 8] // end::mv_concat-to_string-result[] ; +mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval a1 = mv_slice(salary_change.keyword, 0, 1) +| keep emp_no, salary_change.keyword, a1 +| sort emp_no +| limit 5; + +emp_no:integer | salary_change.keyword:keyword | a1:keyword +10001 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] +10003 | [12.82,14.68] | [12.82,14.68] +10004 | [-0.35,1.13,13.48,3.65] | [-0.35,1.13] +10005 | [-2.14,13.07] | [-2.14,13.07] +; + +mvZip#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_zip[] +ROW a = ["x", "y", "z"], b = ["1", "2"] +| EVAL c = mv_zip(a, b, "-") +| KEEP a, b, c +// end::mv_zip[] +; + +// tag::mv_zip-result[] +a:keyword | b:keyword | c:keyword +[x, y, z] | [1 ,2] | [x-1, y-2, z] +// end::mv_zip-result[] +; + +mvZipEmp#[skip:-8.13.99, reason:newly added in 8.14] +from employees +| eval full_name = mv_zip(first_name, last_name, " "), full_name_2 = mv_zip(last_name, first_name), jobs = mv_zip(job_positions, salary_change.keyword, "#") +| keep emp_no, full_name, full_name_2, job_positions, salary_change.keyword, jobs +| sort emp_no +| limit 5; + +emp_no:integer | full_name:keyword | full_name_2:keyword | job_positions:keyword | salary_change.keyword:keyword | jobs:keyword +10001 | Georgi Facello | Facello,Georgi | [Accountant, Senior Python Developer] | 1.19 | [Accountant#1.19, Senior Python Developer] +10002 | Bezalel Simmel | Simmel,Bezalel | Senior Team Lead | [-7.23,11.17] | [Senior Team Lead#-7.23, 11.17] +10003 | Parto Bamford | Bamford,Parto | null | [12.82, 14.68] | [12.82, 14.68] +10004 | Chirstian Koblick | Koblick,Chirstian | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] | [-0.35, 1.13, 13.48, 3.65] | [Head Human Resources#-0.35, Reporting Analyst#1.13, Support Engineer#13.48, Tech Lead#3.65] +10005 | Kyoichi Maliniak | Maliniak,Kyoichi | null | [-2.14,13.07] | [-2.14,13.07] +; + showTextFields from hosts | where host == "beta" | keep host, host_group, 
description; ignoreOrder:true diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java new file mode 100644 index 0000000000000..6c4174bd9cca9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBooleanEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceBooleanEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBooleanEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BooleanBlock fieldBlock = (BooleanBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BooleanBlock eval(int positionCount, BooleanBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new 
IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBooleanEvaluator get(DriverContext context) { + return new MvSliceBooleanEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBooleanEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java new file mode 100644 index 0000000000000..4a4a169e45aee --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceBytesRefEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceBytesRefEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceBytesRefEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock fieldBlock = (BytesRefBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceBytesRefEvaluator get(DriverContext context) { + return new MvSliceBytesRefEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceBytesRefEvaluator[" + "field=" + field + ", start=" + start + ", 
end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java new file mode 100644 index 0000000000000..3e4a83cec68b7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceDoubleEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceDoubleEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (DoubleBlock fieldBlock = (DoubleBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public DoubleBlock eval(int positionCount, DoubleBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new 
IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceDoubleEvaluator get(DriverContext context) { + return new MvSliceDoubleEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceDoubleEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java new file mode 100644 index 0000000000000..fc54dfb1f8336 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceIntEvaluator.java @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. 
+ */ +public final class MvSliceIntEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceIntEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (IntBlock fieldBlock = (IntBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public IntBlock eval(int positionCount, IntBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceIntEvaluator get(DriverContext context) { + return new MvSliceIntEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceIntEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java new file mode 100644 index 0000000000000..d6a1e7e45cabf --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceLongEvaluator.java @@ -0,0 +1,140 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvSlice}. + * This class is generated. Do not edit it. + */ +public final class MvSliceLongEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator field; + + private final EvalOperator.ExpressionEvaluator start; + + private final EvalOperator.ExpressionEvaluator end; + + private final DriverContext driverContext; + + public MvSliceLongEvaluator(Source source, EvalOperator.ExpressionEvaluator field, + EvalOperator.ExpressionEvaluator start, EvalOperator.ExpressionEvaluator end, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.field = field; + this.start = start; + this.end = end; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock fieldBlock = (LongBlock) field.eval(page)) { + try (IntBlock startBlock = (IntBlock) start.eval(page)) { + try (IntBlock endBlock = (IntBlock) end.eval(page)) { + return eval(page.getPositionCount(), fieldBlock, startBlock, endBlock); + } + } + } + } + + public LongBlock eval(int positionCount, LongBlock fieldBlock, IntBlock startBlock, + IntBlock endBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!fieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (startBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startBlock.getValueCount(p) != 1) { + if (startBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endBlock.getValueCount(p) != 1) { + if (endBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + 
result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvSlice.process(result, p, fieldBlock, startBlock.getInt(startBlock.getFirstValueIndex(p)), endBlock.getInt(endBlock.getFirstValueIndex(p))); + } catch (InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(field, start, end); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + private final EvalOperator.ExpressionEvaluator.Factory start; + + private final EvalOperator.ExpressionEvaluator.Factory end; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, + EvalOperator.ExpressionEvaluator.Factory start, + EvalOperator.ExpressionEvaluator.Factory end) { + this.source = source; + this.field = field; + this.start = start; + this.end = end; + } + + @Override + public MvSliceLongEvaluator get(DriverContext context) { + return new MvSliceLongEvaluator(source, field.get(context), start.get(context), end.get(context), context); + } + + @Override + public String toString() { + return "MvSliceLongEvaluator[" + "field=" + field + ", start=" + start + ", end=" + end + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java new file mode 100644 index 0000000000000..b53a1c8f9b3c0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvZip}. + * This class is generated. Do not edit it. 
+ */ +public final class MvZipEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftField; + + private final EvalOperator.ExpressionEvaluator rightField; + + private final EvalOperator.ExpressionEvaluator delim; + + private final DriverContext driverContext; + + public MvZipEvaluator(Source source, EvalOperator.ExpressionEvaluator leftField, + EvalOperator.ExpressionEvaluator rightField, EvalOperator.ExpressionEvaluator delim, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.leftField = leftField; + this.rightField = rightField; + this.delim = delim; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftFieldBlock = (BytesRefBlock) leftField.eval(page)) { + try (BytesRefBlock rightFieldBlock = (BytesRefBlock) rightField.eval(page)) { + try (BytesRefBlock delimBlock = (BytesRefBlock) delim.eval(page)) { + return eval(page.getPositionCount(), leftFieldBlock, rightFieldBlock, delimBlock); + } + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock leftFieldBlock, + BytesRefBlock rightFieldBlock, BytesRefBlock delimBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef delimScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!leftFieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (!rightFieldBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (delimBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (delimBlock.getValueCount(p) != 1) { + if (delimBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + MvZip.process(result, p, leftFieldBlock, rightFieldBlock, delimBlock.getBytesRef(delimBlock.getFirstValueIndex(p), delimScratch)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftField, rightField, delim); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftField; + + private final EvalOperator.ExpressionEvaluator.Factory rightField; + + private final EvalOperator.ExpressionEvaluator.Factory delim; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftField, + EvalOperator.ExpressionEvaluator.Factory rightField, + EvalOperator.ExpressionEvaluator.Factory delim) { + this.source = source; + this.leftField = leftField; + this.rightField = rightField; + this.delim = delim; + } + + @Override + public MvZipEvaluator get(DriverContext context) { + return new MvZipEvaluator(source, leftField.get(context), rightField.get(context), delim.get(context), context); + } + + @Override + public String toString() { + return "MvZipEvaluator[" + "leftField=" + leftField + ", rightField=" + rightField + ", delim=" + delim + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index ede3633c1b3e8..b577b8a68cd54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -73,7 +73,9 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; @@ -212,6 +214,8 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvSlice.class, MvSlice::new, "mv_slice"), + def(MvZip.class, MvZip::new, "mv_zip"), def(MvSum.class, MvSum::new, "mv_sum"), def(Split.class, Split::new, "split") } }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java new file mode 100644 index 0000000000000..b7868b33102a3 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -0,0 +1,344 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; + +/** + * Returns a subset of the multivalued field using the start and end index values. + */ +public class MvSlice extends ScalarFunction implements OptionalArgument, EvaluatorMapper { + private final Expression field, start, end; + + @FunctionInfo( + returnType = { + "boolean", + "cartesian_point", + "cartesian_shape", + "date", + "double", + "geo_point", + "geo_shape", + "integer", + "ip", + "keyword", + "long", + "text", + "version" }, + description = "Returns a subset of the multivalued field using the start and end index values." + ) + public MvSlice( + Source source, + @Param( + name = "v", + type = { + "boolean", + "cartesian_point", + "cartesian_shape", + "date", + "double", + "geo_point", + "geo_shape", + "integer", + "ip", + "keyword", + "long", + "text", + "version" }, + description = "A multivalued field" + ) Expression field, + @Param(name = "start", type = { "integer" }, description = "start index") Expression start, + @Param(name = "end", type = { "integer" }, description = "end index (included)", optional = true) Expression end + ) { + super(source, end == null ? Arrays.asList(field, start, start) : Arrays.asList(field, start, end)); + this.field = field; + this.start = start; + this.end = end == null ? 
start : end; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isType(field, EsqlDataTypes::isRepresentable, sourceText(), FIRST, "representable"); + if (resolution.unresolved()) { + return resolution; + } + + resolution = isInteger(start, sourceText(), SECOND); + if (resolution.unresolved()) { + return resolution; + } + + if (end != null) { + resolution = isInteger(end, sourceText(), THIRD); + if (resolution.unresolved()) { + return resolution; + } + } + + return resolution; + } + + @Override + public boolean foldable() { + return field.foldable() && start.foldable() && (end == null || end.foldable()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + if (start.foldable() && end.foldable()) { + int startOffset = Integer.parseInt(String.valueOf(start.fold())); + int endOffset = Integer.parseInt(String.valueOf(end.fold())); + checkStartEnd(startOffset, endOffset); + } + return switch (PlannerUtils.toElementType(field.dataType())) { + case BOOLEAN -> new MvSliceBooleanEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case BYTES_REF -> new MvSliceBytesRefEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case DOUBLE -> new MvSliceDoubleEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case INT -> new MvSliceIntEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case LONG -> new MvSliceLongEvaluator.Factory( + source(), + toEvaluator.apply(field), + toEvaluator.apply(start), + toEvaluator.apply(end) + ); + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvSlice(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvSlice::new, field, start, end); + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } + + @Override + public int hashCode() { + return Objects.hash(field, start, end); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MvSlice other = (MvSlice) obj; + return Objects.equals(other.field, field) && Objects.equals(other.start, start) && Objects.equals(other.end, end); + } + + static int adjustIndex(int oldOffset, int fieldValueCount, int first) { + return oldOffset < 0 ? 
oldOffset + fieldValueCount + first : oldOffset + first; + } + + static void checkStartEnd(int start, int end) throws InvalidArgumentException { + if (start > end) { + throw new InvalidArgumentException("Start offset is greater than end offset"); + } + if (start < 0 && end >= 0) { + throw new InvalidArgumentException("Start and end offset have different signs"); + } + } + + @Evaluator(extraName = "Boolean", warnExceptions = { InvalidArgumentException.class }) + static void process(BooleanBlock.Builder builder, int position, BooleanBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendBoolean(field.getBoolean(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendBoolean(field.getBoolean(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Int", warnExceptions = { InvalidArgumentException.class }) + static void process(IntBlock.Builder builder, int position, IntBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendInt(field.getInt(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendInt(field.getInt(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Long", warnExceptions = { InvalidArgumentException.class }) + static void process(LongBlock.Builder builder, int position, LongBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendLong(field.getLong(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendLong(field.getLong(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "Double", warnExceptions = { InvalidArgumentException.class }) + static void process(DoubleBlock.Builder builder, int position, DoubleBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); + int first = field.getFirstValueIndex(position); + int mvStartIndex = 
adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + if (mvStartIndex == mvEndIndex) { + builder.appendDouble(field.getDouble(mvStartIndex)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendDouble(field.getDouble(i)); + } + builder.endPositionEntry(); + } + + @Evaluator(extraName = "BytesRef", warnExceptions = { InvalidArgumentException.class }) + static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock field, int start, int end) { + int fieldValueCount = field.getValueCount(position); + checkStartEnd(start, end); // append null here ? + int first = field.getFirstValueIndex(position); + int mvStartIndex = adjustIndex(start, fieldValueCount, first); + mvStartIndex = Math.max(first, mvStartIndex); + int mvEndIndex = adjustIndex(end, fieldValueCount, first); + mvEndIndex = Math.min(fieldValueCount + first - 1, mvEndIndex); + if (mvStartIndex >= fieldValueCount + first || mvEndIndex < first) { + builder.appendNull(); + return; + } + BytesRef fieldScratch = new BytesRef(); + if (mvStartIndex == mvEndIndex) { + builder.appendBytesRef(field.getBytesRef(mvStartIndex, fieldScratch)); + return; + } + builder.beginPositionEntry(); + for (int i = mvStartIndex; i <= mvEndIndex; i++) { + builder.appendBytesRef(field.getBytesRef(i, fieldScratch)); + } + builder.endPositionEntry(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java new file mode 100644 index 0000000000000..6227efeced36e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; + +/** + * Combines the values from two multivalued fields with a delimiter that joins them together. + */ +public class MvZip extends ScalarFunction implements OptionalArgument, EvaluatorMapper { + private final Expression mvLeft, mvRight, delim; + private static final Literal COMMA = new Literal(Source.EMPTY, ",", DataTypes.TEXT); + + @FunctionInfo( + returnType = { "keyword" }, + description = "Combines the values from two multivalued fields with a delimiter that joins them together." + ) + public MvZip( + Source source, + @Param(name = "mvLeft", type = { "keyword", "text" }, description = "A multivalued field") Expression mvLeft, + @Param(name = "mvRight", type = { "keyword", "text" }, description = "A multivalued field") Expression mvRight, + @Param(name = "delim", type = { "keyword", "text" }, description = "delimiter", optional = true) Expression delim + ) { + super(source, delim == null ? Arrays.asList(mvLeft, mvRight, COMMA) : Arrays.asList(mvLeft, mvRight, delim)); + this.mvLeft = mvLeft; + this.mvRight = mvRight; + this.delim = delim == null ? 
COMMA : delim; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isString(mvLeft, sourceText(), FIRST); + if (resolution.unresolved()) { + return resolution; + } + + resolution = isString(mvRight, sourceText(), SECOND); + if (resolution.unresolved()) { + return resolution; + } + + if (delim != null) { + resolution = isString(delim, sourceText(), THIRD); + if (resolution.unresolved()) { + return resolution; + } + } + + return resolution; + } + + @Override + public boolean foldable() { + return mvLeft.foldable() && mvRight.foldable() && (delim == null || delim.foldable()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return new MvZipEvaluator.Factory(source(), toEvaluator.apply(mvLeft), toEvaluator.apply(mvRight), toEvaluator.apply(delim)); + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvZip(source(), newChildren.get(0), newChildren.get(1), newChildren.size() > 2 ? newChildren.get(2) : null); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvZip::new, mvLeft, mvRight, delim); + } + + @Override + public DataType dataType() { + return DataTypes.KEYWORD; + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } + + @Override + public int hashCode() { + return Objects.hash(mvLeft, mvRight, delim); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MvZip other = (MvZip) obj; + return Objects.equals(other.mvLeft, mvLeft) && Objects.equals(other.mvRight, mvRight) && Objects.equals(other.delim, delim); + } + + private static void buildOneSide(BytesRefBlock.Builder builder, int start, int end, BytesRefBlock field, BytesRef fieldScratch) { + builder.beginPositionEntry(); + for (int i = start; i < end; i++) { + builder.appendBytesRef(field.getBytesRef(i, fieldScratch)); + } + builder.endPositionEntry(); + } + + @Evaluator + static void process(BytesRefBlock.Builder builder, int position, BytesRefBlock leftField, BytesRefBlock rightField, BytesRef delim) { + int leftFieldValueCount = leftField.getValueCount(position); + int rightFieldValueCount = rightField.getValueCount(position); + + int leftFirst = leftField.getFirstValueIndex(position); + int rightFirst = rightField.getFirstValueIndex(position); + + BytesRef fieldScratch = new BytesRef(); + + // nulls + if (leftField.isNull(position)) { + if (rightFieldValueCount == 1) { + builder.appendBytesRef(rightField.getBytesRef(rightFirst, fieldScratch)); + return; + } + buildOneSide(builder, rightFirst, rightFirst + rightFieldValueCount, rightField, fieldScratch); + return; + } + + if (rightField.isNull(position)) { + if (leftFieldValueCount == 1) { + builder.appendBytesRef(leftField.getBytesRef(leftFirst, fieldScratch)); + return; + } + buildOneSide(builder, leftFirst, leftFirst + leftFieldValueCount, leftField, fieldScratch); + return; + } + + BytesRefBuilder work = new BytesRefBuilder(); + // single value + if (leftFieldValueCount == 1 && rightFieldValueCount == 1) { + work.append(leftField.getBytesRef(leftFirst, fieldScratch)); + work.append(delim); + work.append(rightField.getBytesRef(rightFirst, fieldScratch)); + 
builder.appendBytesRef(work.get()); + return; + } + // multiple values + int leftIndex = 0, rightIndex = 0; + builder.beginPositionEntry(); + while (leftIndex < leftFieldValueCount && rightIndex < rightFieldValueCount) { + // concat + work.clear(); + work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch)); + work.append(delim); + work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + leftIndex++; + rightIndex++; + } + while (leftIndex < leftFieldValueCount) { + work.clear(); + work.append(leftField.getBytesRef(leftIndex + leftFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + leftIndex++; + } + while (rightIndex < rightFieldValueCount) { + work.clear(); + work.append(rightField.getBytesRef(rightIndex + rightFirst, fieldScratch)); + builder.appendBytesRef(work.get()); + rightIndex++; + } + builder.endPositionEntry(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 3ca5f2f5868ba..384bfd164b0a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -97,7 +97,9 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; @@ -419,7 +421,9 @@ public static List namedTypeEntries() { of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvSlice.class, PlanNamedTypes::writeMvSlice, PlanNamedTypes::readMvSlice), of(ScalarFunction.class, MvSum.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvZip.class, PlanNamedTypes::writeMvZip, PlanNamedTypes::readMvZip), // Expressions (other) of(Expression.class, Literal.class, PlanNamedTypes::writeLiteral, PlanNamedTypes::readLiteral), of(Expression.class, Order.class, PlanNamedTypes::writeOrder, PlanNamedTypes::readOrder) @@ -1831,4 +1835,30 @@ static void writeLog(PlanStreamOutput out, Log log) throws IOException { out.writeExpression(fields.get(0)); out.writeOptionalWriteable(fields.size() == 2 ? 
o -> out.writeExpression(fields.get(1)) : null); } + + static MvSlice readMvSlice(PlanStreamInput in) throws IOException { + return new MvSlice(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvSlice(PlanStreamOutput out, MvSlice fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); + } + + static MvZip readMvZip(PlanStreamInput in) throws IOException { + return new MvZip(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvZip(PlanStreamOutput out, MvZip fn) throws IOException { + out.writeNoSource(); + List fields = fn.children(); + assert fields.size() == 2 || fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 4d44d3111c094..9daf043714efc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -446,7 +446,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con // TODO cranky time - public final void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull + public void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); List simpleData = testCase.getDataValues(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java new file mode 100644 index 0000000000000..4d1e58893739a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class MvSliceTests extends AbstractScalarFunctionTestCase { + public MvSliceTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + booleans(suppliers); + ints(suppliers); + longs(suppliers); + doubles(suppliers); + bytesRefs(suppliers); + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return argTypes.get(0); + } + + @Override + protected List argSpec() { + return List.of(required(representableTypes()), required(integers()), optional(integers())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvSlice(source, args.get(0), args.get(1), args.size() > 2 ? args.get(2) : null); + } + + private static void booleans(List suppliers) { + // Positive + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + // Positive Start IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(length, length + 1); + int end = randomIntBetween(start, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + nullValue() + ); + })); + // Positive End IndexOutofBound + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(length, length + 10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == length - 1 ? field.get(start) : field.subList(start, length)) + ); + })); + // Negative + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + int length = field.size(); + int start = randomIntBetween(0 - length, -1); + int end = randomIntBetween(start, -1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBooleanEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.BOOLEAN, + equalTo(start == end ? field.get(start + length) : field.subList(start + length, end + 1 + length)) + ); + })); + } + + private static void ints(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.INTEGER, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomInt()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.INTEGER, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceIntEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.INTEGER, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void longs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.LONG, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.LONG, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DATETIME, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DATETIME, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DATETIME, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void doubles(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DOUBLE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomDouble()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DOUBLE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceDoubleEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.DOUBLE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } + + private static void bytesRefs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.KEYWORD).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.TEXT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.TEXT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.IP, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.IP).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.IP, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.IP, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.VERSION, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.VERSION).value()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.VERSION, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataTypes.VERSION, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_POINT, + equalTo(start == end ? 
field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomPoint()))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_POINT, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_POINT, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.GEO_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.GEO_SHAPE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> { + List field = randomList(1, 10, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean())))); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, EsqlDataTypes.CARTESIAN_SHAPE, "field"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataTypes.INTEGER, "end") + ), + "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + EsqlDataTypes.CARTESIAN_SHAPE, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java new file mode 100644 index 0000000000000..c4162f6ddc367 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static java.lang.Math.max; +import static org.hamcrest.Matchers.equalTo; + +public class MvZipTests extends AbstractScalarFunctionTestCase { + public MvZipTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { + List left = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + List right = randomList(1, 3, () -> randomLiteral(DataTypes.KEYWORD).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.KEYWORD, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.KEYWORD, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.KEYWORD, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? expected.iterator().next() : expected) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.TEXT, DataTypes.TEXT), () -> { + List left = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + List right = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + String delim = randomAlphaOfLengthBetween(1, 1); + List expected = calculateExpected(left, right, delim); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(left, DataTypes.TEXT, "mvLeft"), + new TestCaseSupplier.TypedData(right, DataTypes.TEXT, "mvRight"), + new TestCaseSupplier.TypedData(delim, DataTypes.TEXT, "delim") + ), + "MvZipEvaluator[leftField=Attribute[channel=0], rightField=Attribute[channel=1], delim=Attribute[channel=2]]", + DataTypes.KEYWORD, + equalTo(expected.size() == 1 ? expected.iterator().next() : expected) + ); + })); + + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected DataType expectedType(List argTypes) { + return DataTypes.KEYWORD; + } + + @Override + protected List argSpec() { + return List.of(required(strings()), required(strings()), optional(strings())); + } + + @Override + protected Expression build(Source source, List args) { + return new MvZip(source, args.get(0), args.get(1), args.size() > 2 ? 
args.get(2) : null); + } + + private static List calculateExpected(List left, List right, String delim) { + List expected = new ArrayList<>(max(left.size(), right.size())); + int i = 0, j = 0; + while (i < left.size() && j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + work.append(new BytesRef(delim)); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + i++; + j++; + } + while (i < left.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) left.get(i)); + expected.add(work.get()); + i++; + } + while (j < right.size()) { + BytesRefBuilder work = new BytesRefBuilder(); + work.append((BytesRef) right.get(j)); + expected.add(work.get()); + j++; + } + return expected; + } + + @Override + public void testSimpleWithNulls() { + assumeFalse("mv_zip returns null only if both left and right inputs are nulls", false); + } +} From 2c24abeab09849188820d6e9e5c88d7bdc864b57 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 12 Mar 2024 08:22:08 +0100 Subject: [PATCH 118/248] Use correct system index bulk executor (#106150) In one place we would always use the Write threadpool, which could let write thread pool queuing lead to increased system index write latency. --- docs/changelog/106150.yaml | 5 +++++ .../org/elasticsearch/action/bulk/TransportBulkAction.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/106150.yaml diff --git a/docs/changelog/106150.yaml b/docs/changelog/106150.yaml new file mode 100644 index 0000000000000..05bd8b06987c6 --- /dev/null +++ b/docs/changelog/106150.yaml @@ -0,0 +1,5 @@ +pr: 106150 +summary: Use correct system index bulk executor +area: CRUD +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 3e661c2efe72f..a2445e95a572f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -291,7 +291,7 @@ public void onTimeout(TimeValue timeout) { } private void forkAndExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener releasingListener) { - threadPool.executor(Names.WRITE).execute(new ActionRunnable<>(releasingListener) { + threadPool.executor(executorName).execute(new ActionRunnable<>(releasingListener) { @Override protected void doRun() { doInternalExecute(task, bulkRequest, executorName, releasingListener); From 5632380ecde8cacc6a7452f61583571d7905ed34 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 12 Mar 2024 18:34:22 +1100 Subject: [PATCH 119/248] [Doc] Trivial correction for shard allocator choice (#106216) Relates: #105894 --- docs/reference/cluster/nodes-stats.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index b755baac3901b..c008b074acccd 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -2821,7 +2821,7 @@ The number of shards currently allocated to this node `undesired_shards`:: (integer) The amount of shards that are scheduled to be moved elsewhere in the cluster -or -1 other than desired balance allocator is used +if desired balance allocator is used or -1 if any other allocator is used. 
`forecasted_ingest_load`:: (double) From c18b4a81dc4bfe601ef657a2c5f79fc21ef054c8 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Tue, 12 Mar 2024 10:03:54 +0200 Subject: [PATCH 120/248] Unmute test fixed by #106104 (#106219) https://github.com/elastic/elasticsearch/issues/106123 was fixed by https://github.com/elastic/elasticsearch/pull/106104. In this PR we unmute the test. Closes https://github.com/elastic/elasticsearch/issues/106123 --- .../java/org/elasticsearch/cluster/metadata/DataStreamTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 141434842a4bc..a07cd8e60411a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1842,7 +1842,6 @@ public void testWriteFailureIndex() { assertThat(failureStoreDataStream.getFailureStoreWriteIndex(), is(writeFailureIndex)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106123") public void testIsFailureIndex() { boolean hidden = randomBoolean(); boolean system = hidden && randomBoolean(); From 6fc5480ac0db5516127b586d5e3288ef1761f986 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Tue, 12 Mar 2024 09:08:33 +0100 Subject: [PATCH 121/248] Allow customizing refresh rate for failure store indices (#106149) This parameter can (for now) only be set in serverless. --- .../MetadataCreateDataStreamService.java | 37 +++++++++++++-- .../MetadataMigrateToDataStreamService.java | 4 +- .../MetadataCreateDataStreamServiceTests.java | 47 +++++++++++++++++++ ...tadataMigrateToDataStreamServiceTests.java | 3 ++ 4 files changed, 87 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 20b28edef5ca2..fd67a8ac7e230 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -25,10 +25,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -52,6 +54,8 @@ public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); + public static final String FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME = "data_streams.failure_store.refresh_interval"; + private final ThreadPool threadPool; private final ClusterService clusterService; private final MetadataCreateIndexService metadataCreateIndexService; @@ -98,6 +102,7 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, public ClusterState execute(ClusterState currentState) throws Exception { 
ClusterState clusterState = createDataStream( metadataCreateIndexService, + clusterService.getSettings(), currentState, isDslOnlyMode, request, @@ -124,7 +129,7 @@ public ClusterState createDataStream( ClusterState current, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener); + return createDataStream(metadataCreateIndexService, clusterService.getSettings(), current, isDslOnlyMode, request, rerouteListener); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -184,12 +189,22 @@ long getStartTime() { static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener); + return createDataStream( + metadataCreateIndexService, + settings, + currentState, + isDslOnlyMode, + request, + List.of(), + null, + rerouteListener + ); } /** @@ -204,6 +219,7 @@ static ClusterState createDataStream( */ static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, @@ -260,6 +276,7 @@ static ClusterState createDataStream( String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime()); currentState = createFailureStoreIndex( metadataCreateIndexService, + settings, currentState, request, dataStreamName, @@ -384,6 +401,7 @@ private static ClusterState createBackingIndex( private static ClusterState createFailureStoreIndex( MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ClusterState currentState, CreateDataStreamClusterStateUpdateRequest request, String dataStreamName, @@ -394,6 +412,16 @@ private static ClusterState createFailureStoreIndex( return currentState; } + var indexSettings = MetadataRolloverService.HIDDEN_INDEX_SETTINGS; + // Optionally set a custom refresh interval for the failure store index. 
+ var refreshInterval = getFailureStoreRefreshInterval(settings); + if (refreshInterval != null) { + indexSettings = Settings.builder() + .put(indexSettings) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .build(); + } + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( "initialize_data_stream", failureStoreIndexName, @@ -402,7 +430,7 @@ private static ClusterState createFailureStoreIndex( .nameResolvedInstant(request.getStartTime()) .performReroute(false) .setMatchingTemplate(template) - .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + .settings(indexSettings); try { currentState = metadataCreateIndexService.applyCreateIndexRequest( @@ -451,4 +479,7 @@ public static void validateTimestampFieldMapping(MappingLookup mappingLookup) th fieldMapper.validate(mappingLookup); } + public static TimeValue getFailureStoreRefreshInterval(Settings settings) { + return settings.getAsTime(FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, null); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index f7fa34d76498a..c40c5a09e99ee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -113,7 +113,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } catch (IOException e) { throw new IllegalStateException(e); } - }, request, metadataCreateIndexService, delegate.reroute()); + }, request, metadataCreateIndexService, clusterService.getSettings(), delegate.reroute()); writeIndexRef.set(clusterState.metadata().dataStreams().get(request.aliasName).getWriteIndex().getName()); return clusterState; } @@ -132,6 +132,7 @@ static ClusterState migrateToDataStream( Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, + Settings settings, ActionListener listener ) throws Exception { validateRequest(currentState, request); @@ -158,6 +159,7 @@ static ClusterState migrateToDataStream( CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(request.aliasName); return createDataStream( metadataCreateIndexService, + settings, currentState, isDslOnlyMode, req, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index e11f8c0cbe108..ea79bc8f13765 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ExecutorNames; import org.elasticsearch.indices.SystemDataStreamDescriptor; @@ -59,6 +60,7 @@ public void testCreateDataStream() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = 
MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, true, req, @@ -98,6 +100,7 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -174,6 +177,7 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -226,6 +230,7 @@ public void testCreateDataStreamWithFailureStore() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -246,6 +251,40 @@ public void testCreateDataStreamWithFailureStore() throws Exception { assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); } + public void testCreateDataStreamWithFailureStoreWithRefreshRate() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + var timeValue = randomTimeValue(); + var settings = Settings.builder() + .put(MetadataCreateDataStreamService.FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME, timeValue) + .build(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + settings, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); + assertThat( + newState.metadata().index(failureStoreIndexName).getSettings().get(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()), + equalTo(timeValue) + ); + } + public void testCreateSystemDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; @@ -259,6 +298,7 @@ public void testCreateSystemDataStream() throws Exception { ); ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, 
randomBoolean(), req, @@ -291,6 +331,7 @@ public void testCreateDuplicateDataStream() throws Exception { ResourceAlreadyExistsException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -309,6 +350,7 @@ public void testCreateDataStreamWithInvalidName() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -327,6 +369,7 @@ public void testCreateDataStreamWithUppercaseCharacters() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -345,6 +388,7 @@ public void testCreateDataStreamStartingWithPeriod() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -363,6 +407,7 @@ public void testCreateDataStreamNoTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -384,6 +429,7 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { IllegalArgumentException.class, () -> MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, @@ -408,6 +454,7 @@ public static ClusterState createDataStream(final String dataStreamName) throws CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); return MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, + Settings.EMPTY, cs, randomBoolean(), req, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 128601ff21250..cefbd31db1ee6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -297,6 +297,7 @@ public void testCreateDataStreamWithSuppliedWriteIndex() throws Exception { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -355,6 +356,7 @@ public void testCreateDataStreamHidesBackingIndicesAndRemovesAlias() throws Exce TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ); IndexAbstraction ds = newState.metadata().getIndicesLookup().get(dataStreamName); @@ -415,6 +417,7 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { TimeValue.ZERO ), getMetadataCreateIndexService(), + Settings.EMPTY, ActionListener.noop() ) ); From 8f5e5631473040a4145a31e080061b42f8a44766 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Tue, 12 Mar 2024 09:25:48 +0100 Subject: [PATCH 122/248] Fix DataStreamGlobalRetentionSerializationTests.testEqualsAndHashcode (#106220) Closes #106212 --- ...treamGlobalRetentionSerializationTests.java | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java index 8c3d36464784e..491ba868dfd9b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSerializationTests.java @@ -59,18 +59,16 @@ protected ClusterState.Custom mutateInstance(ClusterState.Custom instance) { var maxRetention = metadata.getMaxRetention(); switch (randomInt(1)) { case 0 -> { - if (defaultRetention == null) { - defaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 1000)); - } else { - defaultRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)); - } + defaultRetention = randomValueOtherThan( + defaultRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1, 1000)) + ); } case 1 -> { - if (maxRetention == null) { - maxRetention = TimeValue.timeValueDays(randomIntBetween(1000, 2000)); - } else { - maxRetention = randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1000, 2000)); - } + maxRetention = randomValueOtherThan( + maxRetention, + () -> randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(1001, 2000)) + ); } } return new DataStreamGlobalRetention(defaultRetention, maxRetention); From 12e567d29e2156f8aa6e6664bd4283b478cf0860 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 12 Mar 2024 09:16:46 +0000 Subject: [PATCH 123/248] Consolidate get-snapshots `?after` logic (#106038) Today the handling of the `?after` param is kinda spread out over `TransportGetSnapshotsAction` and `GetSnapshotsRequest` making it hard to follow and adding unnecessary complexity to these two classes. This commit moves it into `SnapshotSortKey` which is a better fit since the behaviour varies so much for different sort keys. 
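As an aside for readers following the refactoring: the sketch below illustrates the `?after` keyset-pagination token that `SnapshotSortKey` now owns end to end. The last snapshot of a page contributes its sort-key value plus its repository and snapshot name as tie-breakers; the three are joined with commas and URL-safe base64 encoded, and decoding reverses the process before the token is turned into a skip-past-this-point predicate. The record and sample values below are illustrative only and are not part of the patch; only the encode/decode shape mirrors `encodeAfterQueryParam` and `decodeAfterQueryParam`.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative stand-in for the (sort value, repository name, snapshot name) triple that the
// get-snapshots API round-trips through the ?after query parameter.
public record AfterToken(String value, String repoName, String snapshotName) {

    // Mirrors the shape of SnapshotSortKey#encodeAfterQueryParam: comma-join, then URL-safe base64.
    public String encode() {
        String raw = value + "," + repoName + "," + snapshotName;
        return Base64.getUrlEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8));
    }

    // Mirrors the shape of SnapshotSortKey#decodeAfterQueryParam: anything that does not decode
    // into exactly three comma-separated parts is rejected.
    public static AfterToken decode(String param) {
        String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(",");
        if (parts.length != 3) {
            throw new IllegalArgumentException("invalid ?after parameter [" + param + "]");
        }
        return new AfterToken(parts[0], parts[1], parts[2]);
    }

    public static void main(String[] args) {
        // Pretend the last snapshot on the current page sorts by start time and started at this epoch millisecond.
        AfterToken lastOnPage = new AfterToken("1710230400000", "my-repo", "snap-042");
        String next = lastOnPage.encode();            // handed back to the client for its next request
        System.out.println(next);
        System.out.println(AfterToken.decode(next));  // recovered server-side and turned into a filter
    }
}
```

Carrying the repository and snapshot names along is what keeps the ordering stable across pages when many snapshots share the same sort value, matching the tie-breaking rule documented on `SnapshotSortKey` itself.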
--- .../http/snapshots/RestGetSnapshotsIT.java | 2 +- .../snapshots/GetSnapshotsIT.java | 2 +- .../snapshots/get/GetSnapshotsRequest.java | 85 +------- .../get/GetSnapshotsRequestBuilder.java | 4 +- .../snapshots/get/SnapshotSortKey.java | 196 +++++++++++++++++- .../get/TransportGetSnapshotsAction.java | 88 +------- .../admin/cluster/RestGetSnapshotsAction.java | 2 +- .../get/GetSnapshotsRequestTests.java | 6 +- 8 files changed, 212 insertions(+), 173 deletions(-) diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 59e07581499ee..88d910b61fa52 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -386,7 +386,7 @@ private static void assertStablePagination(String repoName, Collection a final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( repoName, sort, - GetSnapshotsRequest.After.from(after, sort).asQueryParam(), + sort.encodeAfterQueryParam(after), i, order, includeIndexNames diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index d01064b9fb8bc..a04d1a5c8b02d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -659,7 +659,7 @@ private static void assertStablePagination(String[] repoNames, Collection private int size = NO_LIMIT; /** - * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link After} if not equal to {@code 0}. + * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link #after} if not equal to {@code 0}. */ private int offset = 0; + /** + * Sort key value at which to start fetching snapshots. Mutually exclusive with {@link #offset} if not {@code null}. 
+ */ @Nullable - private After after; + private SnapshotSortKey.After after; @Nullable private String fromSortValue; @@ -105,7 +104,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); - after = in.readOptionalWriteable(After::new); + after = in.readOptionalWriteable(SnapshotSortKey.After::new); sort = in.readEnum(SnapshotSortKey.class); size = in.readVInt(); order = SortOrder.readFromStream(in); @@ -283,7 +282,8 @@ public boolean includeIndexNames() { return includeIndexNames; } - public After after() { + @Nullable + public SnapshotSortKey.After after() { return after; } @@ -291,7 +291,7 @@ public SnapshotSortKey sort() { return sort; } - public GetSnapshotsRequest after(@Nullable After after) { + public GetSnapshotsRequest after(@Nullable SnapshotSortKey.After after) { this.after = after; return this; } @@ -350,73 +350,6 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); } - public static final class After implements Writeable { - - private final String value; - - private final String repoName; - - private final String snapshotName; - - After(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readString()); - } - - public static After fromQueryParam(String param) { - final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); - if (parts.length != 3) { - throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); - } - return new After(parts[0], parts[1], parts[2]); - } - - @Nullable - public static After from(@Nullable SnapshotInfo snapshotInfo, SnapshotSortKey sortBy) { - if (snapshotInfo == null) { - return null; - } - final String afterValue = switch (sortBy) { - case START_TIME -> String.valueOf(snapshotInfo.startTime()); - case NAME -> snapshotInfo.snapshotId().getName(); - case DURATION -> String.valueOf(snapshotInfo.endTime() - snapshotInfo.startTime()); - case INDICES -> String.valueOf(snapshotInfo.indices().size()); - case SHARDS -> String.valueOf(snapshotInfo.totalShards()); - case FAILED_SHARDS -> String.valueOf(snapshotInfo.failedShards()); - case REPOSITORY -> snapshotInfo.repository(); - }; - return new After(afterValue, snapshotInfo.repository(), snapshotInfo.snapshotId().getName()); - } - - public After(String value, String repoName, String snapshotName) { - this.value = value; - this.repoName = repoName; - this.snapshotName = snapshotName; - } - - public String value() { - return value; - } - - public String snapshotName() { - return snapshotName; - } - - public String repoName() { - return repoName; - } - - public String asQueryParam() { - return Base64.getUrlEncoder().encodeToString((value + "," + repoName + "," + snapshotName).getBytes(StandardCharsets.UTF_8)); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - out.writeString(repoName); - out.writeString(snapshotName); - } - } - @Override public String getDescription() { final StringBuilder stringBuilder = new StringBuilder("repositories["); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 25e8a433bf243..68877f6144693 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -109,10 +109,10 @@ public GetSnapshotsRequestBuilder setVerbose(boolean verbose) { } public GetSnapshotsRequestBuilder setAfter(String after) { - return setAfter(after == null ? null : GetSnapshotsRequest.After.fromQueryParam(after)); + return setAfter(after == null ? null : SnapshotSortKey.decodeAfterQueryParam(after)); } - public GetSnapshotsRequestBuilder setAfter(@Nullable GetSnapshotsRequest.After after) { + public GetSnapshotsRequestBuilder setAfter(@Nullable SnapshotSortKey.After after) { request.after(after); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java index 599f41e8615da..72a449268bf79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java @@ -8,9 +8,20 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.SnapshotInfo; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Comparator; +import java.util.function.Predicate; +import java.util.function.ToLongFunction; /** * Sort key for snapshots e.g. returned from the get-snapshots API. All values break ties using {@link SnapshotInfo#snapshotId} (i.e. by @@ -20,36 +31,126 @@ public enum SnapshotSortKey { /** * Sort by snapshot start time. */ - START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)), + START_TIME("start_time", Comparator.comparingLong(SnapshotInfo::startTime)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::startTime, sortOrder); + } + }, /** * Sort by snapshot name. */ - NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())), + NAME("name", Comparator.comparing(sni -> sni.snapshotId().getName())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.snapshotId().getName(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareName(snapshotName, repoName, info) < 0) + : (info -> compareName(snapshotName, repoName, info) > 0); + } + }, /** * Sort by snapshot duration (end time minus start time). 
*/ - DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())), + DURATION("duration", Comparator.comparingLong(sni -> sni.endTime() - sni.startTime())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Long.toString(snapshotInfo.endTime() - snapshotInfo.startTime()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(info -> info.endTime() - info.startTime(), sortOrder); + } + }, + /** * Sort by number of indices in the snapshot. */ - INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())), + INDICES("index_count", Comparator.comparingInt(sni -> sni.indices().size())) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.indices().size()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + return after.longValuePredicate(info -> info.indices().size(), sortOrder); + } + }, /** * Sort by number of shards in the snapshot. */ - SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)), + SHARDS("shard_count", Comparator.comparingInt(SnapshotInfo::totalShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.totalShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::totalShards, sortOrder); + } + }, /** * Sort by number of failed shards in the snapshot. */ - FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)), + FAILED_SHARDS("failed_shard_count", Comparator.comparingInt(SnapshotInfo::failedShards)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return Integer.toString(snapshotInfo.failedShards()); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + return after.longValuePredicate(SnapshotInfo::failedShards, sortOrder); + } + }, /** * Sort by repository name. */ - REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)); + REPOSITORY("repository", Comparator.comparing(SnapshotInfo::repository)) { + @Override + protected String getSortKeyValue(SnapshotInfo snapshotInfo) { + return snapshotInfo.repository(); + } + + @Override + protected Predicate innerGetAfterPredicate(After after, SortOrder sortOrder) { + // TODO: cover via pre-flight predicate + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); + return sortOrder == SortOrder.ASC + ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) + : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); + } + + private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { + final int res = repoName.compareTo(info.repository()); + if (res != 0) { + return res; + } + return name.compareTo(info.snapshotId().getName()); + } + }; private final String name; private final Comparator snapshotInfoComparator; @@ -64,10 +165,66 @@ public String toString() { return name; } + /** + * @return a {@link Comparator} which can be used to sort {@link SnapshotInfo} items according to this sort key. 
+ */ public final Comparator getSnapshotInfoComparator() { return snapshotInfoComparator; } + /** + * @return an {@link After} which can be included in a {@link GetSnapshotsRequest} (e.g. to be sent to a remote node) and ultimately + * converted into a predicate to filter out {@link SnapshotInfo} items which were returned on earlier pages of results. See also + * {@link #encodeAfterQueryParam} and {@link #getAfterPredicate}. + */ + public static After decodeAfterQueryParam(String param) { + final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); + if (parts.length != 3) { + throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); + } + return new After(parts[0], parts[1], parts[2]); + } + + /** + * @return an encoded representation of the value of the sort key for the given {@link SnapshotInfo}, including the values of the + * snapshot name and repo name for tiebreaking purposes, which can be returned to the user so they can pass it back to the + * {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. + */ + public final String encodeAfterQueryParam(SnapshotInfo snapshotInfo) { + final var rawValue = getSortKeyValue(snapshotInfo) + "," + snapshotInfo.repository() + "," + snapshotInfo.snapshotId().getName(); + return Base64.getUrlEncoder().encodeToString(rawValue.getBytes(StandardCharsets.UTF_8)); + } + + /** + * @return a string representation of the value of the sort key for the given {@link SnapshotInfo}, which should be the last item in the + * response, which is combined with the snapshot and repository names, encoded, and returned to the user so they can pass it back to + * the {@code ?after} param of a subsequent call to the get-snapshots API in order to retrieve the next page of results. + */ + protected abstract String getSortKeyValue(SnapshotInfo snapshotInfo); + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). If {@code after} is {@code null} then the returned + * predicate matches all snapshots. + */ + public final Predicate getAfterPredicate(@Nullable After after, SortOrder sortOrder) { + return after == null ? Predicates.always() : innerGetAfterPredicate(after, sortOrder); + } + + /** + * @return a predicate to filter out {@link SnapshotInfo} items that match the user's query but which sort earlier than the given + * {@link After} value (i.e. they were returned on earlier pages of results). The {@code after} parameter is not {@code null}. 
+ */ + protected abstract Predicate innerGetAfterPredicate(After after, SortOrder sortOrder); + + private static int compareName(String name, String repoName, SnapshotInfo info) { + final int res = name.compareTo(info.snapshotId().getName()); + if (res != 0) { + return res; + } + return repoName.compareTo(info.repository()); + } + public static SnapshotSortKey of(String name) { return switch (name) { case "start_time" -> START_TIME; @@ -80,4 +237,29 @@ public static SnapshotSortKey of(String name) { default -> throw new IllegalArgumentException("unknown sort key [" + name + "]"); }; } + + public record After(String value, String repoName, String snapshotName) implements Writeable { + + After(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + out.writeString(repoName); + out.writeString(snapshotName); + } + + Predicate longValuePredicate(ToLongFunction extractor, SortOrder sortOrder) { + final var after = Long.parseLong(value); + return sortOrder == SortOrder.ASC ? info -> { + final long val = extractor.applyAsLong(info); + return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); + } : info -> { + final long val = extractor.applyAsLong(info); + return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); + }; + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index cf779445fcd6a..8f089b0353e55 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Predicates; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -154,7 +153,7 @@ private class GetSnapshotsOperation { private final String fromSortValue; private final int offset; @Nullable - private final GetSnapshotsRequest.After after; + private final SnapshotSortKey.After after; private final int size; // current state @@ -181,7 +180,7 @@ private class GetSnapshotsOperation { SortOrder order, String fromSortValue, int offset, - GetSnapshotsRequest.After after, + SnapshotSortKey.After after, int size, SnapshotsInProgress snapshotsInProgress, boolean verbose, @@ -223,9 +222,7 @@ void getMultipleReposSnapshotInfo(ActionListener listener) return new GetSnapshotsResponse( snapshotInfos, failuresByRepository, - finalRemaining > 0 - ? GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() - : null, + finalRemaining > 0 ? 
sortBy.encodeAfterQueryParam(snapshotInfos.get(snapshotInfos.size() - 1)) : null, totalCount.get(), finalRemaining ); @@ -489,7 +486,9 @@ private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snap } private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, int totalCount, int offset, int size) { - final var resultsStream = snapshotInfoStream.filter(buildAfterPredicate()).sorted(buildComparator()).skip(offset); + final var resultsStream = snapshotInfoStream.filter(sortBy.getAfterPredicate(after, order)) + .sorted(buildComparator()) + .skip(offset); if (size == GetSnapshotsRequest.NO_LIMIT) { return new SnapshotsInRepo(resultsStream.toList(), totalCount, 0); } else { @@ -512,81 +511,6 @@ private Comparator buildComparator() { final var comparator = sortBy.getSnapshotInfoComparator(); return order == SortOrder.DESC ? comparator.reversed() : comparator; } - - private Predicate buildAfterPredicate() { - if (after == null) { - return Predicates.always(); - } - assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; - - final String snapshotName = after.snapshotName(); - final String repoName = after.repoName(); - final String value = after.value(); - return switch (sortBy) { - case START_TIME -> filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(value), snapshotName, repoName, order); - case NAME -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? (info -> compareName(snapshotName, repoName, info) < 0) - : (info -> compareName(snapshotName, repoName, info) > 0); - case DURATION -> filterByLongOffset( - info -> info.endTime() - info.startTime(), - Long.parseLong(value), - snapshotName, - repoName, - order - ); - case INDICES -> - // TODO: cover via pre-flight predicate - filterByLongOffset(info -> info.indices().size(), Integer.parseInt(value), snapshotName, repoName, order); - case SHARDS -> filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(value), snapshotName, repoName, order); - case FAILED_SHARDS -> filterByLongOffset( - SnapshotInfo::failedShards, - Integer.parseInt(value), - snapshotName, - repoName, - order - ); - case REPOSITORY -> - // TODO: cover via pre-flight predicate - order == SortOrder.ASC - ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) - : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); - }; - } - - private static Predicate filterByLongOffset( - ToLongFunction extractor, - long after, - String snapshotName, - String repoName, - SortOrder order - ) { - return order == SortOrder.ASC ? 
info -> { - final long val = extractor.applyAsLong(info); - return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); - } : info -> { - final long val = extractor.applyAsLong(info); - return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); - }; - } - - private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { - final int res = repoName.compareTo(info.repository()); - if (res != 0) { - return res; - } - return name.compareTo(info.snapshotId().getName()); - } - - private static int compareName(String name, String repoName, SnapshotInfo info) { - final int res = name.compareTo(info.snapshotId().getName()); - if (res != 0) { - return res; - } - return repoName.compareTo(info.repository()); - } - } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 7a66c6d7c435a..7482ae7683b4a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -68,7 +68,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getSnapshotsRequest.offset(offset); final String afterString = request.param("after"); if (afterString != null) { - getSnapshotsRequest.after(GetSnapshotsRequest.After.fromQueryParam(afterString)); + getSnapshotsRequest.after(SnapshotSortKey.decodeAfterQueryParam(afterString)); } final String fromSortValue = request.param("from_sort_value"); if (fromSortValue != null) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 14db6bdf84264..810d297602e8a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -50,7 +50,7 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after with verbose=false")); } @@ -61,14 +61,14 @@ public void testValidateParameters() { } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").after( - new GetSnapshotsRequest.After("foo", "repo", "bar") + new SnapshotSortKey.After("foo", "repo", "bar") ).offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and offset simultaneously")); } { final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").fromSortValue("foo") - .after(new GetSnapshotsRequest.After("foo", "repo", "bar")); + .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and from_sort_value simultaneously")); } From 04110202ea4cd81a22b7895c63ea526bc27a32cc Mon Sep 17 00:00:00 2001 From: Armin Braun Date: 
Tue, 12 Mar 2024 10:29:51 +0100 Subject: [PATCH 124/248] Remove more unused inject code (#106211) Removing a couple assertions that we do not use together with their supporting code. --- .../elasticsearch/common/inject/Binder.java | 6 +- .../elasticsearch/common/inject/Binding.java | 4 +- .../common/inject/BindingProcessor.java | 8 +- .../inject/ConstructionProxyFactory.java | 30 ---- .../inject/ConstructorInjectorStore.java | 27 +++- .../DefaultConstructionProxyFactory.java | 65 --------- .../elasticsearch/common/inject/Exposed.java | 37 ----- .../common/inject/ImplementedBy.java | 38 ----- .../common/inject/InjectorImpl.java | 91 ------------ .../elasticsearch/common/inject/Module.java | 8 +- .../common/inject/PrivateBinder.java | 5 - .../common/inject/ProvidedBy.java | 38 ----- .../elasticsearch/common/inject/Provides.java | 37 ----- .../common/inject/TypeLiteral.java | 13 -- .../inject/binder/ScopedBindingBuilder.java | 7 - .../internal/AbstractBindingBuilder.java | 8 -- .../common/inject/internal/Errors.java | 21 --- .../inject/internal/ProviderMethod.java | 97 ------------- .../internal/ProviderMethodsModule.java | 130 ------------------ .../common/inject/spi/Elements.java | 8 -- .../common/inject/spi/InjectionPoint.java | 3 +- .../common/inject/util/Modules.java | 32 ----- 22 files changed, 29 insertions(+), 684 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/Exposed.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/Provides.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/util/Modules.java diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binder.java b/server/src/main/java/org/elasticsearch/common/inject/Binder.java index d1ff5ff4b0d93..97aa924d32cb1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binder.java @@ -52,11 +52,7 @@ * * Specifies that a request for a {@code Service} instance with no binding * annotations should be treated as if it were a request for a - * {@code ServiceImpl} instance. This overrides the function of any - * {@link ImplementedBy @ImplementedBy} or {@link ProvidedBy @ProvidedBy} - * annotations found on {@code Service}, since Guice will have already - * "moved on" to {@code ServiceImpl} before it reaches the point when it starts - * looking for these annotations. + * {@code ServiceImpl} instance. * *
      *     bind(Service.class).toProvider(ServiceProvider.class);
    diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binding.java b/server/src/main/java/org/elasticsearch/common/inject/Binding.java index b2bb645089b48..9bc446a867aa7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binding.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binding.java @@ -31,9 +31,7 @@ *
      *     bind(Service.class).annotatedWith(Red.class).to(ServiceImpl.class);
      *     bindConstant().annotatedWith(ServerHost.class).to(args[0]);
- * <li>Implicitly by the Injector by following a type's {@link ImplementedBy
- * pointer} {@link ProvidedBy annotations} or by using its {@link Inject annotated} or
- * default constructor.</li>
+ * <li>Implicitly by the Injector by using its {@link Inject annotated} or default constructor.</li>
  * <li>By converting a bound instance to a different type.</li>
  * <li>For {@link Provider providers}, by delegating to the binding for the provided type.</li>
  • * diff --git a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java index 99a9d6fab9c1d..0865bf47090af 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java +++ b/server/src/main/java/org/elasticsearch/common/inject/BindingProcessor.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.ProviderInstanceBindingImpl; -import org.elasticsearch.common.inject.internal.ProviderMethod; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.UntargettedBindingImpl; import org.elasticsearch.common.inject.spi.BindingTargetVisitor; @@ -62,12 +61,7 @@ public Boolean visit(Binding command) { final Object source = command.getSource(); if (Void.class.equals(command.getKey().getRawType())) { - if (command instanceof ProviderInstanceBinding - && ((ProviderInstanceBinding) command).getProviderInstance() instanceof ProviderMethod) { - errors.voidProviderMethod(); - } else { - errors.missingConstantValues(); - } + errors.missingConstantValues(); return true; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java deleted file mode 100644 index 9a0cd367e1650..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructionProxyFactory.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -/** - * Creates {@link ConstructionProxy} instances. - * - * @author crazybob@google.com (Bob Lee) - */ -interface ConstructionProxyFactory { - - /** - * Gets a construction proxy for the given constructor. - */ - ConstructionProxy create(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java index 29ccae98c7d27..97a495f97cfbd 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstructorInjectorStore.java @@ -21,6 +21,9 @@ import org.elasticsearch.common.inject.internal.FailableCache; import org.elasticsearch.common.inject.spi.InjectionPoint; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + /** * Constructor injectors by type. 
* @@ -65,10 +68,28 @@ private ConstructorInjector createConstructor(TypeLiteral type, Errors ); MembersInjectorImpl membersInjector = injector.membersInjectorStore.get(type, errors); - ConstructionProxyFactory factory = new DefaultConstructionProxyFactory<>(injectionPoint); - errors.throwIfNewErrors(numErrorsBefore); - return new ConstructorInjector<>(factory.create(), constructorParameterInjectors, membersInjector); + @SuppressWarnings("unchecked") // the injection point is for a constructor of T + final Constructor constructor = (Constructor) injectionPoint.getMember(); + return new ConstructorInjector<>(new ConstructionProxy<>() { + @Override + public T newInstance(Object... arguments) throws InvocationTargetException { + try { + return constructor.newInstance(arguments); + } catch (InstantiationException e) { + throw new AssertionError(e); // shouldn't happen, we know this is a concrete type + } catch (IllegalAccessException e) { + // a security manager is blocking us, we're hosed + throw new AssertionError("Wrong access modifiers on " + constructor, e); + } + } + + @Override + public InjectionPoint getInjectionPoint() { + return injectionPoint; + } + + }, constructorParameterInjectors, membersInjector); } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java deleted file mode 100644 index cc713893abd69..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import org.elasticsearch.common.inject.spi.InjectionPoint; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -/** - * Produces construction proxies that invoke the class constructor. - * - * @author crazybob@google.com (Bob Lee) - */ -class DefaultConstructionProxyFactory implements ConstructionProxyFactory { - - private final InjectionPoint injectionPoint; - - /** - * @param injectionPoint an injection point whose member is a constructor of {@code T}. - */ - DefaultConstructionProxyFactory(InjectionPoint injectionPoint) { - this.injectionPoint = injectionPoint; - } - - @Override - public ConstructionProxy create() { - @SuppressWarnings("unchecked") // the injection point is for a constructor of T - final Constructor constructor = (Constructor) injectionPoint.getMember(); - - return new ConstructionProxy<>() { - @Override - public T newInstance(Object... 
arguments) throws InvocationTargetException { - try { - return constructor.newInstance(arguments); - } catch (InstantiationException e) { - throw new AssertionError(e); // shouldn't happen, we know this is a concrete type - } catch (IllegalAccessException e) { - // a security manager is blocking us, we're hosed - throw new AssertionError("Wrong access modifiers on " + constructor, e); - } - } - - @Override - public InjectionPoint getInjectionPoint() { - return injectionPoint; - } - - }; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java b/server/src/main/java/org/elasticsearch/common/inject/Exposed.java deleted file mode 100644 index 4f557212da883..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Exposed.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a - * private module to indicate that the provided binding is exposed. - * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -@Target(ElementType.METHOD) -@Retention(RUNTIME) -@Documented -public @interface Exposed { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java deleted file mode 100644 index 652be0f3ed30c..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ImplementedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default implementation of a type. - * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ImplementedBy { - - /** - * The implementation type. 
- */ - Class value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index d1086eb64ecc7..8614fd99da088 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,15 +17,12 @@ package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; -import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InstanceBindingImpl; import org.elasticsearch.common.inject.internal.InternalContext; import org.elasticsearch.common.inject.internal.InternalFactory; -import org.elasticsearch.common.inject.internal.LinkedBindingImpl; -import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; @@ -321,20 +318,6 @@ BindingImpl createUnitializedBinding(Key key, Scoping scoping, Object return binding; } - // Handle @ImplementedBy - ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class); - if (implementedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createImplementedByBinding(key, scoping, implementedBy, errors); - } - - // Handle @ProvidedBy. - ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class); - if (providedBy != null) { - Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors); - return createProvidedByBinding(key, scoping, providedBy, errors); - } - // We can't inject abstract classes. // TODO: Method interceptors could actually enable us to implement // abstract types. Should we remove this restriction? @@ -384,80 +367,6 @@ private BindingImpl> createTypeLiteralBinding(Key(this, key, SourceProvider.UNKNOWN_SOURCE, factory, emptySet(), value); } - /** - * Creates a binding for a type annotated with @ProvidedBy. - */ - BindingImpl createProvidedByBinding(Key key, Scoping scoping, ProvidedBy providedBy, Errors errors) throws ErrorsException { - final Class rawType = key.getTypeLiteral().getRawType(); - final Class> providerType = providedBy.value(); - - // Make sure it's not the same type. TODO: Can we check for deeper loops? - if (providerType == rawType) { - throw errors.recursiveProviderType().toException(); - } - - // Assume the provider provides an appropriate type. We double check at runtime. 
- @SuppressWarnings("unchecked") - final Key> providerKey = (Key>) Key.get(providerType); - final BindingImpl> providerBinding = getBindingOrThrow(providerKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> { - errors1 = errors1.withSource(providerKey); - Provider provider = providerBinding.getInternalFactory().get(errors1, context, dependency); - try { - Object o = provider.get(); - if (o != null && rawType.isInstance(o) == false) { - throw errors1.subtypeNotProvided(providerType, rawType).toException(); - } - @SuppressWarnings("unchecked") // protected by isInstance() check above - T t = (T) o; - return t; - } catch (RuntimeException e) { - throw errors1.errorInProvider(e).toException(); - } - }; - - return new LinkedProviderBindingImpl<>( - this, - key, - rawType /* source */, - Scopes.scope(this, internalFactory, scoping), - scoping, - providerKey - ); - } - - /** - * Creates a binding for a type annotated with @ImplementedBy. - */ - BindingImpl createImplementedByBinding(Key key, Scoping scoping, ImplementedBy implementedBy, Errors errors) - throws ErrorsException { - Class rawType = key.getTypeLiteral().getRawType(); - Class implementationType = implementedBy.value(); - - // Make sure it's not the same type. TODO: Can we check for deeper cycles? - if (implementationType == rawType) { - throw errors.recursiveImplementationType().toException(); - } - - // Make sure implementationType extends type. - if (rawType.isAssignableFrom(implementationType) == false) { - throw errors.notASubtype(implementationType, rawType).toException(); - } - - @SuppressWarnings("unchecked") // After the preceding check, this cast is safe. - Class subclass = (Class) implementationType; - - // Look up the target binding. - final Key targetKey = Key.get(subclass); - final BindingImpl targetBinding = getBindingOrThrow(targetKey, errors); - - InternalFactory internalFactory = (errors1, context, dependency) -> targetBinding.getInternalFactory() - .get(errors1.withSource(targetKey), context, dependency); - - return new LinkedBindingImpl<>(this, key, rawType /* source */, Scopes.scope(this, internalFactory, scoping), scoping, targetKey); - } - /** * Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to * other ancestor injectors until this injector is tried. diff --git a/server/src/main/java/org/elasticsearch/common/inject/Module.java b/server/src/main/java/org/elasticsearch/common/inject/Module.java index f3a43d80f31ec..38eddcdb200b7 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Module.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Module.java @@ -24,11 +24,6 @@ *
<p>
    * Your Module classes can use a more streamlined syntax by extending * {@link AbstractModule} rather than implementing this interface directly. - *
<p>
    - * In addition to the bindings configured via {@link #configure}, bindings - * will be created for all methods annotated with {@literal @}{@link Provides}. - * Use scope and binding annotations on these methods to configure the - * bindings. */ public interface Module { @@ -36,8 +31,7 @@ public interface Module { * Contributes bindings and other configurations for this module to {@code binder}. *
<p>
    * Do not invoke this method directly to install submodules. Instead use - * {@link Binder#install(Module)}, which ensures that {@link Provides provider methods} are - * discovered. + * {@link Binder#install(Module)}. */ void configure(Binder binder); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java index f1da98316465a..fd80e6271b2cf 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java @@ -24,11 +24,6 @@ */ public interface PrivateBinder extends Binder { - /** - * Makes the binding for {@code key} available to the enclosing environment - */ - void expose(Key key); - @Override PrivateBinder withSource(Object source); diff --git a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java b/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java deleted file mode 100644 index 945de83cf9116..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/ProvidedBy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * A pointer to the default provider type for a type. - * - * @author crazybob@google.com (Bob Lee) - */ -@Retention(RUNTIME) -@Target(TYPE) -public @interface ProvidedBy { - - /** - * The implementation type. - */ - Class> value(); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/Provides.java b/server/src/main/java/org/elasticsearch/common/inject/Provides.java deleted file mode 100644 index 587005f883574..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/Provides.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) 2007 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject; - -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Annotates methods of a {@link Module} to create a provider method binding. 
The method's return - * type is bound to its returned value. Guice will pass dependencies to the method as parameters. - * - * @author crazybob@google.com (Bob Lee) - * @since 2.0 - */ -@Documented -@Target(METHOD) -@Retention(RUNTIME) -public @interface Provides { -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index afc0db15e3942..d39c4e44d2ff9 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -277,17 +277,4 @@ public List> getParameterTypes(Member methodOrConstructor) { return resolveAll(genericParameterTypes); } - /** - * Returns the resolved generic return type of {@code method}. - * - * @param method a method defined by this or any supertype. - * @since 2.0 - */ - public TypeLiteral getReturnType(Method method) { - if (method.getDeclaringClass().isAssignableFrom(rawType) == false) { - throw new IllegalArgumentException(method + " is not defined by a supertype of " + type); - } - - return resolve(method.getGenericReturnType()); - } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java index 88b7fd86370c6..dad91c3fb8878 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/binder/ScopedBindingBuilder.java @@ -16,8 +16,6 @@ package org.elasticsearch.common.inject.binder; -import java.lang.annotation.Annotation; - /** * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. * @@ -25,11 +23,6 @@ */ public interface ScopedBindingBuilder { - /** - * See the EDSL examples at {@link org.elasticsearch.common.inject.Binder}. - */ - void in(Class scopeAnnotation); - /** * Instructs the {@link org.elasticsearch.common.inject.Injector} to eagerly initialize this * singleton-scoped binding upon creation. Useful for application diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java index 083c7296fe883..60b6a74dec997 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/AbstractBindingBuilder.java @@ -21,9 +21,7 @@ import org.elasticsearch.common.inject.spi.Element; import org.elasticsearch.common.inject.spi.InstanceBinding; -import java.lang.annotation.Annotation; import java.util.List; -import java.util.Objects; /** * Bind a value or constant. 
@@ -61,12 +59,6 @@ protected BindingImpl setBinding(BindingImpl binding) { return binding; } - public void in(final Class scopeAnnotation) { - Objects.requireNonNull(scopeAnnotation, "scopeAnnotation"); - checkNotScoped(); - setBinding(getBinding().withScoping(Scoping.forAnnotation(scopeAnnotation))); - } - public void asEagerSingleton() { checkNotScoped(); setBinding(getBinding().withScoping(Scoping.EAGER_SINGLETON)); diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index 8c44780e7b814..ea4b530f48b9b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.Provider; import org.elasticsearch.common.inject.ProvisionException; import org.elasticsearch.common.inject.Scope; import org.elasticsearch.common.inject.TypeLiteral; @@ -197,22 +196,6 @@ public Errors bindingToProvider() { return addMessage("Binding to Provider is not allowed."); } - public Errors subtypeNotProvided(Class> providerType, Class type) { - return addMessage("%s doesn't provide instances of %s.", providerType, type); - } - - public Errors notASubtype(Class implementationType, Class type) { - return addMessage("%s doesn't extend %s.", implementationType, type); - } - - public Errors recursiveImplementationType() { - return addMessage("@ImplementedBy points to the same class it annotates."); - } - - public Errors recursiveProviderType() { - return addMessage("@ProvidedBy points to the same class it annotates."); - } - public Errors missingRuntimeRetention(Object source) { return addMessage("Please annotate with @Retention(RUNTIME).%n" + " Bound at %s.", convert(source)); } @@ -266,10 +249,6 @@ public Errors duplicateScopes(Scope existing, Class annota return addMessage("Scope %s is already bound to %s. Cannot bind %s.", existing, annotationType, scope); } - public Errors voidProviderMethod() { - return addMessage("Provider methods must return a value. Do not return void."); - } - public Errors missingConstantValues() { return addMessage("Missing constant value. Please call to(...)."); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java deleted file mode 100644 index 861f9ad77128e..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Exposed; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.PrivateBinder; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.spi.ProviderWithDependencies; - -import java.lang.annotation.Annotation; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; - -/** - * A provider that invokes a method and returns its result. - * - * @author jessewilson@google.com (Jesse Wilson) - */ -public class ProviderMethod implements ProviderWithDependencies { - private final Key key; - private final Class scopeAnnotation; - private final Object instance; - private final Method method; - private final List> parameterProviders; - private final boolean exposed; - - /** - * @param method the method to invoke. Its return type must be the same type as {@code key}. - */ - ProviderMethod( - Key key, - Method method, - Object instance, - List> parameterProviders, - Class scopeAnnotation - ) { - this.key = key; - this.scopeAnnotation = scopeAnnotation; - this.instance = instance; - this.method = method; - this.parameterProviders = parameterProviders; - this.exposed = method.getAnnotation(Exposed.class) != null; - } - - public void configure(Binder binder) { - binder = binder.withSource(method); - - if (scopeAnnotation != null) { - binder.bind(key).toProvider(this).in(scopeAnnotation); - } else { - binder.bind(key).toProvider(this); - } - - if (exposed) { - // the cast is safe 'cause the only binder we have implements PrivateBinder. If there's a - // misplaced @Exposed, calling this will add an error to the binder's error queue - ((PrivateBinder) binder).expose(key); - } - } - - @Override - public T get() { - Object[] parameters = new Object[parameterProviders.size()]; - for (int i = 0; i < parameters.length; i++) { - parameters[i] = parameterProviders.get(i).get(); - } - - try { - // We know this cast is safe because T is the method's return type. - @SuppressWarnings({ "unchecked" }) - T result = (T) method.invoke(instance, parameters); - return result; - } catch (IllegalAccessException e) { - throw new AssertionError(e); - } catch (InvocationTargetException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java deleted file mode 100644 index 6a1d7aabed962..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.elasticsearch.common.inject.internal; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Key; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.Provider; -import org.elasticsearch.common.inject.Provides; -import org.elasticsearch.common.inject.TypeLiteral; -import org.elasticsearch.common.inject.spi.Message; -import org.elasticsearch.common.inject.util.Modules; - -import java.lang.annotation.Annotation; -import java.lang.reflect.Member; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Creates bindings to methods annotated with {@literal @}{@link Provides}. Use the scope and - * binding annotations on the provider method to configure the binding. - * - * @author crazybob@google.com (Bob Lee) - * @author jessewilson@google.com (Jesse Wilson) - */ -public final class ProviderMethodsModule implements Module { - private final Object delegate; - private final TypeLiteral typeLiteral; - - private ProviderMethodsModule(Object delegate) { - this.delegate = Objects.requireNonNull(delegate, "delegate"); - this.typeLiteral = TypeLiteral.get(this.delegate.getClass()); - } - - /** - * Returns a module which creates bindings for provider methods from the given module. - */ - public static Module forModule(Module module) { - return forObject(module); - } - - /** - * Returns a module which creates bindings for provider methods from the given object. - * This is useful notably for GIN - */ - public static Module forObject(Object object) { - // avoid infinite recursion, since installing a module always installs itself - if (object instanceof ProviderMethodsModule) { - return Modules.EMPTY_MODULE; - } - - return new ProviderMethodsModule(object); - } - - @Override - public synchronized void configure(Binder binder) { - for (ProviderMethod providerMethod : getProviderMethods(binder)) { - providerMethod.configure(binder); - } - } - - public List> getProviderMethods(Binder binder) { - List> result = new ArrayList<>(); - for (Class c = delegate.getClass(); c != Object.class; c = c.getSuperclass()) { - for (Method method : c.getMethods()) { - if (method.getAnnotation(Provides.class) != null) { - result.add(createProviderMethod(binder, method)); - } - } - } - return result; - } - - ProviderMethod createProviderMethod(Binder binder, final Method method) { - binder = binder.withSource(method); - Errors errors = new Errors(method); - - // prepare the parameter providers - List> parameterProviders = new ArrayList<>(); - List> parameterTypes = typeLiteral.getParameterTypes(method); - Annotation[][] parameterAnnotations = method.getParameterAnnotations(); - for (int i = 0; i < parameterTypes.size(); i++) { - Key key = getKey(errors, parameterTypes.get(i), method, parameterAnnotations[i]); - parameterProviders.add(binder.getProvider(key)); - } - - @SuppressWarnings("unchecked") // Define T as the method's return type. 
- TypeLiteral returnType = (TypeLiteral) typeLiteral.getReturnType(method); - - Key key = getKey(errors, returnType, method, method.getAnnotations()); - Class scopeAnnotation = Annotations.findScopeAnnotation(errors, method.getAnnotations()); - - for (Message message : errors.getMessages()) { - binder.addError(message); - } - - return new ProviderMethod<>(key, method, delegate, parameterProviders, scopeAnnotation); - } - - static Key getKey(Errors errors, TypeLiteral type, Member member, Annotation[] annotations) { - Annotation bindingAnnotation = Annotations.findBindingAnnotation(errors, member, annotations); - return bindingAnnotation == null ? Key.get(type) : Key.get(type, bindingAnnotation); - } - - @Override - public boolean equals(Object o) { - return o instanceof ProviderMethodsModule && ((ProviderMethodsModule) o).delegate == delegate; - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 47e5d7d0753c4..22f86d6991e84 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.inject.internal.AbstractBindingBuilder; import org.elasticsearch.common.inject.internal.BindingBuilder; import org.elasticsearch.common.inject.internal.Errors; -import org.elasticsearch.common.inject.internal.ProviderMethodsModule; import org.elasticsearch.common.inject.internal.SourceProvider; import java.lang.annotation.Annotation; @@ -135,7 +134,6 @@ public void install(Module module) { addError(e); } } - binder.install(ProviderMethodsModule.forModule(module)); } } @@ -192,12 +190,6 @@ public RecordingBinder skipSources(Class... classesToSkip) { return new RecordingBinder(this, null, newSourceProvider); } - @Override - public void expose(Key key) { - addError("Cannot expose %s on a standard binder. " + "Exposed bindings are only applicable to private binders.", key); - - } - private static final Logger logger = LogManager.getLogger(Elements.class); protected Object getSource() { diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index df00c889fd3eb..945dfca96072e 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -123,8 +123,7 @@ public List> getDependencies() { /** * Returns true if this injection point shall be skipped if the injector cannot resolve bindings * for all required dependencies. Both explicit bindings (as specified in a module), and implicit - * bindings ({@literal @}{@link org.elasticsearch.common.inject.ImplementedBy ImplementedBy}, default - * constructors etc.) may be used to satisfy optional injection points. + * bindings by default constructors etc.) may be used to satisfy optional injection points. */ public boolean isOptional() { return optional; diff --git a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java b/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java deleted file mode 100644 index 1162bef25e682..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2008 Google Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.util; - -import org.elasticsearch.common.inject.Module; - -/** - * Static utility methods for creating and working with instances of {@link Module}. - * - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - */ -public final class Modules { - private Modules() {} - - public static final Module EMPTY_MODULE = binder -> {}; - -} From 9ab055889f34da9a26fef327291f4a174ba00748 Mon Sep 17 00:00:00 2001 From: Lars <3042474+l4r-s@users.noreply.github.com> Date: Tue, 12 Mar 2024 11:06:51 +0100 Subject: [PATCH 125/248] add String sha512() painless function (#99048) --- docs/changelog/99048.yaml | 6 ++++++ .../painless/api/Augmentation.java | 4 ++++ .../org.elasticsearch.script.ingest.txt | 1 + .../org.elasticsearch.script.reindex.txt | 1 + .../org.elasticsearch.script.update.txt | 1 + ...rg.elasticsearch.script.update_by_query.txt | 1 + .../painless/AugmentationTests.java | 18 ++++++++++++++++++ .../xpack/watcher/painless_whitelist.txt | 1 + 8 files changed, 33 insertions(+) create mode 100644 docs/changelog/99048.yaml diff --git a/docs/changelog/99048.yaml b/docs/changelog/99048.yaml new file mode 100644 index 0000000000000..722c145dae78f --- /dev/null +++ b/docs/changelog/99048.yaml @@ -0,0 +1,6 @@ +pr: 99048 +summary: String sha512() painless function +area: Infra/Scripting +type: enhancement +issues: + - 97691 diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java index 9267a8e963045..21f940efda5ac 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java @@ -673,6 +673,10 @@ public static String sha256(String source) { return MessageDigests.toHexString(MessageDigests.sha256().digest(source.getBytes(StandardCharsets.UTF_8))); } + public static String sha512(String source) { + return MessageDigests.toHexString(MessageDigests.sha512().digest(source.getBytes(StandardCharsets.UTF_8))); + } + public static final int UNLIMITED_PATTERN_FACTOR = 0; public static final int DISABLED_PATTERN_FACTOR = -1; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt index 7f2282eaa714a..13678c4216d7a 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class 
org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt index a90d3525e1203..18d658d797b60 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.reindex.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt index b58a8e720b21b..214fdaae26394 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt index 7c0bf5b2985fe..6c569a165336b 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update_by_query.txt @@ -11,6 +11,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index b51f0f2657278..e97bd1bb123ca 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -293,6 +293,24 @@ public void testSha256() { assertEquals("97df3588b5a3f24babc3851b372f0ba71a9dcdded43b14b9d06961bfc1707d9d", execDigest("'foobarbaz'.sha256()")); } + public void testSha512() { + assertEquals( + "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc663832" + + "6e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", + execDigest("'foo'.sha512()") + ); + assertEquals( + "d82c4eb5261cb9c8aa9855edd67d1bd10482f41529858d925094d173fa662aa9" + + "1ff39bc5b188615273484021dfb16fd8284cf684ccf0fc795be3aa2fc1e6c181", + execDigest("'bar'.sha512()") + ); + assertEquals( + "cb377c10b0f5a62c803625a799d9e908be45e767f5d147d4744907cb05597aa4" + + "edd329a0af147add0cf4181ed328fa1e7994265826b3ed3d7ef6f067ca99185a", + execDigest("'foobarbaz'.sha512()") + ); + } + public void 
testToEpochMilli() { assertEquals(0L, exec("ZonedDateTime.parse('1970-01-01T00:00:00Z').toEpochMilli()")); assertEquals(1602097376782L, exec("ZonedDateTime.parse('2020-10-07T19:02:56.782Z').toEpochMilli()")); diff --git a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt index 89e313875c18e..2dc9b41bbba23 100644 --- a/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt +++ b/x-pack/plugin/watcher/src/main/resources/org/elasticsearch/xpack/watcher/painless_whitelist.txt @@ -8,6 +8,7 @@ class java.lang.String { String org.elasticsearch.painless.api.Augmentation sha1() String org.elasticsearch.painless.api.Augmentation sha256() + String org.elasticsearch.painless.api.Augmentation sha512() } class org.elasticsearch.painless.api.Json { From ac8e8d28398ea1dba1e82e94c246712e0f747bc7 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Tue, 12 Mar 2024 11:11:25 +0100 Subject: [PATCH 126/248] Remove version from rollup (#106153) For bwc the `rollup-version` field is kept and populated with and empty string to prevent checks failing in earlier versions of ES. --- .../elasticsearch/xpack/rollup/Rollup.java | 2 - .../xpack/rollup/action/RollupIndexCaps.java | 2 +- .../action/TransportPutRollupJobAction.java | 12 +----- .../action/PutJobStateMachineTests.java | 39 +------------------ 4 files changed, 3 insertions(+), 52 deletions(-) diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index aef7a266fff37..1748c1be86b78 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -77,8 +77,6 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin public static final String TASK_THREAD_POOL_NAME = RollupField.NAME + "_indexing"; - public static final String ROLLUP_TEMPLATE_VERSION_FIELD = "rollup-version"; - private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java index 87f7a3de956fc..ebdcc1ed13e1f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupIndexCaps.java @@ -60,7 +60,7 @@ public class RollupIndexCaps implements Writeable, ToXContentFragment { ... job config, parsable by RollupJobConfig.PARSER ... 
} }, - "rollup-version": "7.0.0" + "rollup-version": "" } } */ diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index e66bb35cce1cf..a276971762c81 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; @@ -53,7 +52,6 @@ import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import java.io.IOException; import java.util.Map; @@ -188,7 +186,7 @@ private static XContentBuilder createMappings(RollupJobConfig config) throws IOE .startObject("mappings") .startObject("_doc") .startObject("_meta") - .field(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.CURRENT.toString()) + .field("rollup-version", "") // empty string to remain backwards compatible .startObject("_rollup") .field(config.getId(), config) .endObject() @@ -255,14 +253,6 @@ static void updateMapping( Map rollupMeta = (Map) ((Map) m).get(RollupField.ROLLUP_META); - String stringVersion = (String) ((Map) m).get(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD); - if (stringVersion == null) { - listener.onFailure( - new IllegalStateException("Could not determine version of existing rollup metadata for index [" + indexName + "]") - ); - return; - } - if (rollupMeta.get(job.getConfig().getId()) != null) { String msg = "Cannot create rollup job [" + job.getConfig().getId() diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index fc5805d7ed9d1..b1455c4738623 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -25,7 +24,6 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; @@ -33,7 +31,6 @@ import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import 
org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.rollup.Rollup; import org.mockito.ArgumentCaptor; import java.util.Collections; @@ -127,7 +124,7 @@ public void testIndexMetadata() throws InterruptedException { String mapping = requestCaptor.getValue().mappings(); // Make sure the version is present, and we have our date template (the most important aspects) - assertThat(mapping, containsString("\"rollup-version\":\"" + Version.CURRENT.toString() + "\"")); + assertThat(mapping, containsString("\"rollup-version\":\"\"")); assertThat(mapping, containsString("\"path_match\":\"*.date_histogram.timestamp\"")); listenerCaptor.getValue().onFailure(new ResourceAlreadyExistsException(job.getConfig().getRollupIndex())); @@ -245,38 +242,6 @@ public void testMetadataButNotRollup() { verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); } - @SuppressWarnings({ "unchecked", "rawtypes" }) - public void testNoMappingVersion() { - RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); - - ActionListener testListener = ActionListener.wrap(response -> { - fail("Listener success should not have been triggered."); - }, e -> { - assertThat( - e.getMessage(), - equalTo("Could not determine version of existing rollup metadata for index [" + job.getConfig().getRollupIndex() + "]") - ); - }); - - Logger logger = mock(Logger.class); - Client client = mock(Client.class); - - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer(invocation -> { - GetMappingsResponse response = mock(GetMappingsResponse.class); - Map m = Maps.newMapWithExpectedSize(2); - m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); - MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); - - when(response.getMappings()).thenReturn(Map.of(job.getConfig().getRollupIndex(), meta)); - requestCaptor.getValue().onResponse(response); - return null; - }).when(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), requestCaptor.capture()); - - TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); - verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); - } - @SuppressWarnings({ "unchecked", "rawtypes" }) public void testJobAlreadyInMapping() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); @@ -299,7 +264,6 @@ public void testJobAlreadyInMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); @@ -339,7 +303,6 @@ public void testAddJobToMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = Maps.newMapWithExpectedSize(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); m.put(RollupField.ROLLUP_META, Collections.singletonMap(unrelatedJob.getId(), 
unrelatedJob)); MappingMetadata meta = new MappingMetadata(RollupField.TYPE_NAME, Collections.singletonMap("_meta", m)); From 3f26540fdb8468d1d82c7bc882a42548e37619b0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 12 Mar 2024 10:34:45 +0000 Subject: [PATCH 127/248] Improve throttling in `TransportGetSnapshotsAction` (#105769) Each call to `Repository#getSnapshotInfo` spawns a worker per `SNAPSHOT_META` thread, so with many repositories a call to the get-snapshots API ends up spamming this threadpool more than we'd like. It also takes each repository's collection of matching `SnapshotId` items, copies it to a list, and then copies it again into a queue for the workers to process. With this commit we use one throttle for `SnapshotInfo` retrievals across the whole `GetSnapshotsOperation`, and iterate directly over the original set of `SnapshotId` items to avoid any more `O(#snapshots)` allocations. --- .../get/TransportGetSnapshotsAction.java | 82 +++++++++++++++++-- .../common/collect/Iterators.java | 33 ++++++++ .../common/collect/IteratorsTests.java | 26 ++++++ 3 files changed, 136 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 8f089b0353e55..c38c6ec79e656 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -21,11 +21,18 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.concurrent.AbstractThrottledTaskRunner; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -40,6 +47,7 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -55,6 +63,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Predicate; import java.util.function.ToLongFunction; import java.util.stream.Stream; @@ -64,6 +73,8 @@ */ public class TransportGetSnapshotsAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportGetSnapshotsAction.class); + private final RepositoriesService repositoriesService; @Inject @@ -163,6 +174,9 @@ private class GetSnapshotsOperation { private 
final boolean verbose; private final boolean indices; + // snapshot info throttling + private final GetSnapshotInfoExecutor getSnapshotInfoExecutor; + // results private final Map failuresByRepository = ConcurrentCollections.newConcurrentMap(); private final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); @@ -202,6 +216,11 @@ private class GetSnapshotsOperation { this.verbose = verbose; this.indices = indices; + this.getSnapshotInfoExecutor = new GetSnapshotInfoExecutor( + threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), + cancellableTask::isCancelled + ); + for (final var missingRepo : resolvedRepositories.missing()) { failuresByRepository.put(missingRepo, new RepositoryMissingException(missingRepo)); } @@ -433,11 +452,34 @@ private void snapshots(String repositoryName, Collection snapshotIds // only need to synchronize accesses related to reading SnapshotInfo from the repo final List syncSnapshots = Collections.synchronizedList(snapshots); - repository.getSnapshotInfo(snapshotIdsToIterate, ignoreUnavailable == false, cancellableTask::isCancelled, snapshotInfo -> { - if (predicates.test(snapshotInfo)) { - syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); - } - }, listeners.acquire()); + ThrottledIterator.run( + Iterators.failFast(snapshotIdsToIterate.iterator(), () -> cancellableTask.isCancelled() || listeners.isFailing()), + (ref, snapshotId) -> { + final var refListener = ActionListener.runBefore(listeners.acquire(), ref::close); + getSnapshotInfoExecutor.getSnapshotInfo(repository, snapshotId, new ActionListener<>() { + @Override + public void onResponse(SnapshotInfo snapshotInfo) { + if (predicates.test(snapshotInfo)) { + syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); + } + refListener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + if (ignoreUnavailable) { + logger.warn(Strings.format("failed to fetch snapshot info for [%s:%s]", repository, snapshotId), e); + refListener.onResponse(null); + } else { + refListener.onFailure(e); + } + } + }); + }, + getSnapshotInfoExecutor.getMaxRunningTasks(), + () -> {}, + () -> {} + ); } } @@ -727,4 +769,34 @@ private static int indexCount(SnapshotId snapshotId, RepositoryData repositoryDa private record SnapshotsInRepo(List snapshotInfos, int totalCount, int remaining) { private static final SnapshotsInRepo EMPTY = new SnapshotsInRepo(List.of(), 0, 0); } + + /** + * Throttling executor for retrieving {@link SnapshotInfo} instances from the repository without spamming the SNAPSHOT_META threadpool + * and starving other users of access to it. Similar to {@link Repository#getSnapshotInfo} but allows for finer-grained control over + * which snapshots are retrieved. 
+ */ + private static class GetSnapshotInfoExecutor extends AbstractThrottledTaskRunner> { + private final int maxRunningTasks; + private final BooleanSupplier isCancelledSupplier; + + GetSnapshotInfoExecutor(int maxRunningTasks, BooleanSupplier isCancelledSupplier) { + super(GetSnapshotsAction.NAME, maxRunningTasks, EsExecutors.DIRECT_EXECUTOR_SERVICE, ConcurrentCollections.newBlockingQueue()); + this.maxRunningTasks = maxRunningTasks; + this.isCancelledSupplier = isCancelledSupplier; + } + + int getMaxRunningTasks() { + return maxRunningTasks; + } + + void getSnapshotInfo(Repository repository, SnapshotId snapshotId, ActionListener listener) { + enqueueTask(listener.delegateFailure((l, ref) -> { + if (isCancelledSupplier.getAsBoolean()) { + l.onFailure(new TaskCancelledException("task cancelled")); + } else { + repository.getSnapshotInfo(snapshotId, ActionListener.releaseAfter(l, ref)); + } + })); + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 4b5cef4bbbd45..ea8eadd66acaa 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -16,6 +16,7 @@ import java.util.NoSuchElementException; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; @@ -225,6 +226,38 @@ public U next() { } } + /** + * Returns an iterator over the same items as the provided {@code input} except that it stops yielding items (i.e. starts returning + * {@code false} from {@link Iterator#hasNext()} on failure. 
+ */ + public static Iterator failFast(Iterator input, BooleanSupplier isFailingSupplier) { + if (isFailingSupplier.getAsBoolean()) { + return Collections.emptyIterator(); + } else { + return new FailFastIterator<>(input, isFailingSupplier); + } + } + + private static class FailFastIterator implements Iterator { + private final Iterator delegate; + private final BooleanSupplier isFailingSupplier; + + FailFastIterator(Iterator delegate, BooleanSupplier isFailingSupplier) { + this.delegate = delegate; + this.isFailingSupplier = isFailingSupplier; + } + + @Override + public boolean hasNext() { + return isFailingSupplier.getAsBoolean() == false && delegate.hasNext(); + } + + @Override + public T next() { + return delegate.next(); + } + } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { if (iterator1 == null) { return iterator2 == null; diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index eb1d5838c734b..351efa59f2381 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; import java.util.function.ToIntFunction; @@ -216,6 +217,31 @@ public void testMap() { assertEquals(array.length, index.get()); } + public void testFailFast() { + final var array = randomIntegerArray(); + assertEmptyIterator(Iterators.failFast(Iterators.forArray(array), () -> true)); + + final var index = new AtomicInteger(); + Iterators.failFast(Iterators.forArray(array), () -> false).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + + final var isFailing = new AtomicBoolean(); + index.set(0); + Iterators.failFast(Iterators.concat(Iterators.forArray(array), new Iterator<>() { + @Override + public boolean hasNext() { + isFailing.set(true); + return true; + } + + @Override + public Integer next() { + return 0; + } + }), isFailing::get).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + public void testEquals() { final BiPredicate notCalled = (a, b) -> { throw new AssertionError("not called"); }; From 312c733e4f25993c98045c1fd0aa4a8d1abb8b4c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 12 Mar 2024 10:55:35 +0000 Subject: [PATCH 128/248] Comparator constants in TransportGetSnapshotsAction (#106224) Today we use a constant `Comparator` for ascending sorts, but create a new instance for descending sorts each time. We should use constants for both cases. 
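To make the intent concrete, here is a minimal, self-contained Java sketch of the pattern described above: both sort orders are built once as constants, and a per-request flag merely selects between them instead of calling reversed() on every descending sort. The Item record, its fields, and the comparator(boolean) helper are illustrative stand-ins (Item roughly plays the role of SnapshotInfo) and are not part of the Elasticsearch code touched by this patch.

    import java.util.Comparator;
    import java.util.List;

    public class ComparatorConstantsSketch {

        // Hypothetical element type, standing in for SnapshotInfo.
        record Item(String name, long startTime) {}

        // Ascending order is computed once...
        private static final Comparator<Item> ASCENDING =
            Comparator.comparingLong(Item::startTime).thenComparing(Item::name);

        // ...and the descending order is derived from it once as well,
        // rather than calling ASCENDING.reversed() on every request.
        private static final Comparator<Item> DESCENDING = ASCENDING.reversed();

        static Comparator<Item> comparator(boolean descending) {
            return descending ? DESCENDING : ASCENDING;
        }

        public static void main(String[] args) {
            List<Item> items = List.of(new Item("b", 2), new Item("a", 1), new Item("c", 1));
            System.out.println(items.stream().sorted(comparator(false)).toList());
            System.out.println(items.stream().sorted(comparator(true)).toList());
        }
    }
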
--- .../cluster/snapshots/get/SnapshotSortKey.java | 15 ++++++++++----- .../get/TransportGetSnapshotsAction.java | 8 +------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java index 72a449268bf79..14735d13ae68e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/SnapshotSortKey.java @@ -153,11 +153,13 @@ private static int compareRepositoryName(String name, String repoName, SnapshotI }; private final String name; - private final Comparator snapshotInfoComparator; + private final Comparator ascendingSnapshotInfoComparator; + private final Comparator descendingSnapshotInfoComparator; SnapshotSortKey(String name, Comparator snapshotInfoComparator) { this.name = name; - this.snapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + this.ascendingSnapshotInfoComparator = snapshotInfoComparator.thenComparing(SnapshotInfo::snapshotId); + this.descendingSnapshotInfoComparator = ascendingSnapshotInfoComparator.reversed(); } @Override @@ -166,10 +168,13 @@ public String toString() { } /** - * @return a {@link Comparator} which can be used to sort {@link SnapshotInfo} items according to this sort key. + * @return a {@link Comparator} which sorts {@link SnapshotInfo} instances according to this sort key. */ - public final Comparator getSnapshotInfoComparator() { - return snapshotInfoComparator; + public final Comparator getSnapshotInfoComparator(SortOrder sortOrder) { + return switch (sortOrder) { + case ASC -> ascendingSnapshotInfoComparator; + case DESC -> descendingSnapshotInfoComparator; + }; } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index c38c6ec79e656..28586c7a6410b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -54,7 +54,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -529,7 +528,7 @@ private SnapshotsInRepo sortSnapshotsWithNoOffsetOrLimit(List snap private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, int totalCount, int offset, int size) { final var resultsStream = snapshotInfoStream.filter(sortBy.getAfterPredicate(after, order)) - .sorted(buildComparator()) + .sorted(sortBy.getSnapshotInfoComparator(order)) .skip(offset); if (size == GetSnapshotsRequest.NO_LIMIT) { return new SnapshotsInRepo(resultsStream.toList(), totalCount, 0); @@ -548,11 +547,6 @@ private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, i return new SnapshotsInRepo(results, totalCount, remaining); } } - - private Comparator buildComparator() { - final var comparator = sortBy.getSnapshotInfoComparator(); - return order == SortOrder.DESC ? 
comparator.reversed() : comparator; - } } /** From 92f61978195b90d9b43bca3ff6525fd467f2dc13 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 12 Mar 2024 12:36:25 +0100 Subject: [PATCH 129/248] Add index forecasts to /_cat/allocation output (#97561) This change adds shard size and ingest load forecasts to /_cat/allocation output --- docs/changelog/97561.yaml | 5 ++ docs/reference/cat/allocation.asciidoc | 16 +++- .../test/cat.allocation/10_basic.yml | 90 +++++++++++++------ .../rest/action/cat/RestAllocationAction.java | 35 +++++--- 4 files changed, 105 insertions(+), 41 deletions(-) create mode 100644 docs/changelog/97561.yaml diff --git a/docs/changelog/97561.yaml b/docs/changelog/97561.yaml new file mode 100644 index 0000000000000..cacefbf7e4ca3 --- /dev/null +++ b/docs/changelog/97561.yaml @@ -0,0 +1,5 @@ +pr: 97561 +summary: Add index forecasts to /_cat/allocation output +area: Allocation +type: enhancement +issues: [] diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index f9574ed933398..7bab1926cff09 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -57,6 +57,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=cat-v] `shards`:: Number of primary and replica shards assigned to the node. +`shards.undesired`:: +Amount of shards that are scheduled to be moved elsewhere in the cluster +or -1 other than desired balance allocator is used + +`write_load.forecast`:: +Sum of index write load forecasts + +`disk.indices.forecast`:: +Sum of shard size forecasts + `disk.indices`:: Disk space used by the node's shards. Does not include disk space for the <> or unassigned shards. @@ -99,6 +109,8 @@ IP address and port for the node. `node`:: Name for the node. Set using <>. 
+`node.role`, `r`, `role`, `nodeRole`:: +Node roles [[cat-allocation-api-example]] ==== {api-examples-title} @@ -113,8 +125,8 @@ The API returns the following response: [source,txt] -------------------------------------------------- -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role - 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst +shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index ed519438f1b1e..2ba01c3b5711e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -2,8 +2,8 @@ "Help": - skip: - version: " - 8.9.99" - reason: "node.role column added in 8.10.0" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast are added in 8.14.0" - do: cat.allocation: @@ -11,24 +11,27 @@ - match: $body: | - /^ shards .+ \n - disk.indices .+ \n - disk.used .+ \n - disk.avail .+ \n - disk.total .+ \n - disk.percent .+ \n - host .+ \n - ip .+ \n - node .+ \n - node.role .+ \n + /^ shards .+ \n + shards.undesired .+ \n + write_load.forecast .+ \n + disk.indices.forecast .+ \n + disk.indices .+ \n + disk.used .+ \n + disk.avail .+ \n + disk.total .+ \n + disk.percent .+ \n + host .+ \n + ip .+ \n + node .+ \n + node.role .+ \n $/ --- "One index": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: indices.create: @@ -42,6 +45,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -65,8 +71,8 @@ "Node ID": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -76,6 +82,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? #no value from client nodes @@ -99,12 +108,11 @@ $/ --- - "All Nodes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -115,6 +123,9 @@ /^ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? 
\s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+)? #no value from client nodes @@ -138,8 +149,8 @@ "Column headers": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -148,6 +159,9 @@ $body: | /^ shards \s+ + shards.undesired \s+ + write_load.forecast \s+ + disk.indices.forecast \s+ disk.indices \s+ disk.used \s+ disk.avail \s+ @@ -161,6 +175,9 @@ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ \d+(\.\d+)?[kmgt]?b \s+ (\d+(\.\d+)?[kmgt]b \s+) #always should return value since we filter out non data nodes by default @@ -211,12 +228,11 @@ --- - "Bytes": - skip: - version: " - 8.11.99" - reason: "node.role column shown by default from 8.12.0 onwards" + version: " - 8.13.99" + reason: "shards.undesired, write_load.forecast and disk.indices.forecast columns are added in 8.14.0" - do: cat.allocation: @@ -226,6 +242,9 @@ $body: | /^ ( \d+ \s+ #usually 0, unless some system index has been recreated before this runs + \d+ \s+ + \d+(\.\d+)? \s+ + \d+ \s+ 0 \s+ \d+ \s+ (\d+ \s+) #always should return value since we filter out non data nodes by default @@ -240,7 +259,6 @@ $/ --- - "Node roles": - skip: @@ -259,3 +277,25 @@ \n )+ $/ + +--- +"Node forecasts": + + - skip: + version: " - 8.13.99" + reason: "write_load.forecast and disk.indices.forecast columns added in 8.14.0" + + - do: + cat.allocation: + h: [node, shards.undesired, write_load.forecast, disk.indices.forecast] + + - match: + $body: | + /^ + ( [-\w.]+ \s+ + [-\w.]+ \s+ + [-\w.]+ \s+ + [\w]+ + \n + )+ + $/ diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 068c809554631..570fb0ebc7c77 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.unit.ByteSizeValue; @@ -37,6 +38,8 @@ @ServerlessScope(Scope.INTERNAL) public class RestAllocationAction extends AbstractCatAction { + private static final String UNASSIGNED = "UNASSIGNED"; + @Override public List routes() { return List.of(new Route(GET, "/_cat/allocation"), new Route(GET, "/_cat/allocation/{nodes}")); @@ -67,9 +70,10 @@ public void processResponse(final ClusterStateResponse state) { statsRequest.setIncludeShardsStats(false); statsRequest.clear() .addMetric(NodesStatsRequestParameters.Metric.FS.metricName()) + .addMetric(NodesStatsRequestParameters.Metric.ALLOCATIONS.metricName()) .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); - client.admin().cluster().nodesStats(statsRequest, new RestResponseListener(channel) { + client.admin().cluster().nodesStats(statsRequest, new RestResponseListener<>(channel) { @Override public RestResponse 
buildResponse(NodesStatsResponse stats) throws Exception { Table tab = buildTable(request, state, stats); @@ -86,6 +90,9 @@ protected Table getTableWithHeader(final RestRequest request) { final Table table = new Table(); table.startHeaders(); table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node"); + table.addCell("shards.undesired", "text-align:right;desc:number of shards that are scheduled to be moved elsewhere in the cluster"); + table.addCell("write_load.forecast", "alias:wlf,writeLoadForecast;text-align:right;desc:sum of index write load forecasts"); + table.addCell("disk.indices.forecast", "alias:dif,diskIndicesForecast;text-align:right;desc:sum of shard size forecasts"); table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by ES indices"); table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)"); table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available"); @@ -100,22 +107,17 @@ protected Table getTableWithHeader(final RestRequest request) { } private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) { - final Map allocs = new HashMap<>(); + final Map shardCounts = new HashMap<>(); for (ShardRouting shard : state.getState().routingTable().allShardsIterator()) { - String nodeId = "UNASSIGNED"; - if (shard.assignedToNode()) { - nodeId = shard.currentNodeId(); - } - allocs.merge(nodeId, 1, Integer::sum); + String nodeId = shard.assignedToNode() ? shard.currentNodeId() : UNASSIGNED; + shardCounts.merge(nodeId, 1, Integer::sum); } Table table = getTableWithHeader(request); for (NodeStats nodeStats : stats.getNodes()) { DiscoveryNode node = nodeStats.getNode(); - int shardCount = allocs.getOrDefault(node.getId(), 0); - ByteSizeValue total = nodeStats.getFs().getTotal().getTotal(); ByteSizeValue avail = nodeStats.getFs().getTotal().getAvailable(); // if we don't know how much we use (non data nodes), it means 0 @@ -127,9 +129,13 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, diskPercent = (short) (used * 100 / (used + avail.getBytes())); } } + NodeAllocationStats nodeAllocationStats = nodeStats.getNodeAllocationStats(); table.startRow(); - table.addCell(shardCount); + table.addCell(shardCounts.getOrDefault(node.getId(), 0)); + table.addCell(nodeAllocationStats != null ? nodeAllocationStats.undesiredShards() : null); + table.addCell(nodeAllocationStats != null ? nodeAllocationStats.forecastedIngestLoad() : null); + table.addCell(nodeAllocationStats != null ? ByteSizeValue.ofBytes(nodeAllocationStats.forecastedDiskUsage()) : null); table.addCell(nodeStats.getIndices().getStore().size()); table.addCell(used < 0 ? null : ByteSizeValue.ofBytes(used)); table.addCell(avail.getBytes() < 0 ? 
null : avail); @@ -142,10 +148,12 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, table.endRow(); } - final String UNASSIGNED = "UNASSIGNED"; - if (allocs.containsKey(UNASSIGNED)) { + if (shardCounts.containsKey(UNASSIGNED)) { table.startRow(); - table.addCell(allocs.get(UNASSIGNED)); + table.addCell(shardCounts.get(UNASSIGNED)); + table.addCell(null); + table.addCell(null); + table.addCell(null); table.addCell(null); table.addCell(null); table.addCell(null); @@ -160,5 +168,4 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, return table; } - } From c2b104bdb339515cbc20cddac038ba0c7470481c Mon Sep 17 00:00:00 2001 From: William Brafford Date: Tue, 12 Mar 2024 08:41:34 -0400 Subject: [PATCH 130/248] Clean up TODOs in BuildExtension and BuildVersion (#106155) * With downstream implementation in place, clean up default and superclass methods --- .../org/elasticsearch/env/BuildVersion.java | 5 +---- .../elasticsearch/env/DefaultBuildVersion.java | 5 ++--- .../elasticsearch/internal/BuildExtension.java | 17 ++++++++--------- 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index e1f5879ae9569..e19ad87932e7f 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -92,10 +92,7 @@ public static BuildVersion current() { } // only exists for NodeMetadata#toXContent - // TODO[wrb]: make this abstract once all downstream classes override it - protected int id() { - return -1; - } + public abstract int id(); private static class CurrentExtensionHolder { private static final BuildExtension BUILD_EXTENSION = findExtension(); diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index 6cec751a1cad1..8271b836269a7 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -23,15 +23,14 @@ * give users simple rules in terms of public-facing release versions for Elasticsearch * compatibility when upgrading nodes and prevents downgrades in place.

    */ -// TODO[wrb]: make package-private once default implementations are removed in BuildExtension -public final class DefaultBuildVersion extends BuildVersion { +final class DefaultBuildVersion extends BuildVersion { public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); private final int versionId; private final Version version; - public DefaultBuildVersion(int versionId) { + DefaultBuildVersion(int versionId) { assert versionId >= 0 : "Release version IDs must be non-negative integers"; this.versionId = versionId; this.version = Version.fromId(versionId); diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index cc02495b39520..b1b9a568e3083 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -10,7 +10,6 @@ import org.elasticsearch.Build; import org.elasticsearch.env.BuildVersion; -import org.elasticsearch.env.DefaultBuildVersion; /** * Allows plugging in current build info. @@ -29,13 +28,13 @@ default boolean hasReleaseVersioning() { return true; } - // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated - default BuildVersion currentBuildVersion() { - return DefaultBuildVersion.CURRENT; - } + /** + * Returns the {@link BuildVersion} for the running Elasticsearch code. + */ + BuildVersion currentBuildVersion(); - // TODO[wrb]: Remove default implementation once downstream BuildExtensions are updated - default BuildVersion fromVersionId(int versionId) { - return new DefaultBuildVersion(versionId); - } + /** + * Returns the {@link BuildVersion} for a given version identifier. + */ + BuildVersion fromVersionId(int versionId); } From 19bd9f9a09a07431bf8e0802c90ea0dcc8fe7935 Mon Sep 17 00:00:00 2001 From: David Wong Date: Tue, 12 Mar 2024 21:16:45 +0800 Subject: [PATCH 131/248] [Docs] Fix typo in DocWriteRequest.java OpType enum documentation (#105937) --- .../src/main/java/org/elasticsearch/action/DocWriteRequest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 7f3578ce9f16f..bfe1ff04b7b77 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -185,7 +185,7 @@ default Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { */ enum OpType { /** - * Index the source. If there an existing document with the id, it will + * Index the source. If there is an existing document with the id, it will * be replaced. */ INDEX(0), From 7139546d5d50abc8c6aa2479119eb5312f3c46e2 Mon Sep 17 00:00:00 2001 From: Nicole Albee <2642763+a03nikki@users.noreply.github.com> Date: Tue, 12 Mar 2024 08:19:15 -0500 Subject: [PATCH 132/248] Clarify filters can be used while creating a normalizer. (#103826) --- docs/reference/analysis/normalizers.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc index deb04a9bd44ba..6acd415437525 100644 --- a/docs/reference/analysis/normalizers.asciidoc +++ b/docs/reference/analysis/normalizers.asciidoc @@ -6,15 +6,15 @@ token. 
As a consequence, they do not have a tokenizer and only accept a subset of the available char filters and token filters. Only the filters that work on a per-character basis are allowed. For instance a lowercasing filter would be allowed, but not a stemming filter, which needs to look at the keyword as a -whole. The current list of filters that can be used in a normalizer is -following: `arabic_normalization`, `asciifolding`, `bengali_normalization`, +whole. The current list of filters that can be used in a normalizer definition +are: `arabic_normalization`, `asciifolding`, `bengali_normalization`, `cjk_width`, `decimal_digit`, `elision`, `german_normalization`, `hindi_normalization`, `indic_normalization`, `lowercase`, `pattern_replace`, `persian_normalization`, `scandinavian_folding`, `serbian_normalization`, `sorani_normalization`, `trim`, `uppercase`. Elasticsearch ships with a `lowercase` built-in normalizer. For other forms of -normalization a custom configuration is required. +normalization, a custom configuration is required. [discrete] === Custom normalizers From 265461fb1bee01a3a580a8473b0468d05a353afb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 12 Mar 2024 14:43:58 +0100 Subject: [PATCH 133/248] [DOCS] Changes element_type in index mapping for the infrence tutorial. (#106233) --- .../tab-widgets/inference-api/infer-api-mapping.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc index e43bbd036b44e..5ca5e0b7bf139 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -9,7 +9,7 @@ PUT cohere-embeddings "content_embedding": { <1> "type": "dense_vector", <2> "dims": 1024, <3> - "element_type": "float" + "element_type": "byte" }, "content": { <4> "type": "text" <5> From 8f7a2c4737eaf8311f56366ffcdb3b504867a9bb Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 12 Mar 2024 14:46:49 +0100 Subject: [PATCH 134/248] Log skipped elections due to recent leader heartbeat (#106223) Relates ES-6576 --- .../AtomicRegisterPreVoteCollector.java | 8 ++- .../stateless/StoreHeartbeatService.java | 6 +- .../AtomicRegisterPreVoteCollectorTests.java | 55 ++++++++++++++++++- .../stateless/StoreHeartbeatServiceTests.java | 8 +-- 4 files changed, 67 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java index ae53fa19da655..e9659bde065d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollector.java @@ -8,6 +8,8 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.PreVoteCollector; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -16,6 +18,8 @@ import java.util.concurrent.atomic.AtomicBoolean; public class AtomicRegisterPreVoteCollector extends PreVoteCollector { + private static final Logger logger = 
LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + private final StoreHeartbeatService heartbeatService; private final Runnable startElection; @@ -27,11 +31,11 @@ public AtomicRegisterPreVoteCollector(StoreHeartbeatService heartbeatService, Ru @Override public Releasable start(ClusterState clusterState, Iterable broadcastNodes) { final var shouldRun = new AtomicBoolean(true); - heartbeatService.runIfNoRecentLeader(() -> { + heartbeatService.checkLeaderHeartbeatAndRun(() -> { if (shouldRun.getAndSet(false)) { startElection.run(); } - }); + }, heartbeat -> logger.info("skipping election since there is a recent heartbeat[{}] from the leader", heartbeat)); return () -> shouldRun.set(false); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java index 0ea515012a190..d21add7e6954f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java @@ -95,15 +95,15 @@ protected long absoluteTimeInMillis() { return threadPool.absoluteTimeInMillis(); } - void runIfNoRecentLeader(Runnable runnable) { + void checkLeaderHeartbeatAndRun(Runnable noRecentLeaderRunnable, Consumer recentLeaderHeartbeatConsumer) { heartbeatStore.readLatestHeartbeat(new ActionListener<>() { @Override public void onResponse(Heartbeat heartBeat) { if (heartBeat == null || maxTimeSinceLastHeartbeat.millis() <= heartBeat.timeSinceLastHeartbeatInMillis(absoluteTimeInMillis())) { - runnable.run(); + noRecentLeaderRunnable.run(); } else { - logger.trace("runIfNoRecentLeader: found recent leader [{}]", heartBeat); + recentLeaderHeartbeatConsumer.accept(heartBeat); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java index ddb1ccbbd4f9a..f0b6d62ef9767 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/AtomicRegisterPreVoteCollectorTests.java @@ -8,12 +8,16 @@ package org.elasticsearch.cluster.coordination.stateless; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -65,7 +69,7 @@ protected long absoluteTimeInMillis() { // Either there's no heartbeat or is stale if (randomBoolean()) { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); - fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); + fakeClock.set(maxTimeSinceLastHeartbeat.millis() + randomLongBetween(0, 1000)); } var startElection = new AtomicBoolean(); @@ -76,6 +80,55 @@ protected long absoluteTimeInMillis() { assertThat(startElection.get(), is(true)); } + public void 
testLogSkippedElectionIfRecentLeaderHeartbeat() throws Exception { + final var currentTermProvider = new AtomicLong(1); + final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); + final var maxTimeSinceLastHeartbeat = TimeValue.timeValueSeconds(2 * heartbeatFrequency.seconds()); + DiscoveryNodeUtils.create("master"); + final var logger = LogManager.getLogger(AtomicRegisterPreVoteCollector.class); + final var appender = new MockLogAppender(); + appender.start(); + try { + Loggers.addAppender(logger, appender); + appender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "log emitted when skipping election", + AtomicRegisterPreVoteCollector.class.getCanonicalName(), + Level.INFO, + "skipping election since there is a recent heartbeat*" + ) + ); + final var fakeClock = new AtomicLong(); + final var heartbeatStore = new InMemoryHeartbeatStore(); + final var heartbeatService = new StoreHeartbeatService( + heartbeatStore, + threadPool, + heartbeatFrequency, + maxTimeSinceLastHeartbeat, + listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) + ) { + @Override + protected long absoluteTimeInMillis() { + return fakeClock.get(); + } + }; + + PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); + fakeClock.addAndGet(randomLongBetween(0L, maxTimeSinceLastHeartbeat.millis() - 1)); + + var startElection = new AtomicBoolean(); + var preVoteCollector = new AtomicRegisterPreVoteCollector(heartbeatService, () -> startElection.set(true)); + + preVoteCollector.start(ClusterState.EMPTY_STATE, Collections.emptyList()); + + assertThat(startElection.get(), is(false)); + appender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(logger, appender); + appender.stop(); + } + } + public void testElectionDoesNotRunWhenThereIsALeader() throws Exception { final var currentTermProvider = new AtomicLong(1); final var heartbeatFrequency = TimeValue.timeValueSeconds(randomIntBetween(15, 30)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java index 1df613a500f83..bad8385acfbf3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java @@ -233,7 +233,7 @@ protected long absoluteTimeInMillis() { assertThat(heartbeat, is(nullValue())); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -242,7 +242,7 @@ protected long absoluteTimeInMillis() { PlainActionFuture.get(f -> heartbeatStore.writeHeartbeat(new Heartbeat(1, fakeClock.get()), f)); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(false)); } @@ -252,7 +252,7 @@ protected long absoluteTimeInMillis() { fakeClock.set(maxTimeSinceLastHeartbeat.millis() + 1); AtomicBoolean noRecentLeaderFound = new AtomicBoolean(); - heartbeatService.runIfNoRecentLeader(() -> 
noRecentLeaderFound.set(true)); + heartbeatService.checkLeaderHeartbeatAndRun(() -> noRecentLeaderFound.set(true), hb -> {}); assertThat(noRecentLeaderFound.get(), is(true)); } @@ -273,7 +273,7 @@ protected long absoluteTimeInMillis() { ) ); try (var ignored = mockAppender.capturing(StoreHeartbeatService.class)) { - heartbeatService.runIfNoRecentLeader(() -> fail("should not be called")); + heartbeatService.checkLeaderHeartbeatAndRun(() -> fail("should not be called"), hb -> {}); mockAppender.assertAllExpectationsMatched(); } } From 0dd6ce2df6c0922d1cf7afcdf06d9888128b83fd Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 12 Mar 2024 15:48:03 +0200 Subject: [PATCH 135/248] Delete DownsampleClusterDisruptionIT (#106225) The test overlaps with DataStreamLifecycleDownsampleDisruptionIT and ILMDownsampleDisruptionIT. It offers no extra testing coverage and its test logic has subtle bugs, leading to flakiness. Fixes #100653 --- .../DownsampleClusterDisruptionIT.java | 443 ------------------ 1 file changed, 443 deletions(-) delete mode 100644 x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java deleted file mode 100644 index dc915738f6d13..0000000000000 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.downsample; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.downsample.DownsampleAction; -import org.elasticsearch.action.downsample.DownsampleConfig; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.io.IOException; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomInterval; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 4) -public class DownsampleClusterDisruptionIT extends ESIntegTestCase { - private static final Logger logger = LogManager.getLogger(DownsampleClusterDisruptionIT.class); - private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); - private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); - public static final String FIELD_TIMESTAMP = "@timestamp"; - public static final String FIELD_DIMENSION_1 = "dimension_kw"; - public static final String FIELD_DIMENSION_2 = "dimension_long"; - public static final String FIELD_METRIC_COUNTER = "counter"; - public static final int DOC_COUNT = 10_000; - - @Override - protected Collection> nodePlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class); - } - - interface DisruptionListener { - void disruptionStart(); - - void disruptionEnd(); - } - - private class Disruptor implements Runnable { - final InternalTestCluster cluster; - private final String sourceIndex; - private final DisruptionListener listener; 
- private final String clientNode; - private final Consumer disruption; - - private Disruptor( - final InternalTestCluster cluster, - final String sourceIndex, - final DisruptionListener listener, - final String clientNode, - final Consumer disruption - ) { - this.cluster = cluster; - this.sourceIndex = sourceIndex; - this.listener = listener; - this.clientNode = clientNode; - this.disruption = disruption; - } - - @Override - public void run() { - listener.disruptionStart(); - try { - final String candidateNode = cluster.client(clientNode) - .admin() - .cluster() - .prepareSearchShards(sourceIndex) - .get() - .getNodes()[0].getName(); - logger.info("Candidate node [" + candidateNode + "]"); - disruption.accept(candidateNode); - ensureGreen(TimeValue.timeValueSeconds(60), sourceIndex); - ensureStableCluster(cluster.numDataAndMasterNodes(), clientNode); - - } catch (Exception e) { - logger.error("Ignoring Error while injecting disruption [" + e.getMessage() + "]"); - } finally { - listener.disruptionEnd(); - } - } - } - - public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, long startTime) throws IOException { - final Settings.Builder settings = indexSettings(numOfShards, numOfReplicas).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); - - if (randomBoolean()) { - settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()); - } - - final XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties"); - mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject(); - - mapping.startObject(FIELD_DIMENSION_1).field("type", "keyword").field("time_series_dimension", true).endObject(); - mapping.startObject(FIELD_DIMENSION_2).field("type", "long").field("time_series_dimension", true).endObject(); - - mapping.startObject(FIELD_METRIC_COUNTER) - .field("type", "double") /* numeric label indexed as a metric */ - .field("time_series_metric", "counter") - .endObject(); - - mapping.endObject().endObject().endObject(); - assertAcked(indicesAdmin().prepareCreate(sourceIndex).setSettings(settings.build()).setMapping(mapping).get()); - } - - public void testDownsampleIndexWithDataNodeRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - 
.startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (node) -> { - try { - cluster.restartNode(node, new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - public void testDownsampleIndexWithRollingRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - 
ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } - - /** - * Starts a downsample operation. - * - * @param sourceIndex the idex to read data from - * @param targetIndex the idnex to write downsampled data to - * @param config the downsample configuration including the downsample granularity - * @param disruptionStart a latch to synchronize on the disruption starting - * @param disruptionEnd a latch to synchronize on the disruption ending - * @throws InterruptedException if the thread is interrupted while waiting - */ - private void startDownsampleTaskDuringDisruption( - final String sourceIndex, - final String targetIndex, - final DownsampleConfig config, - final CountDownLatch disruptionStart, - final CountDownLatch disruptionEnd - ) throws Exception { - disruptionStart.await(); - assertBusy(() -> { - try { - downsample(sourceIndex, targetIndex, config); - } catch (Exception e) { - throw new AssertionError(e); - } - }, 120, TimeUnit.SECONDS); - disruptionEnd.await(); - } - - public void testDownsampleIndexWithFullClusterRestart() throws Exception { - final InternalTestCluster cluster = internalCluster(); - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String downsampleIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.fullRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); - } - - private void assertTargetIndex(final InternalTestCluster cluster, final String sourceIndex, final String 
targetIndex, int indexedDocs) { - final GetIndexResponse getIndexResponse = cluster.client() - .admin() - .indices() - .getIndex(new GetIndexRequest().indices(targetIndex)) - .actionGet(); - assertEquals(1, getIndexResponse.indices().length); - assertResponse( - cluster.client() - .prepareSearch(sourceIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - sourceIndexSearch -> { - assertEquals(indexedDocs, sourceIndexSearch.getHits().getHits().length); - } - ); - assertResponse( - cluster.client() - .prepareSearch(targetIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE), - targetIndexSearch -> { - assertTrue(targetIndexSearch.getHits().getHits().length > 0); - } - ); - } - - private int bulkIndex(final String indexName, final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier, int docCount) - throws IOException { - BulkRequestBuilder bulkRequestBuilder = internalCluster().client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < docCount; i++) { - IndexRequest indexRequest = new IndexRequest(indexName).opType(DocWriteRequest.OpType.CREATE); - XContentBuilder source = sourceSupplier.get(); - indexRequest.source(source); - bulkRequestBuilder.add(indexRequest); - } - BulkResponse bulkResponse = bulkRequestBuilder.get(); - int duplicates = 0; - for (BulkItemResponse response : bulkResponse.getItems()) { - if (response.isFailed()) { - if (response.getFailure().getCause() instanceof VersionConflictEngineException) { - // A duplicate event was created by random generator. We should not fail for this - // reason. - logger.debug("We tried to insert a duplicate: [{}]", response.getFailureMessage()); - duplicates++; - } else { - fail("Failed to index data: " + bulkResponse.buildFailureMessage()); - } - } - } - int docsIndexed = docCount - duplicates; - logger.info("Indexed [{}] documents. 
Dropped [{}] duplicates.", docsIndexed, duplicates); - return docsIndexed; - } - - private void prepareSourceIndex(String sourceIndex) { - // Set the source index to read-only state - assertAcked( - indicesAdmin().prepareUpdateSettings(sourceIndex) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) - ); - } - - private void downsample(final String sourceIndex, final String downsampleIndex, final DownsampleConfig config) { - assertAcked( - internalCluster().client() - .execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) - .actionGet(TIMEOUT) - ); - } - - private String randomDateForInterval(final DateHistogramInterval interval, final long startTime) { - long endTime = startTime + 10 * interval.estimateMillis(); - return randomDateForRange(startTime, endTime); - } - - private String randomDateForRange(long start, long end) { - return DATE_FORMATTER.formatMillis(randomLongBetween(start, end)); - } -} From d8da8fa61ab2c7e747d538dde93e2934e15682ad Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 12 Mar 2024 07:33:19 -0700 Subject: [PATCH 136/248] [DOCS] Update transform health rule details (#105719) --- .../images/transform-alert-actions.png | Bin 214762 -> 195127 bytes .../transform-alert-summary-actions.png | Bin 0 -> 142958 bytes .../transform/transform-alerts.asciidoc | 114 +++++++++++++----- 3 files changed, 81 insertions(+), 33 deletions(-) create mode 100644 docs/reference/transform/images/transform-alert-summary-actions.png diff --git a/docs/reference/transform/images/transform-alert-actions.png b/docs/reference/transform/images/transform-alert-actions.png index ee3328ebd9907f165a259b23a23d2f076fdfb766..a78c02fa305cd0a09a91358d8e63ac283b821d81 100644 GIT binary patch literal 195127 zcmeFZXH=72w>GLEARrbL5s)INh%`ltbP$zJXwpMeIuTH+5CQ@!7J8MA^xlgElAuWM z2_$ro4go?Nzi*8FBN_Kda<6->HP^gmx#rAAeLZy+CQhakCr+?v zYCJSNapDw$`dT?nPyJ-dX-w_JiIXl)YHIqLYHED??r!!@FYHd7xE1Xc-Ka%taQ;0~ zM0E?LaYp)bW%OwSo1rTgK3})iKc!~;!NLV&2fzLbX!=tQFhjdf;~jjNKS$5z#_8Y@ z#)RPKg0K3De%?xd*5^;o_;uJ~KEMxiVLvpum0%?(pTe|Yf_=~na6H90pdIPDHr}aY zf6{!q_7!|`)@UyeG+B$l?jl}SLGMC|oK_v&#G`dLGf}QZ#I-$hl+=rMy6N{X7YAzD zGMIhjB;sB^sod)%e%i@6T-y_IKxZDjYdPS3aWwP%SM&b%lZ*8!x$!p&WjkH%N8>AQ zJ(cKSm4ES-`B9R6>ZumPSk~+qwVw|;1jRrGsGKXY&@c`IR(I*2aqXzB$fXZUNte}A z1g>sXjr_cEy?&h4`O|rixI0()_^)2fzap?oISe4eOak6?YL%Ka7J+xz?tfOp*#VgW zD(dpa1j%JMFpJGbW!+_8HJO{4DreLFKvdtKcVx2e~s zPn-;OI&q46b&~qxq`pp^pvk29pHEI9GHL&Hz4H4@_Ro7SPn=LWq4`kN`1Q%PNru#+ zhveq1Q0rV%Aw#-H`pso#`sXY8PX`z5v$I`NJugxY&y;Mu^?|POmLZ*xw8#g;Q9h zkw=W-qHb=sxz^RrTVAksQ;-$DlyFG_qDRPYQR{r6%Qg* z!h0fLul&;`A^s4MpI$QXbx>?O{eP+*br==TAS;T6kY8<2X{Gn19EYOci=kMn@vrc> zTLucwaO2W}#(wzr_fXyg@4zurqg<2HWQ3h9|DP&+`YrSI)(_qWlKsX+gK&iov$X5PrOMO?e**>mpcm)-MGx0~%waFg9m{D5 zhy#MW{0ZYStI;={qbZ8cLsxu@rd6i(tgw|KyO34JX^Gw;p;R_u<0vm2mW+`--k3sMH4xKu`$==HKQ83^#wo@rZQnrlLY3MtRfEqMz|D225#}o!WP;8jp?)LBQ z`2QII)0|jVO!*bcOD2l&$s|15q&=%kTDvTo++7gOI|g_;Gzj9{gk_JrY=4Y5t#}%g zgIDJM%TO_ze3togeQ7evFP%Z>Q9b6>rCv1{ja;qx;G!JQW2gMI+a&&Lf02`|4xgpE zzwVbicY*MXJFg635p#Th+eQI(zC@|X&FQso6M@GE>YP$aKxwy8R^o~m(@ai(TRSwB z{6-N%=vV^z-}uZf9$gE@c6G%UKiug`5EWSHPE2$jj)-O!cf2hH#=PXcTzAx2Fj(a} z`HOA^NjYX1q}xA=(F3jxt;pry^4XX`9prJ{2s}R2n#GzB`NYTxIxM z@0i#m%r8vCv3Y>xi3X2+;|h{~gZm9|&BUO)UTpQkhz(f%MLY1ykW49holvGXTxwb0 z<%nBJ`FvS4flb(CERQ-lR$wYpFU@A5#t0Ct;JbZ)&t`I@;#qdjm#yrQ%8lXGVX*CR z(UXK>6P$H??CmE-9fRtoaYY!i?^4>uYNL{+Jf1()Cnry(jt*hpv#)6Zg6d|D(GsTx z0lU!K6^AM1%8{IL+wx{1H(RFtET;mNprf_mL{NNG)7&a1zLC5ev)C(src*}Vv<_zB 
zNwV{cA#(P)jagkb54w-s?HhN~K=1VL2)l3Zj!nJZ5%nd)&pn4pvpkr5+2ZAXV3(*- zX4R1DeJIVp=h9o0>%KXo%5i6tgX3`fJhCON>0bYR-E?2SOtzAuwfRS5`#b%)5^J?G z^JzZQk;c73!iG5?YQ-!}R~!`|Roi}K)l=g7LmgkvGunYrnp!?9hk`F273haT)#P9=YlQC(E&lFdJ%PB z%Mq7%YLE8OIt}ZD4b)zp{Bxx?{48-}5=?%`?6*>h4%|1;Q{K6HuvWbcSQ=6plU)K{ z@fgq^$3C@39*4}=pi?>y50H+R8UL{65ma4g^yj|+Qsr@ggk03&CtkbZi5g%4xABA(k>a@kdH*i=t4Xm9b z-&VDz(X3pZdjC|F6MC`PhNLtKN)P;%+q~^a$9V#bt_u+X)`?g=>ALXmR^#`)_(GX% zwP(PjU5*POlI&7$_8#?)g9=eVT2AIbvSP&qFcW>7*MHf-#{^dARf9S@rw~_Wa}Srf zqTCau&I9Fd>BU{@PKT2^zN&@Jx`GGL{cX@N`aIIPEAxJ)lLUIn3Grw*cB*W;sHeI__6(_S+v?{5m2 zSIzgoKHFIX3#7nxVcu|oJVJMz73z8r@!tz{~a zrvmUtf_@iv`idA1!yB;rTwe`vZcVV-;h3-3^jJu_ci&_TgqAKSN-M`XCP~`wwi90Y zt-*=A(xcDtru7Y7h5254Z0jGRpM}4c@Fdyie9@!1N$$!XR6`A1anJ5D#PI|f}R8OyVb=w`Flx_D@ zTITzBCg;C0Py{`e4sQvjjWid|O@8p<%p<-g{3&!Px+;auT;?8it*Mo8QE|kr3RuKq zlDu2M(+2uhrQ_GOPaDM2DWh*I-NV_Ii}b%Xalb4!E{r)-Me|I?=NQ>M=XuT72PMd# zoG3pHcj=nMZ*0G=-y3@zapo+a^8k2AP+-l#gSvASDcC{JNQJ{Uidwf(2TGK~ZjDxi z9cN~$oL%t5>$yF5-NxcvDA{}a7X!Y_1?aX{>L!sWYUwJy?vv)sA*Ao?m4b;82O*+t zs4j#EZ7F0iahJz!>S|NMm+8Cnf0zlCzVjX1&DiC;Hna6kEFD@m zA7X>>=GboX;{ev_5ZYaY;?}-EmcZ_W5FlN_-LzbO;Y-nziP3B0fyWJ!wjeUQ5S;S; zv@F$WFpHmyEEYy1lAV#ByHi=yftBjRD`ahPC3cgHGk=(^jsqHV&iyf}iC9Ko2#L}2 z=t+~W(UwbILTOvjb3YGX=4AW!`XG!$nVu(~j=OyQyjhuQs&uL4s45rB#A^_jbPk^J zLy%xt^-Mc&;HTGbE$DA>pCkmzOr19t0a*x)&6>|6T*jphOGXS3>njZwbuWHd<|TnK zC5%GoW^83G_}zSik5Vm7+L8=h!`gqvE=h68VH6IjH?k@=(H%^Tm_ldT)$k>I@cjw6 zQ37<^_6tv-Tdk%S0N7c6O8$N4nTNDI0h-uvsMc5jZa<~mYXB%c z8CM!e;U>u4eJYo=;yQ4vY7ZBpZug{+RpF?wCZz`-U8ai_`kZh(PMp26>wS0%e?lX3 zcf2reie+5PJC5R5QSpgl=ZRQp%jBQ-XNg(4$WNYtW~BOeCAoLzns$N9kwOr{+Eyvx z<{viqDhG|Z^sF|mg^@CaKWkMp{D->BBZV2{1E|(~%ehJ3y5$2uuxME()!mzK1`NFF zjL8@T3tLLNS2$}+i=75h{RUs~KGC>&)MR9j83urt!pWg+^0e`MJPHaFpe%?tG=JkE}#Yynqv{w#h|5f&U3vv2#+VPjE z+Qtp?bV$;u?%3o1^j?FmvvtH2MsP{Lh;u_6i5W4rg=g>q6pjnNsXtgj9|fbb9)16u zpJH>oZxg*=n){?cUsZN*$VjL5g>R&%3-X8n8B!7j!bF~@RoQht6zMN|0PS*!z3DJs zuFmT_5WqA~wNc~Q-xmAQ1VR2B2SJQu{j$pa@za46wN*@M?32kwg+B~d)F~Qs)_u|o z)VvPTIBlcgd+j0y&7@moA@%wsc{+dswC_KVoh1R_8?bR};jWscfQ-9GT!n_YRr12x z_1VC2s#~BDa{3(IK^(2M=H`k%%5BhJ3v8wtxO$33MrK>`GjM zI7oFy_;jfMe*%Ylfl9(DMB78(WSj2KmOp=z@yO75sw+T)i{O%(V*KEk%Pe+ZxuWdy z8J@#WO^#q)-Av=k=kKVe_>f!Pdw(%s=H6H>T=vk#myi^M=oPm_D9nb? 
zk6BNS5xq0j|BI1NO&kTBcwtjNQBsp{t(Sa32I3-D8!J~nQU?>raiPV^Ca=Ii3-@;hB z5lwsuUf;cZphh!^4qf(rsV)pJKe5So+1f2Dj@7QjiWNVRf9&UpzwDm=+u^UpPMwcg znxsZN+HiC8I^@e=ZW1pC?je@QRF~6zG*>j&xX|#~LVSdhoJfD#1*f;EjXdLjo&%3l zs4T@KPu+_*11?uCXj`;Nk9AABkJ=@kqDR#9C4zLTLj>}UYq`r9&-go~`Ak7Uo~5_A zRiET)u?^02Q9UE6P%;AKp=&vP$SLK#zX)Dj6t$e6H|noZ&vANFqL(Izs_pI#+OrY* z;O#<7uOxt^$|9ADgP1V~I=cob6!_ZoWc?eV4Wz$9t+PYZOZ< z-8?>dEOfC+91RL-EVVKBD{_x46=6P9O5|#{4^_d|rr`2@p@=sy&(K$i-Z;S0a)8JJ7Ug z_ZMk@_&V55Wa%38-_%EWag#VFwRMBFzb)Lb+ z0{8(Q3X~=f)H^SO9s3KBT#9}=W(QmC!|{biH#>Znq#1=jCw$HRHsx>g#j_HV@F~Ib z?h%>d<5uJBeNc}4DGisfu#t0dpMCqvT@}5P{bWy+m$2IHUM_X}Hfr_|!C?Q~o*K_p zhk#aqDY&>;E=wXLtM>9pVbX|p@}E#`d%2^g8Jnk@Vz*Ld7laL{lyWa}9Lx$@h-TV# zddIoOs09L$P0sQtTrsh#veop(PuANWeltKRJq_|Bs`^DRY#;%MW4|K!EqJ2{ia_-T zqs5got|va`81$Z*3DW5O`NSma;VWNqDRu$9m7S|b_BMzd2BQU^9@jlhuW7oEU^w&8 zWy0vI+1B&))EeNrk3MP!E7@(W9_XZUHz5b@Af+MeH>95RUGL9)?$rjGhT~dPwBG@& z4U;SdwuA4*wbbG;mICS9X662dWme{ep1XsFQXy!D;l1V{+A8mLIrCb-+O?@#OD1+9 zk@(x(F7+A8fyX-8)71`1snh;9ut%9P ze`p0YQN#FqSU!zp)&Dfz)R8pAPSSuny_A<({&0w>IlSQC!=8?@|HFBqjufL!osawE zIlquUJdV?RQGC>48G~~;{&$)GE6x9V4sB2qqqAU3?;+Jq<8Te3|F2`O!FCck4QX&+ z(2Czc1O9cOgD$b%MW*?3bngONCZ0zoaNBpb&3!xWZ}wiR@9LGsnk; z;q$xrOll~(JqyDd=1BM`*8SHnIv8mdiRr#ru)EvRTkt&RKN=SUXy#gW4`kR&xBK?CwPzJuLU4wfjzPnWMU;3+Z_R<|Bv8U1c`No8T$4!PQXR&`d9~!q$x~X&@ zaSvH8P6CRUKUUsLV%>KS{D=+s0=>8ypHJm=ql>_~BOYu4VO#9NuDtldfU z1xP87KL3d=ImvH)zkEYj4sr;dwx_WUIC;k?!5!;N_mx$8cyKaq6qA`aT6cXRBd{dX z^he(p=ZRhRKM|7(dn|7jI&F@YkZ;228{D-vg`Wu^QfCsp@SfvjIa|8M<0I^Y>wn<; zZyZj0BPI)e{Kjds`$!^9w>BEYX#NPxy=!>dZ9v<7;pVcP(zPamKl9_7w=(;$9hyqM zzbKd4Ir+rxOb0L_Y@Fk)zpwa+j?2(t3mC$kKJ_Jx9gy*l@c+3&ojPl7MDPR>Huv=` zxZOlaqcW?Sq}?(mUXPX?83UAuc zSO%~=bX^L1`{%*qi@JA8r=B>zj?zhFMf&T_LWodt?0zEhvC{ zuaOU_fVAmU`%^&YH3EW+Bldn?+W)jiQ=9(r#7&AzcIcnmbb@B^lq6;@oW+-@%cXjd zr%*g?!{mS9)x|aAju#w%MB2+x_?uKZXg01y$`i8HeTf3~af-PHw4e>k2Fj~d1P%yFLx5&$;;e67=?t~fE? zxe;5|oJgdZByX(SB43k2|00U>=FQx=S8h0isiO{2JE+soX45JkC0xUOfxm64WRUGg zp@ndafm=iU9L#NdVti`uZ|C8>hT=(|ruLt2CQQHjQUTxH!ETJODuuIcY3r@p!@Ce< zwGwWLEp*36cg5c(rQh3VPIMFgD`&|UrAgCBT#E(;9H~jW9SF~#!O6T}g_fE(s;*|} zQ|Z>*mkfDc71?O?-jd(=kO7hJdUolr2dMCkeB0bq-|9wR3i6J~^1?<4{m;cd%jLeR zdfMM{n~r{3=3zy3Y|vjeDXNA>^=^8mKk?m9!*-yuBV>5BcAJ_zHR1|cbwua|4#74q zUDW%UV|k_qcUB? 
z?jsDgKJL>t5m)aDqXGFz!|!;!T2x%L*1zzNy8r8nZ{d{AnGSCDuC>o2^?`vs`I{JB zpBZ5k2g`#TxwyYVaJ@=V`)gL7eJ!u3`QvYc@_){p4@_q$i-{d*#!_jIDwFi;zl~!H zQz>fme}2~g(dUgv!u`AQ&5Zv5-#=Fpx_lOEf)>7cULp@#d<8TR)AKGc9y?b2y}lElJC2e_|I;FO!x%{jX*IkLL0-zu5Op5J}lBJgj|* zSffCS*TxaON|`E^vxb=ug@nw@5yr@UCZWLREgdX3GL3Cif4d$k+DX$7#fqybet@~y z1%>YhIOhi66@8f<(o#m?&!$h8Zk(R=>SZKz?lWnzaHZdp@rLQ?DO9TJvu*$5g8cWA z_#OuZ64v8eZ60^=;^EgCWn5lH+%~JKxpWKIHZ@&uSG_g~Yo&5|$Bu>Qudzc;kkZ)1 z=y0HXy=JlMkC~s|&+_$q{yc%%wE>girIf2L+iwMh2Hd>0gtedv3!?Cr%e&3_7 z<8j3KFA@hv2OcYi2fvv@2%&#_*c3g#l(95gjt0m-fqAZo3>TYrU%+-f`p0kiudAqG zn$PVKaR(t8(MR%in#b9nv`aDTx!bbr8m&hCo9kW49Mj#&ZpgSNMmM`s9WvtR%8?>0 zkSXC|ORGgoWsDB;N+ig-WZljS?is!>!JNGEZuPV{6svG=3^(&V#BjHO)f_6CD$abW z=R>&@2*4;i-j^nC_v3A+1eK$*rP7Grqg38ixHmcXk-6I0r%NB$_FvCC{;ryU9<7%* zSQMF*y8pCz`Ro4!wQn+-*OurzvM#udKB^rDiVJ!pfZJ@MtEN&S_~t%t3Ad4_=7)pw z-sbQW*F^d>H|An?a9baDp!QUgui9h(mD6I}boP(kS5DuF)kz=4`XCj2d~4Rj!uGXr z<~7y(aSV_4gSVl{e8QopFVzN|7_SA3;^E}HRIkNf%R3jLY?*b{mpHc^hBx3kKm zGEm#|v5{K!uE6y+DsyTv^?GhSOh$*9x<@L~j1%6L=VbARpVzSBNU@GQ>z5Y)QrsW` z)u)ds>UIlpfLzCTo19iicCKgI911#`8(A#K4p%h%WL(6rtsWWGpk}S%V_A3=r%45g zcyYN&pJZB_%imzS!oIT~)z?UF@S`&SqdggdH06(gH|S*$9p+4tolS{>MXR<3nteLz zhh5h@(@*#EH4Z+xf1ir4ij5#CD(`74NzLzR8O~nM0aIy?_)<$s z0vtlTnFBZDG9W&@Ub|1myHr8R5oa3n>Xk{GXP_@1^T$kKA5M;k5d<#~H7L4z59Jpv zSY2FrK0F@-^h5Xq@_KzQI@yhd5z3ALZ$r`r8;=z~dWXmTv)CNa)9>roLN^(Z*rs(;qqOZH_+r^&HE9iYd@q$gsx!G`R4Qg=Q^J|hCbE!@XT!>H9?w4=u45mYNR`(;9 zYxdUAwDSeo{K{YFRvfG=%J4qqFN?imadjs8gDH}Cq@K+>5Qb3k!lve`J&J}^wv2^O zit{90C-lplz205ArQZzsQXL3Rblr|*o4@^}D(B4dv}phZ8r?)?0Fv&`3Fh~8L{2AG z*>&G1CpFylney*;OFi6be{SCBng%VR@+cjZUXk&_VvRqgy_FTO7l@Pa=+um*&gDUy z1ygYNO_m4FMc`VGHjeymsr509@4>&RIf9t)a z{Eb&$3nZKzwQxwCEM4n4%H+FUcw^MdsuKmw2bUnKRjRJsloM0+;XrCky- z{8=yRlSeO`58X-gmkY=eu|!O`s+X7BhjU_}#b9L}0ZjihzaB3?LL)xV(Z^{k>xLdS zeNA5F(^ao;6yFj->=|zNeM-E??Zf)7LBmu!YKr8WdR?1hwR+#U(8^rT`E{0+?}8l$ zUi3?`XEbtY`r;Cw^go8)(Bx@n2k{0d`nkJ*s9usci=b6ttjNK>;=$1*Hdk*PjP^>m zjXc^18vmdH1%wM~PbGr%svk@4)i$ELm=q?Q=y+J8>CgIO&MrDy0+bHE>`AtuFAly) zsJrVoblSY;^+%B^8s^WJ#S>aF1kxVYJQj3$q`aOAw{kUZf>?{dlH?qmPit&ff)MqP zj~TWF${8r_&j_s8(EVUdk~G_!X^Ausv=UP;ZPW0f3H`{0vSL^?SRa#7Q6J zD6BOvk4f1{cBn43jwY-=*oX5QO4%B|f=5y=?=WoNE(R!@eb0I|r?h8MO#}}YT?3cM zaehHb9)VARUH(};B=f;j&+#Y^V{2-B%*VtVX%$+OKo5#M3eYIS?lIS@SbP6@?AlQx0G zsT>eo<2tG3id?@;zB_P@#Y}$d5HhQn0R6|Z4Y8UX9ZAZ5gX9|{t6Z!AZehgRJG@V< zimXNq6989kSRU_-sxdFMLpIh*cn`4;RLT#tAOXH7Lmjler~I9^;Da`8qiB-E*o8FC z0)a0&T`7W*qdJ8?P^P(+{4{|Y*gEZ%eRyo7;dP@;V>#`1a;w+tw~mNT z8*;(8`B<)Z#^}sc(111>uT*|KIMvt>t?G~pzL9ec8*%*0mBu5vbgB1KPEgT?&lKC` zT@-jQ+Git2l_#yJIEV|m+piotk0A3fQ46>P9Z6|Y{E@%aH`W(sgW`XXH0T#o<4dhG%n9G4ltk7X zcMR+asVup`MQ#N^zp@Xo^==X`9*i z2eI$jZ-L_$UU*$HL_w$EG3!_JS29WfhPeECh&h*5lTzLxrN$YDqRA z-Z5Aze47e3=;sj55nMU)#w32(8BacHKNVI6Th#*pw6b(?&aAUTy<<-oZ!-zG8hBLu zMDqMfVxHd7k51RY*{rAw{FZfxH#p<&Xom19`b~)Kai#J-Dsd_zof~NKMUH%@zqdfO z5wvzA1K$aINpnqj&g=JFq+Yy^k$NP7bh*$y^uWz>YwcVh4jvUPz3%n%S*UlPu|7-K zO8<7nGqJ8E)BvSJH_DJ?QbE6!9`^G}k0kUH*QgK1&-cKbT3d)=OvUl$1bxNv>DS1nf5-n0$^Q&}cC$&Kjqez^%5ngg4oSN?YKB&$s zdQRbGIZ!?1{*xl(SF0)EOX=E;O58$(Wr1^AaKCn0)oVu~d)wU>UHAIW+zA~y8I(C_ zlZSGtpLEu3h4mhU0C#$Mkuc~3_}qGPj4PPbCsl-d+4qaG%G;&fpSEP{)ZCNyyMT7- zoD27boe2-xHK8=lT~wT~shI3a4*hmftx^idg&g#Q_Q*(oN{uL-D5q$w%$!_98MC&( z!SWos`TcbM0dsP?AkY=wNu6|1S!-@kXYqKA_o{Bvw(p4KBcRR7@UwDRFVCO7a3bMs z122^ok0W4f;wcBGdQ)jHR(v(OsJr<9IPE|81Oh80@4S2-Lh5sP0%&+W$O44mt&tVQ z0u!Z)$_W|3BM!f{L55tNq!wyCr;ROdB&H9%i7u;M)a5*datgc2!nq%JMaeqo+8??!+t*P)0fdg{Z3R&dvZ{X1N!|JKz!KU^e^&mPkLTbBexS} zjb-rNRo~FLdsvG#n0n+ChBqfVzjWoK+(LOi+b+sR2URx<#CKgyS;ZfeFwlh^ZGcyH z+w?~{Y&JLVgMYc|;xJ`^kp=u1yxR#U4rYo6q29EAs}v}G(`D(76Q!^lKFU8#64E5K 
[GIT binary patch payload (base85-encoded literal data) omitted — not human-readable]
z8P}U#6(pe=;>^)B2#o~pJgAD5w(H_{&TR=>9G3=Epg|QS=`8n;<~u5tJ6d0fb9}IA zKY*=O;xW z_JaO9cc zX{`}5b~klB8VPgRwlK-_T|GX^v~oUOV(uwN)k4+{ini>E9dy8h?H(;wv4-jQg)S+= zq92|&${SC9FD$j?+b#Gs~Q)whbP`ac$C6#U#4bt5uof6X0-QA0B zB&EB%djX4jr+c5hzwg=Sy#K)W7cVca%e7?AF~=NZJkRI8VfNmbrv+ar)h@pM#W6p> z6*8(;#+B&DBX!y@eO{XW2;SqOeyJ|(G=L6?B-0>E~t4kPf7xW~fe(PD^#?MY}<=O9J zHl7jZiilYcmP>856U^|1*w+$He*%WTk%bZWTxG{&+D#hktC4239Zj+!XZ<0LDgDzY z)_QZjaY?;fZdAQpba>7mP$Jr@x-ia}*xb*?L-FR9H*-C}q3^<>db)ta>9QuC-o{UA zkB#*UBk>yJx$6%SHKjk!I0lKSE7io*cukC|Ly_{lripaj2 zKkr)(1>H^LM&cr$%hIBgiP(1BR_hm?2wVG$U5kPs9$8U9f8oUtCUcMEwWxp$oh@$b zdgr<2aR|9@u|QB2`dP`4%o`R_!r5%}vQpypZ$S}0+NKF1Pn1!euVE0eeld4EgRtK$ zk1Ax|BKcNK;i+a|?#xskN@jHmBNz>MwhK4!7|fom`AR`MUcvy$P!Ald-$=x1E6^;$ zM_m=9=J?3>kOYGYu$#gxQQJD5C-%H5Bv_X%EM`ldvgQ9?__) zp;&GxcT}CP9~w`~--l_YTe704cxF4>qmA{j;dnL5l{z!~DgL3yaLF0vOVikS7r(!W zFSL(Ku<~`O{u27)M5BxoN3Oc58al;kG9=3J2hKihz1<;!aw>Xad0RW+@|@c1Tz+jQ z#J<-j9~syQM@f#3?M8a5-?LTeoVs0@xi?=scbiU`>17fI;wESU$*nf|VvszSL-^VG zf*tt>;pRYRjYyg~1gE3eKPOi_BA8(JtS{>{3%D(z3B+|>{CaijnY^4m2=*r)?KgfQ zdlmJ$zlgZMB@3#xE^lRg8|wDS^u0+JCc5LS#08;EC^>*twIKHX*N7yp95Bu1ibQJ#kCZdXcaq zXtNJhbCagMWgUsmt-M*YYWz~663%SFZ}p+ao0Ip#ZCI`3jl26FT!~%+p*Ujzj$Xawe1P*9CqlsQcA}jhvRls z6bZR~&sV^DP2b`sMtVaw}%LV>bp&xtk}l*{MG~%J(F1=`8Ki_3YKhi!uG0MhOL!=q_p-Y zp~(la)&N%&@U6g9b~+?){|XY~(J?^YvS#c0C0*f3EXnVzUw$^iZ9<&1D%Cce7zdYG z9^2rYo<8bjr4i(M0!e%);c#%XOAoeu&(?k6oz+{F-tscVtkKSo=9JywJN&F;!q_&o z$02*%LvY+B7GkMrKHa!w(#f|{?zAzsZi37lVB5MV{U;waIH^B+diWDa{{lCeS&wPQ zJ5J9w;C9|@t`GTm?&$|XS&{-N7i6iRHR+KkFn^CvptvP(ajJW1w6(1L?r};<$$2y= zI|B`$G0y&V!66gF7L8mSU^HE7x74^Q;sdR36dQu!L`J$~--AoDH-yA{uXYfCEW4nK7*liTS=wfpFby6w+zC?2)@AHkixwcc{1 zu0eMzOr@T^D|t0mc=5fP4ziCN$6RJ>L?!DkI80#5kF2M$ESVKxXS9} zrcAvscpWYlq3c98;bX~ot-ud&vVkZUNY14XTSm@)P6+wiPWRcgATm%U_UAtdD2P~# z52sP=#dM;<|*tIndH*0E37<+Cp1YqS~TsuyKBW|ivY6%9TmCg43BoF7x zX|dDvg~pRh539b6^9e*ze)qY&|F}Z4S4=Lt+$vXDLiVYxrQ;il2ew-BxfQJs6n1(R$ZzuY|LL1ND zJy}k}>rA_EOQGlMnnOhUy4{2U+0o@k zMRapApM4Q`xf5lu6YZ$9r{8Z0>Fm~S++Q1LzkhYil@f%6)m-21I6N=k2f84a{lnS( zo)^~5-S-cT$Hpt1($cY;B!k|m>ePd%`Kr})vK)fRH>#kw`-yNp5s09%N1{z5k*qEs z-OMxnFwi^}GuZjGtf012Hg&~wuaR1DWo;eh&w=j=;{;k5r5ykLJe*c?=P(7>DbNyo zRli7i;6ttib6L;u_|>?8pQiVSr3ybCm*wprZvgjlGFZAoWnGqsgy~V!z{zd;X9qTT zt9O79q@&PSk~qj(Iik z)OxrWmL%^B5#1{+OOlDhUy(d#L+u%^#%ZgHe|!{M0X{7cj3Ic{ z)Mt=@iv?RPRaY!Lgz`bY>NccLHkW@j2Q--@!8{EEyYDz&H#Z|X21njvT@HtKYpAvodI^Dd#YvR(;XHa&;3>FGM93T z$*WN&t!?2(`zrQePvlkt1MNnM742)$9qS(Jl7P#a^99zD9r;2|FOSCFi@+(Rhk}%KZQjB^NzJ({KVpF?&eFvj!CKRL*;y9yVk2#AwRJ+U~ zmfBIO2`78qebJq|pd%Jq8{(F0gFjNsAAi(YJ|a9TakxOJcWUoP>gXl=+S}}hEk{H? zl)`2X=%X`PYhO8(e5bVzl*f>OaK4s>+qIKO(l5}7a;42%cPSLpW`~_=uEICJndmQ$ zWd6AKg}jY?)!?;b@%96a;B=Vt-(F?EJekf_fswH+;b$)o9Ok0_^^Jg6V3t;?=7qP7 zNkFz$cytz(oE3o8!PiwIwE>hlFdU@){Tqu@zgVqHyI=r;LyNv? z!%K^IVG5q!#pl!JmXt4W(UNYR;zR3AV>=9upv;+7)Z!FwUJs+xK$_ zj=sXp+LfLcGiu$`+Aqlbir*sm>AI&|H+cdn%fIKoeCEZ=$%+|}A0 z?Ul+y>A5DC>~)h*-5{-3I}xWI5h9@d@pU6MQc^{Vaey*WYo@c}TwA$bB59E!{8^D)ZJKZZ^hnWvX@CR@A8d!|F+EQ&Ve zllf1Fp0=WhQfD)HTD^81@$j)6u+*yOS4$3$K`qTz<6_))r#sJPFO1U^83S4U2Adw5 z8zo1TQ;S6f0jJ6LRg1OYzh~Mp0L^)9o9sQe^(vCW0OnUO$7rB z6DZ&`h0^kSeqU~CciZpgg05)n8H)_7S?#u%}<#&{&)`aFP*OSV&`yUQ8)FyIr_UuQj#a1 z>%=h47~W*oN_Wc?(5}ccpCq!*K?&oPkK*cGbh6+!oa3c;%f8G!>PS29(Z6Tb-1cL^ z*U5n_m&2hFD!9psEW$E=sSm6(b^SFtt$EYTa-Ya>sk@?r@F7E2j?;mo_U* zhs5YF^#Fdv z9FXMKBBoE(<|)&%XG`9o1lFyB{dvicvaZ7ldq@|Pe{|tia8JmbHfBE*kQa>%)3hUP z1YEg5RpJZvJ!TkxCuNr!e>81n?(9kuQ3(8i1c)pNx9F`|jxJzNhS8DYQ55?}w|*a! 
ztS!bDt8-5oagOtnc6505{ytjGeI9z$Mz9?C==%juuQdyiE~`)NDeWdadYEtluj@p9 zPSEr1sKE58qU9RdA;0*+X=Mimp}XV7q|i+pOoj>e>bV~>q=vZpk6R!a_*6CM%3@PK zSj&atVWB1Q!LoD^>-H0}9$*R+@Ey51RZC1&uhItF%?2%IY+N9~nJrU!6G=+sJfLIp4ap>+``yL|*|24-Xn z#-qO$pd7@yj@WSwoTH#sn4LC#tSBpH*N)4q?{L87xC+deCVhvR&&zW_itZ zbi!gO3>gYJ`m?Bq^UKnpqIp-OXAO!X(G2 zwVs<6Mbt0$=QIIE(DkTn>V;ITZR0>hZ6z0T5X2ecu1DofISRV*Mhqs{1bWr;!lJEF zZ%_wnK4S435f;Hv;q@E)x#4$C4Fib1@QVI@r8IgoOaXm_%d>gfDmRyE#g61CW?iRvs^J)TK4sFFsvUrM=%A3`qrs#E#daU(L`m>R_C9c-IbqNbZG&|Yo zS9jSOEEgZetj8B>Bm^=$i8A6KO+O08c1e?WgbPqMG&A2_|D;C~iNxKWUaRUw% za&o`)E8uFsew>oxClRAfdJumtm24sB^aS@SmEEi&jo)6UWx3JHUA-(ER3K{SXtR*h z3?x5Fp#IMCKTDZ#)hnsFQeRKMsW?JDjE-%#iBDJHdQmM^D-F5C=keT)1X@jWiMecw zV2e%L6jcqI|IXOhmB`zTebIiv>1Q!k;ZNi@=sm-8!hpu9gKcS)v5CP)gd>9t(5xut zA|y4OjZ9Ee$()fE&lP*M9`?3S1v*F>C{g_( zkDTT_Tq1XJ<(tN1PCPHEa{W-_Nu0uK=p^n-hUI1+tO1Yzg`FxHZzOYWQ6*17gwd;9 z<35T1aa{oP7)u}-gVu*iK$qp(bCJDj)3JQETt6^GT9 z1$y ztaas3XLb~W;G$0688N<4cf4dvuJV#xJW`@VPN%*CIfE#u2|rzOZf%V3^8!P3y{}Rs z3A<^UTnaSK>taXULj22GhYH^nvf2H^*}y?f2w0Cdl!Fo3;1KQe8Mh@$Ay-+M7I2H z9)!^>ab4h>mtI0`%!766D$^5>+6a!m2&m+Z-vW;YijFCIF3Cbc)FK$8MTK==RiZ*K zNoX{A%i}3m*5+0a&Y8reB`OWLZ6w1*h+TGzXTxgT>U~Y52hcthjgMAwy3q@k;HkcN zUzVj2sY4sxdV4I;L*s)~9fZwaAS2UkB(8Clh+)g;+(%8NIoCg-9*6m49B5VWoxSy? zkNwqkrQew~el75{;wN^JD*B6Afr3twptq5TeXnJg)%5Yrj{Rfti0Uo9Q4p#)%Huv_ zqj9n8gBZ5&6$8sda`8n&_vP7;*qluglWroPjAwteEV3xpgUxZYze75w zGnCR2uMV%2MuVVI8i%B^zYRrHpX!L_B*2dG!{WUQmyvQ`RW{Y+iKgL>{NQwQ# znY~6c$(?H%6ev0XzR#ymG6u)0U=`XMdTG$jQ3$k82cZ>fhD?DfpFd|g_CyamU=-~^ z5UOu!?m4d8bz1V-O{bowPS4kf4m|*7B(pkWp}I~w z0Cv*JKH@ez)iJx-^dfp%dwKRuf4;*--D*{;R-h19b4)83_sxsY1aG@~g9wG?T)~0&w{u=h#g75iK2ldO`pNSdl5E@*g}z%$o%l{=CAd3 z2XSUpaXVTc*duwvF7-nbYSmCaxcRIP3$_xp6LWmA4PenIHq;a#Cwci}0-J*&Nf$T( z560RE!ZySbFFNagDvAjDV}hllQ=||-Hzkz%l7O=oIR)ZD4n*tsYq!E6=aEFRLUe3p z*zodYvx;ZJva*fzn~Y2j4Q7f*on_k0%`+wL^HzMPh55%0~5`NvlF+c8)|Cfm$_aO|t^4Eu8pce>uswDjm^gOn=6nJgmKiZw;@yF_{ECPkr! zEIi#c{QE9!s;+R{a%Hs|Eak&*h0N2Xhb9@Cs zr(kKDtk3oN-P!JD{yi&*fY;~_cOC*7w$^COz?T@xo4-=qmUoCU?Tyk7fM=ovR9}OiL&1Hk42xH0Td5997Bu6| zU`j;h-1LP)OMhF{QP;5LY0)YNo~?z&65{%0BE4Mvq8p4+u9o3)H$PtMz1m#4ug!h71}L1*q|p4V~+n6 z?QUIApMb=^Qk2(d{&{hDTzb4w@Xt8y<15NT7kwelDbrCw)9_DNrB^&2Bk$W#{aye& znTYRmcfRSw*{5kpBsncbjYxd2RPWvV(knx~jn<84>>}!TGagZ@HB!0|pKQ}?gmfnt|6jTFF z6YJ*G!?D}iebP9UUk)*xA4G2Qc?rjvLqXUhRm?s-FB1dUv(HhFKqSvUE6LtGRaK7J zh*5hvUxJwd$??(D_sidYsC7wP7E$F(;=XTsObb|OWwysHYw)C*=>4=#vOHhOEjs5iLR5+Mp>>xy`!!?R2-=iy=|a;vN|eu2 zjgSN_Y`J+jdn(TLScwa45Z!@2QaGp_e_4;wwi0J3@8qDjY)D)naQrN2d%M*5@C&P6 zKIO{a>*T6^$iAXdy9K;l$^UdRU|<-}Glkd^Y2=|;PwS@MZI_;GiExttL}iU|0o3R! 
zN-#F~N!a&@bQ8gCvkPTv1DmVC5M@kO=SUBT7V*Z188sE)ARNp`4>PJKP z1M%gjrL+gc7OPXLde!zOtj^;cb?TiTvxR%ZWSgXue;EyHb8?M5Wj)oDSnyZ28sm+h ziN*PJ;2=hwSUlK=+Km?eW&Sk^OIL7*0|8K(Z|nos+*^VxTcb=O9Z1x9W<2dZ!jEiw zoprL`2VCKn9nq7rz68r|0@b*rvTM4Br&U4Q$yCm_$F&p3U@BjEkIGTl z@DIR6aJt1O4d_TpvuRtMB9j|cAy@($LQ0lCB4U4Uqy+g-K*V$XD~~%+1qV4SKSNM#_ltCr=$Ua<@nbLUaT3jc+ zRU_?_!82}MKkn>WOnw#V{`}r_md5{}!TRNpS#Uowgt?7tvBqq?fN8Og_q!(>M|Upq z9_v2chAdSeJ(W*agynK?rdPOLEPk7UL2DDmYNB!$DpddR_`EMkZ#X}Kz+|hS2&XUR zF?C|CI29D1r3I9{iq)A930Q4DTz96%HSfH{`V zWTD={5H{mMiu5@Zyg$CTrvhaBl2OFg7fn}V#7fzsDy{f!V(2f=!2W zs0lokX@y3$3>KlnUx5*T{QZnxZ@tt|?1p~?eSzGjJI;;61)$-%`;2QFj z`18YjXK04*xmRtW)x9=PCPl`(>v!>@xHF)al9?6jZvUEfg_N)__%PS(U$OQ8Ihh)9 zEXm#l1ZL%nx|$u#@y|nD>#rSm$C3oX7k(=j#+ymQIQyK%Xw_Sk5IpCyxg5KtOVpam z^9NeiiBWk7T@H!~p3%Ohk;6q-^5QBp< zeou=nm;y- zZJ{j%_BVX4+Xe#f8?^jZg`tbauODwZ3B0^SS<@VyI8b}B=k^-&eLq&V7Pf*aF1)nb zT++u1R4(568Sk+JkfLJPia3GSZpcKH?;oJTyIn9DII(pKVf;@1_NEOil6VnxQ6+Wcg`K*5kEDb*!E-}foS_a`AfhhW>0%; z6%d}g_mci0{yb=OxlIeY;b8tf?^7Ah{5tYHC~2W9KuFC`$JNjkE437~7=rmE2XG-i zTgsta)9PgOK{XP8K`h zsW6CZUH1a*V6r=HT*#Uo_b@f?v}$RtvBBQNFwE^S#(Csu)9ve|34Uo=q_PxwFozYlD%i*D(G%7{8n4Hk2Yov`v5dz^$MryV{K|#$Nv857vRnN>%C1mY>+Lo~)i}R}yrz*vfC7%8 zgmF*Q#U9EXMx`%(caE>yzhv;!p06%tZ+1&8ap0A?27%JDdKbphGj6aQs$u5Vn(XNy zA(!2KH;pzch$)aMr?#(#IPKS6=j13&*uwnGujdLIiM`gtTyE*V4zw;B`KE^#)+!*V zhU^idv#S5Y82QiKYD^4@+Y5R|Omx56t4{-DU?g;>L}lB$+#@@HtPaq0CRQMr$3pAuEkx{Q zny@gC00QDx)yR!}AsR9S>z?UUZ`c%F`Fxjq1VAE9TJ$bE!Z zc=8qu#U)n{WK2bE0!A$ca=Pq^)l)k;YTrP2P;v2M^o2~|{KCe8S;7ZZr&jJbu7aWg%Sz`dXNi@n$|r70 z@b$?<#b+IqTHmHU-zO*rZ)gWm{hK|QNWfoYT%Re?!HDDbh+wHlS1Db%uUc+_H#%7- zUv~@evJg`1Aq6MJ6$HS~ccPZcuh8rrdk1a($M}-pc9O5;pIBpq{1WI?RX`q!E9VV1 z3k6FH*#Su2rUI_3{Ib`my?lSnB$tI@Q378XqEcd#9{(V!nA38Qye| zY3-ri%tjr5zT>_&M{@Ts6)8~X!o~IzKe}+%yg>+A@sQnKi4ha%Xj~;$O)|D)MV=Zf zJ~hRz!}gjyy-(07wm=R=6+zDcNd?r!);ZEWOIc}GW~?~jYz=%KxI0`_Mc81_5U#|! 
zCWnS}+|2SNzJfbT*~{>hbf$`MAqh6a6(=SHZJ*?PmfOjqA4Cd3JgTslLaN>bf{S%v zxy^j9yYos$5z9XzV#e#8uDZ3KRz7c-sMa681(|0Y+{FWjUsz`SEw{(9pUJ&7GCXS9 zPNTx^yr?9-sMdVABtN8co7dpStdh^KT_*G71!fs!`kIH40bDob4DW{A1dax=PwbHC zxM(zf54NH%JpFZ8n_APS5&?I$(mV5hPb|atf)GMI>&4|Q-V&-YM*gTs4HU(~&(U0} z48b4guF>cq{U0*Xr&d2>udnM)w=EqNXpv2|m7q}=hSh(YSU!X{dOu*gUyWwa&GrjE z0lF6+kCp{i+`Tm>Qs*;BFfYVIt#Ll?L#C>Zo&swEWFO)lf}7y57iH5}KxQ*#GMspn z>~FfnXWNkvCSR>Eq!M?|6eZ{-(DPD73+~9ZEao+njQRA?`zc+(GYSF~BM$v$dvid& zuY}rYJs*x&8c(2Yd$hctfb)#>93TXpr^{8$MV!1iD12|!kFJ&6b5C9j#(gPj-PH$h zIXWw79^l!{KwQ1*HMDV8r2!m`B!jugrk{Vu1Jv2F=uVwz=KS1uFj!u@oMMI>-YMD| z=!~YYa}i~ETzA#!dq$~`_$JCNIUD=0FdBsuY?vCbGvg>>-zabF|FAK9or$G)*G(Ie zb<>k~;PK7C%iUWT1+>3|}Ke@H(AOi2mXBjKJ+A>xhKk3p;aU z6xy*gZsy9{cpaCRFiC%K=!#dt?0oCVmAv8jdtd4<>GXl`j*u^5oR&+~op!F)E)Q!acd zPBy2nw+p(=@PrTzzXs?;ucL)_sVne+(GOA^xWA-VG+Y2^eT`>1Tj?oA$TyGUsU5<5 zDC7Nr>cC?=Q_$Okdb=&Vt-+>JM-z>m48>4G__%)TaDGCfAsv^~lD2EkMpFfQ%Piv{ zi=Q$#Fsod<5rNvvyVB5z$n&XQy|q`%jURHfG+<~pDd`Yi(e=%!ikPGHh%*d=CW0QU zf6CntWK0MDLLoBg)dp$s*sH`Z3_V(|$i?>z`@%II_!=Ek;Bl$^fJSW;!6qAQVWLfE z8fsXirFy7j?Ez%*iVKX&kooT$OZHl-u6vFHPn^W`4Aw1oJOBr1Ojc$#jh$$ZM|S=) zaK@IQ$1VAKik?%PdE54^J$UEN>h2P-043afDDZgqO)#($pcm}NDzg{{39-X$#-64` zwTGno9#?;^KM!)+|G7is1-SS=jRCz-dt-cOy}dm^6pV>l_Dy3TGZz*iIx$Q&V$NK~ z%8UCkmI2PYDCNQG4>fv8*i@p`z&=u|#o6r;1#4p1N?2F$(e68$IjVIRs6sk`mcMPe zOeufAA^{oGwoxkWC3SN3{lIq9ZWEv>rB%rK-iG{^pu$snd>-KZcrgGaVF^H3_2c=Fq`xZpYOjv7)2dtW4w=f?{|^)PYlW58TXB!IK7udW{~kjEtebS zszx2j*HU=EB)%GOn>U9xt#JVJd`jt;FnBaiVayFD> zWB4>Z>gg;@Q7*PK-`C~N5?o(Up}h!@J~c}--D+|<$fqLRd0Y!btQtrRXq9?b8DEh(b(AEVchHWL znz;^2k(O>`3dv2tc9V${>mgnYkILQ8;~(Dp>Nr=6^D`k^vN$#OAtBn*MH*^;cK#d` zqFd#%Go&i=_{j-LoJ;$)u+Y3$+ClVNKGSkKU@&oe-n~x~bnMTN$Vngg=1=bZY5L}l;!}3Z-e#Zd4aAtzLzMWr zu_)Pvt6T2AEB=K41-VGju$O?%Wf?w&^W+1TJt`>l&g*tX!~J%p{il%cI!GF*w*7oQ zYqRYWcAUilSv5QCO^fVTGbTIQ0g*KN7Hylbd(e7hq1g$cUy8zAX0?x3s4zk@!aq7S z{*BNU_NVxJ(*Ztcv4%Xs_OWv{tzg;@cC%b@tW-GB!I?|kHk#-6gcL#VL*4$v5EqRe%&+H4P!V?mElU zdcZC0A|6cR>lVuWCS9OE;+D7GQX1g%0LBeck2k_BZriZ~J(uw8kGIbRJB)a9I`a2` zF~`l(KxK4bhT_%vPqDcr)gfieE`JGV%sS)TY4rz_p=}g0`({Jl)oLT(bL_U$md1LO z5*5Yh@LvGE=WQpkF4>vomA>3Ulg)4p<>9X`$=;Fh;?&|5`Z12$S86#KHi^ph`Q8t= z(qp7NJ8ieC4V#YHrMU;Ep-(x9xdp{a)Qg`pNZwWa2jEu$9`{6B7;v$ua&=7cM!oKh zy30LkIo>6Pf66Lf%3;US8{Y2^VzRgcJj*>CdU_8uyElX^|;c`6!yXAZf6)Fa0EU{Rw)+pZR;En#~)^yh)1pXAcv`2ASid*Y|~xG^?eK zrE+f)4vTN2OJ*#SnK*`(3;r)lQ_1)H^KPc{({++REM~){;|esc#!@D7JCer6JBozb zTSL+w;A==v??dix$AwaN2FMGG6hE{DRG6t7egPmF)g?gm&BJna4IO;i9T~Hm=yP-U z&2Mszr|L{E$JpM|zJEk>c0Y4==!iYEl*@6~YR|%wXw|ph>&J1k!;D2-uVuI&Oyp>K zOXrA(yu1KQ5$unDy}p5>!QUM90S@pISI-tjQ0+Y2b^v-KcQhTYgEKYZ4F$cSb-1eW zRqwJ5xN_8k(GT9!ObnBj={|M~#$tD{(c+BGee^9@zWW3y7GhM>fu)7kl_?@jClUs9 zkO7gST)QwT;W?L*d>F8~mzl5!Ok8?g?=!-!y(p0R%l&J7KW)Dz;U#TMDRNLipgT%A^dg z`%?4SInL=dCS}tP(wiS0RyvJTHtKOSKdw1Lli(1&)>Uu+Mq~ec6?9x#D+^3YWKE+g zylZqiIAb#o^-SYFRxfn9z9)zNF5KLG>vFM)a4>hKJ9<})IX=kTG4e^`{{~h6qeT=z zl`K|7ar+Xnnf}}wh#J(IxhVl|D%W*upr)v!v_jYy0F3h1_e^B84x}-Ho6=3jmhvXSMuhW(Bv~r0Jwesqaz^1IhQg znpDU~2f*u6)myF|EUJ#kkcO}W7?nC1&sy!T&9`K|NhMh=IU;s|D(J&P^^~`hK&{c~ za&vF*AJMYX+XTDN3Oj0w+={<1YvIXvF-1X@ zC8{MQNws>{XbKW|^E`3k56)D_MVH)S>SS)7x|po(bKb68Q{6MwTOSZEz$3`9ex%Vj z9j6_)Ouv&U+m@F~ioLjO94J~m@-B*K2ZF4Wy(m>)}FBMJ;s~V|mQ+`fPnQT)` z;9NNM`(@~fxI_eg0&tczj6O4pL3QRWU>FZ|OZ>(4mF(>nntwJ3(;vJ6b zxI;SBQc!sbwU+@PZsps~C(w(*Lh_B2*y-CIdpT8OR4J=&asDOsjJEe!C>^KeaeJ5@ zq&#i@{1^n?QtJRHWMp>wp2urd25>1)M>--&N>}!~+7&vQCnzB|?PGq9v42lid zr5t)No&7F(ze~7#Qlk}^s`qb~ZxDuR|A14S9Kf2Dc{js;)09)58qd&?>!GH(W3MyCLU4{5JW? 
zW?4b-ew*sIi#BKNkn>{0Xd7U-O*IsSY zNreT45L)W_TokJ`Z@E-@vt$hx#rEVU6aQwXDb3<14Dr2B0Zf^AaT9r?fU4>Gcmg=1 zzBtRCsgpK3C`K&rEXuZ-EmDLVWl6JKX4L!7Wr-9a{Lf^ zyZN|c^DBk>y==gdO(wCTlDx{?=#ruQ{DpdSazkzuGF{X-W8*Rd0_wIpzfF`m!JIX) z@>2yxnZ381We~g_@ILs{CFXE9h~rmC9(blOQ6nUr@&A z$9}ebP1tHsc&Oz8L=G9>`OD@>2&es3gC5#6B5MJgdS3bC#llL_sGW9Y@M- zWvB2!{Zd+)qN6d?;_&xB@e%)8H@|m+<#RcmSZlmUU-Dv+0c+JH&O~zf9&Rf>Vfukc z?$*804${f5-jbQ?n=T024o#tuz8oLCP2Jv0xk@WF)M+m@_qODVPZqa!3j^G}X;$;+ zNfc@dyK*VlXeK(OpZqzPq1M$*heI4`cY(A(YU^;OBxPNKnG}iN;5>{m!1d>f7;VFl zNP83QUsyk91TvrPG|mP19AyAIepOH3W7%l}0V|WlWFzeby@Z5KFqV@)9+iNpOel72 zX`5j1p4Hr29;X!3@RJ;xN;__U6n?&tA`v5T6lf%<%{eKc=|A!talpG${JCd2t6rRL(?vxdv z*@qvVy2ruQsx9FahX^H`6gKuAnuld28?=k?uM$1jRdP36N4*cV6&&06EwM}$k+Retwv zKt>?5SZBV2KeKZN=oWcR=YY2Qo#kf78p~U%D>*(bG`S8x; zeeK_Q;7PODA;H!niV?fDg0736(!;$AuA=Vpluoar4*!lcq+jA6*x&zg)1<24f7YFw zjA>2e^ucEWWXM58ymOzWhH?k!wRyA0O*m9X^>@WV-rV*35;UsaO6q|5Ao+_9Z6iR| z{vOc7{gm{cERbhxcG!;bN4S!8!3zlaf9P_;VqXO@FKNmHYgv^t4G~|fQl7uM7{;Rv zp?|Re{tL|Y--hkq?ElyA&ZY_f`(*((37yOUV9N)G73P0`cw&S_|JuGNGdN~b{`>CY zpL>k|AAN#fypJ-=zb}JuCwx>UFS(r-&41I*`{|}~ zNTZ>_y%Bo-UIqOP_A%2Nba5GBVXz2TTNK=KlxoIQ3&?o(RvpI#ZXL$k#)r?UY606* z?aL2O2+vt-9Fs|wDz1~ehZpt!)Of&uN?QLp>iq6xzmw@*+%8`Z{&P&*z`;Djx{_o$ zCjGZN4(PlE+P%ITaQ?sUr`umg%<5ir+yDMOFu3O;-@B8rSVsO$@%oQ_II=BlCEmYVJIoVEnG! zUc&0@JBt+LoaDh966tM^oL=4&Emllp5ZFocoX6!j@Uv}==_muj)m~CfzN{2beHUo3 z5BlFnZ+FUHjTx{>qpUR>S6kKnT<)06<7i5&Qk1nloT?5qcoYldM5<5sMGzqT;{nfT zdobP`OD{P^k^Z^=>wT={Avu3%{QF&PzFNp@bzA`UsP4aJuL;!u;~7DcvB3xmH9en{Z zZl?^->ru-+#*FDdD;KNjDAKHi|G(@gD^jGbNRZ!-J1tc5vG1wF%|>V~7kL$1bhLXx zd!n$()P&hbz@EqWo38>u_}=?Fgs;O>$`dO3lpd5w`Ga%ny@hwlatCA}xMFi?V*YefMyemI0NCk@aQd(yaM#*$OV8XVc3G+B@mCD?^=4iDL zGZz@)5yLdd{NG;2h|fqb@m9?CYRhKFkTXCP* zy;w0tct#v`g&g|)09|8{D4!OD;dq1Y!g8{{PO+baet1ZF80GyuOV6lRY;R^tKE zB8^30;d=Kv@cEL0&|yjcIpNB8R*UIMQQ%2M4#=GW))vhURy*S#e;-<(X6j{!(s-!3 zz!8!on

    FN!?vpuNe)7W4tmGAnB0!WRKACq_3SFi{8OJJq!B zJV|e2wYY4%X4QW$={Fw~Q6DLP1ikO(8C9H2Z)e?=;ifBTx7ZZ9D8pfWx?6j$Y3Vg zN-c}ikpf$5yVAnB&=Uj+z!z^nW}4n)qA0aqqq=e*|FE!obGZ8w%pcIMFp>p4<@`L3 z0tM%bbcz7DKFYLyqwDjp_h3bolvg_fMwb_aR%_Smw!$bjI+Z;#^~?E?Fq%#2+3e;5 z-%)|-C zZgo~G8LHMD#y3l+j39A_^O=)N=j0VA;p-08LK0Y;u5st^L4OfrzdN}*+!?&sc)LBk z$7NfjAB;m^Ju1^70P{;@rs<|-&cmvE+z`KZ>KItjiiDhPo@^ADIxGNNrbV}zCRfLh zJ)h9OEJy!<3%=lbInlfQrjrdN2OeSOYd1`q^RQN#O^x5kWw|$dBip6Xsk8J(@)g)w zEJ=-KAL;L*!Z=C&7OF_vIVnX7u* za|)D=dBN;ma+f@LPyD8)%CQT4@(Dj>7t!Rzm8sAl#=^jz>`rVsH77tZcgm&X5DZ5< ztWP#CM-+>-X1)Y@b^o8cOvEF^4zrbhC)d{ZTMkLvPa2!OnVtCV*j>DFidhULs4uE4 z>Cr1QSSVbw<>OSQLfOS$XT81^Xan8jnyP+F@1jQy;#)?Kt3ipcj#gjNa-~zO)l8Q(zUR~BL2zGTI2HWYH2S;L1gh01s0LAM+F1(e-ZanDt=U?Iw873CsLBSre6KfcRrk#=TqRFI$8;i}tU#q`#GDk0 z4|C^vd#s>+I4nTj2LgKeoKSA8YdbULQZy6+-LJ&G)`;}r`6_8EvC}fYHBTHs1EK(K z+grUqXS6$66h-%ur8k8qZpT2jH47%5j7_cGp4a^vMX2=D2LWw+t>c)(bXw5nS09LR zlZ)FoZBZ{bjJ;7px8p#DI|TjH@AR(@f#S^S{YiRqfKmD3$c zcsQ{HVsX?57Yn=>5y;(y6t0ij&G>ON%f+Gpoo7f{-?YE2ToV6#gvuKB z{4Xiusl3i^LG(~=i4vVAl>v|0YV&=22C|TI*P@p0FEBZw%m&@ke3bf)MQU&q9zq{> zCY$`H>a+_*+E*w%a0Xbo?01$l0@~K$EZS7_oYJ`MrQlm0ytv*pY2|2Da>H;)|X{Zd}QiZ#jxEe<^owQ0TkVmxjjdFelTwMy%o~0 zdOi>_U4>?A6^}8ohdUMX{t_@f>l-fUUJXRPz3Hi(&RQ358cr&MXk+ZUour9kr6f*>sr#+eQVE6HaUJYsjMJTS3Z= zkuSV%eX;aw6~(sg_jfnhj%gmmZAu4Pb!XDNylx>vd#%APq<*;f&3B7OA9&i#^2)LO z&37lwPsYcF`^YvH*86i_m31@7{6H@HFp{BSjS>?YA`XC{!txQxPt&FE5Qh8}V0~ zg&C34peux}fLPi|iA$7GD+%iK@hEX;VUtZrix&bN=}yzgPz7}e+$Jw5l2nd$OEjH) z5&?+`zJx#8(ImQyYEHxBbRP{^0bRd;kEE>DX2sWd4sBq8q`2k^DY(ck@W_o6XiV@F zUcb+gi#}{7{8dQs^IC#Z!X-#2Bca_DxmwnFpy_I~i+rMgn<21p>bHGU2$me(c zI3~i!OIyD8CJ)wnm_7iQoGM<~@`b*%U* z$<}=LeCm4Zb_oP2rFkC5$O$~Sq5bX$jzW}JqUe- zD1LE{4WBV)JdWA~47dDU@?P`_z1V@CMNdXZ2D|~ARTJe0)VpPajg`}4vuJc2Qn|D3 z1!+{rf*s{w->FHaoJTfRCQ2#V8FbF}PxCPm2sPT@$fq4pE!6W-yN)X`9p&eL3FURR zKw=Ba;ZQW!yMdgSvv%TLnTKj{JM_tWCH!)wmbbrHP6rhFgkbiLe@!4zE z0M&bwq|c|sb~7xez8iD15eV@9Q{f4C@`sy`)LAgkpDaAK+=lK^{{!L#^s8J9geRwZ)>PBM`(qpTU=iD+`Q$y*Z2LWYs zti%36*%0oDOe1@i9$0G?u*nAtsb8P%A6AJle)5lp7ITMFSJSbErO0_tI@sn+H zw|vW)ofl?rY;-!~Sljtlj-jmxK71mKUz-XZE!>_$cx(eUvb5_+15vbwz2i)8GVN0#KUHY<6_a# z(eoM|4^vNCf3^}Y@<2Tjzg-=T0w<>+_vqnDB=m3?dA^wm;g{vAJEI#yX7(2c4~3O{ zkC_pL7c6SBft0}Uv5t0w(vejznVk9(e^ceFSgKGAo0ROvDPX~`YXo)M5bY=I=~F^N zlt;_gy=kWV+DNvitR%FxQ0TC)p~CJxIcHuUR0Z$zVWP~y^@fTTwItWyd~^PFYJxZY z*KdZnmNNXYzwhM5>99b5;RFInqdXWV=oigB2b{c0zk2e#MrK%V4(n50Zad;yT! 
zE+1y6Ydy@zc+>nFi1(JWU(n}zySbQ+YX4_+1cZW4u&etD-I}>9mou9dRTzm_I(r{$ z(jKic=&`r9`=jkr!o*P2)|UY;g;crWfzY5VKHbz@W6knJg+aI6jWKPgo>>TL+ zrrC-m+I0qpS9F!PbXkQmVQ;oH zgF|^n(4WjzF>Hos#A4i&=kKKN4P(&YxKy7CdPKAGrdafoomWB_Z8qN=#0H-s$+O~p zQFTI?y_%hCM~P|)Comt@|ChTzsn>JFds+4{AL(SyK;2pjzZ1ir2!6RU9QVM|pMll^ z^oGVxyzbnqltj!e5&j7;go&1r@BF7kYiC9>yA3A`XIP{=BfNiNKjpDm511Ip4Cnip zO(8uk`oxv1!#Efj+C!lY0-dtUA)PYbdSd);x<^~TvE%U)h^qFJ??4{)K*VpOhU+^e z50c{DRD6~LR=hz6cTV$MYBAEefKqPHg8}gE_0Ey33y7W1kj(hXVLOHZQ09s)`SReV z3^ycjcXIKbc}@ho1bSeyw9blp{8XRe%k6V&3+C5qrQe3Rrz0qO0>whMeA#74U9%Pj z;<^(x)qnkK4F3`Z=6DkBwPX~cEF(i_CbuKQ+sk@uiXB`XPX>Vi9Yl*XN?S3mB5#Xu zCFZ6hAp+-VV{eO_vD6kL#oVVdXGb*)@ic($@8W!vF1}td+yeMhq^sZ$Ziq)X#PS@t ze|sSB{PlMbcZ;Z{G#3i)&5D1?Z>+L>RVg9t2ko=RTWL-i4VMJvAN#oDjGaCa2*6et zN-i|*TX0y~q6`@Xo8HAP6Ddm$?=#zj^9Klu~PyVZ23h0JiE$UWPJU?;U=%eKFeE%s3dGl!g3UOp@k~DAG)kuWV z$?@Ze*_jBJD|^OWB+Y_4TwOPN03q0TL)m0qG#-Lla2$rr+G33rIH0lD?BA~^Y*q>wssA^U?N5XPbXfs4ZK~j~m|$wD z;0}9l$n2|sibcea#r)==D|Cx%ww0%sU=dNCF=@+p&Fj%GEYMamtA%4zX9B+06J4JE zPT%vvq-wPV!R7ra$nF~V_(7@pbcQln5UihtP*y9oYH@P$zSxLJ@?r>CAYNhsnsGC4 z)@lZ-i)Ws0JPB|f$L8*75chKYr*bg{^|E}f4{Ea8o#>$>^gV$&SfxyCFXmwakS z0RAs!38PPQ)eH1V&i^`3^rT2#p31!7-%`vbH}07u@Ym6=HV=17uO~*e09-q>B@I_w zF21I{K|ty<0;Dc(=!zS(+$#qe+YXl=#+;hH@30b%)KZ}(wSd$2#~CsXU3vW)tyQ)1 zvT+-6&ByCA^`XH@?v7N+XsUjoy(nx{c*&55&nNs5ogVkDjQ+% zb_%z3rBQ$Y*<6Vag*+0lO`6 zC|%1KNCd0t^Ef-EE`HksTB$5Z3y0y+4N1=(8gF6elLAF_q?@1R^qMefWQ^t73~Hrn zsZQk$ZfD0PzsGz;KjzgJ`J>ad&#Yo?i>JQ7*F-}Sy|4Vp`sTDhu0*Yqsq%2)0=_>= zLd1Wrb#7;Quw%iornKBjPCkv_peP9zb|_Bg02H;Jni@49bNz!8&F>k6?H3YV?f2{L zcQjglRowtmphPPSoZUSZrHoLaZ3Sb?tqPbJmhoPVvz|=OJt-s)n42h^NpT?}&h~*v zJ+8N~N)@=R)X=6scn_Jxnlx%z%o^c{RRJy-AFKt^Rx!>n2`Qr9{2Jk?`pVkT$ZO zG!WDy>e14$uHx!@;2-bSj6C-yhN>}nUOKsY$pQ)}y$HnZs_KN73xM=WvEY2pCzItF z559jP^b@K}XHxuv<-_u$?Q>HI;D416D_4+%ly{wBms+7gVlK1E7c*InhPvnsnXW`C z)tz7@kSYK5k(DC)&jy_)?#F2FZHa9|2`21F$aD)8F9wnchEM9H7^twNmaY{d1@8`i{HP!bx z?TO=uy9>J%KWdOV#|QfsJM3|)&RVzpqf>_@7Ji!&kspeEIv2#KooNOif!7t*2Z zya+m(Pi)amzs;@{HU;h5X>_1{M1U3c#PYg1wt zC)GiQOcW@qG8#q$Y4D)pcN@xH!(i;^wh~R6{KdROOd^PjI;uV)hZDuJ$n;t%oX#5P zCH({F+f5vYFMbbmE-d~afz{XD^4cUjw!X57qLTfMRtIJ%&Ce#k`KY*k)fYUw_LPtC zyc_S%8e0DK#Ep41m=7nSBH5Ox{ZPmK+z9X-0~X)xb*~JOPhS7HSd6sWzC|XsP_}tl zUQnwKK;&{ib zU#!CK-1vXVH#wyIT8PsYy+zalwN zb65b)_s9hcj@^7#1Q{~TO!evq0{mmdDHJ;3)s=W-5H65CMW&BWCT%i(?+Q)G&33!8 zseXZh;?-GoXL{(tO>z-g7qUv?WBt}GUZCOD>YITOMf`(EnWSo2F5vwoy-aKX2tBaYH?xj zQzvKD;9n=8>3rif$x{)WeFnCQK6|6~yrei7xD z>|ueMo*<)*PlZw3euvF>iisTIc#FU0i6^*u8@s};w1XpyCiW8Jt-i_b#=5w79O)^F zX&by#|8(jAMQGcim6CBk3GCMLzkB<`b$!KuowajhKhd^&(~J*mioVz^nbE3dP6wN| z-{MHEy)dlGY@*Uc^P^L-JCj|h-QE^M@npWT@KgziC@%<;{ILdXF_xr|G)YMJ;7pw- z2EIYu?gJ|oFDk}Z@>izfYcl}vW-=$MR|}dxM07BLK&)^PzNQY)IqmJShFs{kN&-Fa zf9-*LNG}y1ds*JA6nmUD$DDC9xzb@}y~82t2yHz9aHfVrEAX3RH99q2b1)?BnmTL| zcY<4{Y>?T`gA@wS$fNit@>n+OzqHwa*Xi(n-u!kZS9kWU?jgg4<5y71A-v}s8gGrr zc^EJBuH9m;(ET+~W4wbbO1%KmrX`92PBRrI;gyZw zO=9DZZ65CLGz7gl@crtpS+v?MjE5bi)80ipEYEhk17#6Fzu1EBdA?LDEV?D{*3-{H zDNJAe0?6{XEop^IHD$#PHXS+!@<5U0!2e7Zy{xE9%`SAXpKCX@PV*!BcsmkA=VGc{ z4{Wbp6CqGAAGM;*ygI!7#xM}Vz`WbT`mw;>kbB^(wc+kWOU`ww!cfue!XTmg41++R z=G>|%xpuvsV!fR+NYyDCNMMy8w2GH(`O)3gs%P8?G&Nh=+6`WMqAS0NU<8vzM9>V} zuqSU-^Ew$S%}|K>M~+XJ_b)3tgvr2g?;Z;^Uk09y4kt|ke^n;*1((!8U2dp6*+$C) zRIQ!VmZprJCRldS!oBqgkuc|Rpd@mqJt%lgL64;Sb>WKwc7htV@)Ljw;JE3J0wxSa zXAt%|dV}{2WU?^f9F5%}oi5%%?Y~jQe^ttu0nQhpoK|~ye$i}Z4tG&+wo6{m`cURH z_W)9i71&qJSCE6-Vpx}d7fbe76()ZBsH{8nD~TsTx_-v_ z1hMFl_t7dolkT?|ZGM7w`699Se-R$QKP7&`t~PbYaW#MuKr9A@*zXcZ$?8-fYh%HY zcDN*uPfF%aF!tyyk=H0_cFKs-jS>?PvSNC5x)Dh*k^dVDKID8$3hX1Ic;p#oO8j!k 
zj-0#Y(4Ok2k=;LE?b=sqm#4mCn>m5@W$Ck5eXyq0dH;e7#Q~58Nc|A$AZ1drFcr$s zeIC-+?{j|Y_-ypWMn1*%4sXE|)(#ypUUBC%+ObP`vu$}tlx^T&t(ft}B%~Q{{}JKe z91kyGfz^r#!E*%zd`)~lmr9(ht`MSJ7&8t9<`ya6v&!P7QX#bOBAHWFTiDG3V>yx( zvVN&uZ6dqaZVx-Z{^(^$DH*bU&Lh63~4;n!1s-2_cuMue%J1zaY?|hpM}#Y^Z53| z#M%t?plPGVeTALxndd3rZafa1)=N*>TjB1JQkH~&J`KbG!OMJ>$3(^=da#6tquIeB zgd8ezY5bIcy%9;o;~3{yZ#KY5C*U1yLvdqcv0quP>ULCTw#7byiUPMM zSLq9+OY#T+HaTym-0w6eQ3FsW&U~(>9}M5@tp~W(WCrQfirv&dE*5@&7PVDC^ebBM zEveO8*$!rdU!TH-%@+#;yRY}vK;T-<^^J4ato>8A6-mkS3v#>$fYvoauX)P#H%zB? zu*aEPr#09l2Y^*s5eulT!)n^5fC{T|eR0s*e z85eI}1m->fu|S!rGV>?UuhR7Fi+xQIQ=k-eMuX=;>Ht_~>eX((pJpp(^8Ey%E8LA3 zkH-kdizu_b-6G3KnS@{%V67h5%xBd+OYtjO^~EB3j1R3o`l*9jrzY@69K?eWxNOuw zQw1P8_;=liW_%GHfDh;%r1K$HtB%F%4K8CJds9XFE&B=DuG-1^P-jaw4l3233-Pza zgcAly?vf=QK=K)ICBVohulDUGtT5JbZ@xYT_A52Pla0XweY9+)GBHId0=D(TorwZ& z{%Nr1Z2-9JWGS_&T=VY4eecqGEGul&UO8$jI|iQ$2?dT`y)jQo@mOgYU!1~gn)DG_ z2tb}An2-uJTSj(&9PZ8w&DNM@-ZP}hM?~j;eV3}8=62hBZEaDV=*3w9m>%A#Q#z_h ztpB)DCKx1@OPCxOxTjmgUCq%drBwm_x)C^h-gM% z9aVi#j4FTNTHKj<{z0E>i+;qLlQOxHJ_MrQSp~?7rt&=emxceiStFu)!P2|oe*qgA zt{c-DFn35YYzX4B1KB=x9(Re?TB@BETN$S1ltvh}=b(8~Mf0Ut-NDb-yQ zJe108=ydPhS+3t{%H?zv0gde|PTdF3B#LL^XWcx%G1L9}SP7tMnN1e&P8HCDEi8ia zflf8La$t&;ZqZ)#eO<1(^p$O!At-UgGwcLzA`EV>kfhOHhGCI9-45u0-G-9cl)ExgzKTB*F^f zt!Pz?-bKR}MgTK@DIg6PR~Q)`6_HhV5^e!;*!roL!1*!%(?Y%WP%0mFXUwK!XV6BL z&>eTVymmpRSm`-?COTyuiaT38m?Ss&?!S*^V+N9p@I zvTVK^Q9iSAxX4{;>06WM&1?!wnIqTq7nd{4i2KfSaxrouHSE#XfJ{8l4D#R9`QctK zRardXI5~Jh3^Q z66{X-A3_KsGK?#94*--8y#*z{F2&W4k?*b51)g%Qh&v8^ZLfNull0` zrQ%m0V)!JlVLi=AXOV{(Zd}T+?cMd+b|Y`H%$-aE%Z~Z0weals71WtwNeR_w4s3Kk zNp22qR&-t{l!%?@>%52%IP;OGz%F`LknZgu&p2*9E5}xc%9}}Eu@6YuG=!{BdA>c? ze4FO#wZ>FV>(MFW6f}%}{>VqgL`(sFq=lpZ*bVv7mle7%t#F)C{&*tRL63M%lc9Hx zY0`D&+4j(!LCuX7@zj%7(RCIJ;1FwNAooI=gG7oVSigE!Ov9t6tM%qP|W)BmcusTKHJibk47l?B)blW~(HL%rHOG6j?>v)=)EWQG~X33k3Ha%AQ?y zhY<%WUAh!OtPtWR{{prDTFtOP{Lm;~1Zbq)1ai^$cCv!JCGF0Q30JGVKQT^5c|N zVo~N7KjV&+i)M*pw~T0vRD^yVnvN&|0NS|x?8+-3mdy-@mN7!49p0WV$Y4@3x0Q8h zx`l_=U-xZ9fmWUOOkrF+V){apxt9N~3)cG6zd6)j9s@eEo>kQJA4&kPBZ}X`cS^BP zC05G&ZAu%_%@63Ay&sQ0*toE9_3Pfn6LkIRKiLdFk~yj3W7wqdn*gRRzx9t{4jdT% zs1>|CgQ4LIAho<%V&&}+U6W3pf>2ZC1Mpk;{_P86fC@fKRt*Pbyewwf;=I0{kd{5P z?!!nM@%doTu@dX9*55oq!CFkPDHWT9-TlGW4yb@dpMJsmx;_wXWdH068Q0rlwaqt} zf49w*=EeTlaC0PvfU9c*oGYDV6qR*R;;1N!kADKAPh5ObwqmpkqHm&`iIgD4Wye^mrHw5X>CoX;W?*b$- z5s%9i6ZK|&qaO&FCcJjzJM-V0)`!!kB6pjD?z*TooAqM~w%{0Ze#`SC$>GkKL`?)8!FHL5QTGos)UOtpa28OUVrX7& zd_8ugYzX?e@di<&NQ2$cxclauQaaX|Xw4)IB@(dZ)UMFGVVF_i=0R#Th;<#!oCWnf z-(*=7po*99Mth~_;xaZbTMw0JH!!f83^9uq8UX{cLOU61n*zOvBZ)WF9_>_MP>{CX zAJ=V&#M{1S-BXAv^QR}oZ?P#JCgQL)svcsZ_8*TCNbKf${eY}@mF)8iUZ)=E7pHO0 zVCv#ty&K59L=!+!B@fiS(=RN!1l^s5l9-y@L39Sk?VJjJDh=HYHht#+igU);}C zOi?&BH#KETt4bUKZ5Yp!#kium`zp()>eqSg$jFOUASIC}k<~=#$IZ=NMM)<=o!#K< zIm`X9q|W|V`uBab!-hW!b{Y=-la!lYoOi1+$_rL{Nh0d^?@Se}RTb1M62A;!g%^HN z%Gh!(1kjXU%3=6_9r7>H!$5#Nc9==1`(JM-fXIGL)BqqC**k?o_p(b?)9~jvw5C4T zC~NsS6&9vGlS=Q|Uzl^@5PHs)CU!Y^gn zE#}FwX})4JS)US^%~qB*NrmJdHdds&m3&cbC{k~bOJujCMcnh;I?_wi&uMx#_K$nO z0Q<_#w?9E&;Lk3?`w@td7MupX;{*hS+I4G%y6Ak1YPPajI!7*LWrYnK4EL7JyA|7j z2^$4!!<+eoVfY&SM`Vf1Pb`@z++-72+B3PF1c3Bk;lR{7SKPx+H96JtgXPap3-=}>56D{m0No#a4YB6TcnncXe zflCnjoA|CL@>!Uueao<{;E%szMd%x(B{k9kOZbFRYXoPNsmolpC`#E!W~__2ns|6%}{lia5Z&*MFu1gi_Ky|+Md+K>%;?GBR9b*0N}dK=K|gMq zXP)s3y7k8t&L>#JyF)P`=-SE92w3k{u`1~}Upc3d%WNxBR@SmeAkhK!z-iuL2<^uF z!Mn>}QlMJjXiq=vZMvw0&fYGi@~IH7N~~~(+V4!u1!2?ctDLi_vkc!meX1;aDZQi4 zP*RCC=2ChBDq0TRE4<5W{!PMDpGyT^3q$ayAT2AwuM(TNi$_V`H)#re`z z(sHflT+DWEI+hSgCJc|ye5(oYkW|~~;)nj>r5h}aeQZ)2g5VI}*FM=zVSGzjU=J*u 
z=B4qx<<|n$Rw<5N(i@9c3k~)$B+U8!B9pC5=q1+w|jt)f_cc zmxkqG^Lx{`YP&zn*3<|XIL`ut?16UoB+OcW$5!&s_o4az*YxSaDAjPYUCzR!{o-JY zw!~W6-C@Ge`ztu)b=2)%p^&^MH1T|}NkGFU{#pvQ!4`Hxy!3+AWLPSeE{SQPS>U$`KhY{#vBMME^&@=Y@(rwTcmRE4b{0_+w(1>H-=gLc4VBgSIV8Y*( z#koRH1DbqSl3QrrJ@wl)c)0i3!5fww(&Bn4e0|2BRH%}f-dzo->aT_$N|J_zmmCfk zDFS1bztWn>wOUQr>I2sr6&;h_rg~P;f;w9+D}>*cWqJlet<=Ne>)!tNg}2f~+?#Du z#iW#4KW@F?j4{~w{w0Rr_PTaiT>7@wPhRo-?%75PNhK1iKwEHXl6YNOeQuJMseD0d_Zh zK%odgcz_VXLzsJZ-5%oDmYAR2rh`I?4*o_Hg@#_YnAJL2w;!t?Bi z;EMvs2<11=uot)QSpDSDlKvX*1w4kqYSw>5lk_*=$g5U@6kuK_7jK4p5s2LjlM<0C z(!VMS8#wMwQ^xx~XEmHfJzlsaN;NI%e)IRSId1_z$>IWk`TlRgAZaY3Ql5f}=~O-$ zGA?}?G(eq$`V|_Z+dRtgGCK0_xy*khI4crASn;`=y8Cd z!TqH=dRo~3{1DK!tvF3ioBS8g@rpoy1t~6^ueBIFrMNmp;HVR-$dOfQEId_73&Oof z?i`&(N5DihMmsiJxDI8pmsX%ZaNnChCUJ-q zFc_(-i$v9%4cW4(?%OgAMHItV)-G-dE{je2&;DLZ-~o4_!E_>TR5sRysKy;%vg|@P zA}Nm0ks+Dgs+Xxe&_BAzzn=A9Uyx#gRY>mDF8e=!%zsxI_~y#*{`3Fi6PR(_rqHsP+=Y^aFfT{q0YdBrs%0WP99!K3}&h(3?bE^HM&Q zZhR17eDODJe(C;+VxI1O5~~xOQi|^%J41%#^7{8NoBLb1-V|E#H=6qPo`?#UC;piI zf37vbH%NOk4G^(gZn2utS|OtEIz@_=8el3_cYD@PLTnDko5}!7MuSe(rq9YxZ0&a& zARyUooNUmttY?;-jR+iX-RZq%7}7inr&Fs`3c`8)60o&HI81D&kCrxmw6>2{O1jhE zc5OT%ydIe4x7Jac;wWIi-T zVD!0}>P**l(@GT3F|jGZxgdP6C2k#4A>htow7Ppb`+_8noX6pw`iZ|=@jE-yB@K5- zfh--f47l$oPmE9hd@zq-Dt^&@)o!>j#d}UAA0B&Tzp^TdHUbQ%8f}28<+W<{R}0Uv zU*kn>#2g-d`CU@L@ZjR_ zn0W@mXD6L6&0+^&(0ot{Yd*+xK{oWcJ-fpeuA8M}Sd6^UoAncOOQ37BeCq{i_rJt9 ziO(EKkgBvkRfP85b)N4MFJ2xE2sj=z&X46dzinFAKd*i5)=6V=5c01FFKq-1Z-cu)=;@5W0LQuu8ubs%n)2PrXd~kud6@Rr~Zp#l<31p`Wl> zV;$3f07`wIN2|PxOW(~u?FHS`N`7v`WrX|l?gZ$;*y7TSxb{TzV*ADn`WqnGB@*wK{xUuqOB?5G}%GM?f$h?AJ zLPU$N^!c_2MaLAd=%Wp~uE;TYB-=_A zYP(|fr#n@&cHx%tycGzj$xpTyD%9al1#|>THas+UEhqgT_Awmxy;uDvp78 z!#ZA$6ldTB+id+ZFq%p_mL5U@k_=*iKv)8sP5pCH`Y>M3=jLQCTRAw|VSrv>#f^XG z^_Sq@6&t@&4Wsn{<}`}k@tUASUZ?wY;ZnLeGW85*JME5_w0gc}r~b*gDs8=!du2^2|R>C}}VOJVxN z1)4Sc5luJG^3Aqu2^``y;M|6(vz3}e0x{J$yqi=C;gw)6hUKuQC=I&K6va9xS&U8a z_t-+oLJ^ZaHBi$Bic(oO`eTC5_?d1%Dt!qMeD~XRYUtG2D65prd|S(1aq<80i~$HI z%lbH465+^nmpHT-tOW&H3Lu`jfvXi*{W1Z2)SCP zJX@cA6EjRBAEdoAQz?zQDY8^Z>hWLP&x zRfk^^YpygH1bt z=l%ZStp7t(Q2P5POWWq`bqpo1G4#f_0yUg$aW3u0o$4yyX>TuMUcP|*XuexfZ4FLd z=n6@rb7Zr8X`&oL2fNl;1`8B!S$J9A4XL%;t;UvwjI)gV7gk-cPR(#}m6=>mdl z(yOJrUWRMYrPS~@e)S&L8%(?0rIy{u`cvYE$RDQ7z`NQEapjJ`PdNQqt<0)kZkLV2 zBa?LT9a83_XSQQ-wOvdb|`P z6IM7pC6-Fobo;cgLWQ?xix_P-t>|E(Zil}&j-$r^$Vd&|R@OkQ^s$e?IX8U37ruJF zRbD!?g}n;VF+ZI)T-X#wB?UKLNb+RyzV_(e(Dl%#gchssAq1==Ysu;8sTj3!kn5%Q zQFQ7ZW(XZ+5Kj`zDm~Az75tOi3hg5ER;{GGM!|EF%fVie!Y(EK4oibG_Z=#wnSQb+ z^EtUzTMQLNq-%!}H19nV$X%YVdg=Y`+T*D89H!ut0PVBEix$$?Xf$NgV9V^XeaU`6 z_F^fkm({QhG8TEpgreuZw<4#Q?)EX<&6Z?YYb~{F>6t+cZR~SMn<~?V?k6jD_lqhS z#9=erz57K+uViUA0lZ?WlNnv)V_m4f>*4oq5(RGu_2`YMrDuqiOAnliQ>_)fvX?Bw zJq?L$Cr)Ci{O&6J_qVQmG9$CwgRrUge2_ikJBhV<3rKh&TKcTNyQfUB$gQ0i=3mtk z^V=W9b3j2BG~3_pq%xS&!uib|he1g&|J5+vP4MH%+y(ozURfCY3SgoMggVXZzO<(To5;)DBoc+~4LmE>27^lT6-CDHcuTrY?s9N|4O*4#LEuQbr0yw|3T z+;{EHpZ(FokRjLS)<9sTJ3V`&D%>gX7-9hYb4GW2q>CEMll{zaTb6}O^Ye5n_ovvd z5lh}S=;gGNnZ}CRrYv96mhj?@Eu4Mu*1bEpNmG9-XBXbxYzf&GEcw`|;Cb0wGs!II z)>-9i_WYtX7jpk9?yI;7so$epbUQ1)wR=BZ16*@bL*S{HD;|AR`a`B1{ZtQqr4{qe zYelDPFRIJtwLEIFc+Do>wPlTNv21{q*cpq=!1*JFTK{8x{FoU?4jKF`YWJ6FT{VlP zX6f8P&t0tN2H3Ro^?b!23`CDwUf^N#na(({@mp3oG49B8yInVq%+A@tQg zqS*=l8O6_CV*6V*HxlQyl=RrG7eM{To4SfTWVjj6i31^p5 zkEj#;|1c!6xS7zBn2+waFkZc5eXGI zJ%@b_FpRtM_U=7t`8G$(7zx?DgSzN}N3^*!jMqn9bHe8lExx9RIebvG#zyoALcbOM zaEEO}XA8k2yf90zYuHqnO~Cgm@H!AUv;NR6;7weK3{JR zGnZjA3&cI`LN#?o!RqCEYFI-=)_g z&+pJeY+*8+!fIIt)orc5I(>-586-@U)xdj`YIEWEi55`1E2~$0t+u+CK14olYaNpp zacA)13!juDLPCri*@yVJuql_GserN?v*28JS|dhz)$8V9qF~`+O7+ysL~((8VqcVZ 
zRJY_Nef1aqaMHuLO!y(nty)|X)5pZd439t#XwnI1TNS+PukZeRjfzZec&VR(EItI6 zQDcOUxMHa%)=OuMVuE>3rNX|=Dt!5J@Mu8Uv|6MQ9bW%v{>5!8ZlW|5n;k+U6AA%* z`gQcM=TCB+R0?wqj>%5;R<)7my^b>V|bgTD^~`x!qr1o6yojugS!4} zYZL*EvwZv?kEC6c5TqnIENUqgsFd0v;6 zMdBXMqw47@*OFpyy$=DohA1st?4}CRH1i<|K9I--6bMZU^C*)W65|9@$7Uds4>g{? z%nYjc6W%D@n`cf3xa}HxvILXI=uHv+mpYszr7==@Om9Pmj*cPVh!wY;ES=K5@rN{$ z)lZ8uLT-Y3-L%qD%rkJIzIdkRmO)0p_}{pH?zz;u=sqju*i{JtJJt z`;%Dx-7HZI#lg9K-&+5@mX`bbX_i!K^yZNCdB3FgIa&9LK|0F|+m5sLW|T8xjnY@E zGbr%!W#dBxI-UzL-%#31hdmu{O_pL}JPu~M?=YJRpFXCt;Mn|f*7$70-9*l|sh(}U zYVl#r(*WJ?@i3vm<=m4)+i9rhLetZ8;6!Nh;HrCp*vY^I`$8bYjq@@TSXx#-C%pOL zftj&T?}E8HavJ~V%#IWuN%xl+UUT70%gYttQIyD}2GY`%u#s&Box;OUT2mB=JUXN3 zth+zb10c#|&GC>oWsFXZKaTW6)dQ0KIO&a@;%$>k&WU&|1Ci(x7f!;;<63fMk(+`b zd;6`MviKdCtc&ziX#HT~vODp-`tvz-e?&HpUVF4E5PWOf4OW7~;T>GH*iRF@<{u6C zyq6(lOqBvS1I~7499Va>n)$W6&Q@~}nO`&hy|Whv#R!dVbEp?DVM-`C%NIy*mvJ2p zfX4z`FCGNmMz);?S-4ksFoq*JiS46OH~pN+A~Z>O^arL8G$oeHR^&zN#nSF3MkY!d zSBINPgxeX0*G4E2pUdw5L)}}4Mb&*_!-^;%9g(IrEbg<6Tj182JnD&%uc( z&(325UH7Po`s`T0S;=Ypu=JzPwKimoiVYzKnB2J4b9(_d_UUxdQ8l-Y(8_9Co@EMy zB|YXQ2e^Y;7te#?kNc?F%b_B{SjKPLw+A}8AZKz{4I7T(-b;!8w67n}hzgQYork=< zZmZ)A77RL=G@kow{T3hH#FX#S!Q#?(m*e9qSa<&LF1HF>O#c%><2{2Y<952fN7{|g ztASeZ=c9_QtKr$x5PCH7g*&r65FCTi{?m}9MfGk>?A=fMM`&S_@CbUlJHnXTaSyZC z>L-IJIt^jqhVDB&EB<~*Jpb{+z{;~#jLdPl9Hqr;^WWKnIxIv$nCY#hfTo$zK{A~ib#>)bqHcNPX zFnWXA7lhT0T?%$0)<|P;#q*nyB(JtwT)HfID-(4>2>X5kbvkH~O36>Cu5Eh;*b2bv zJOg82Xa>=bB-D{suu=}Fn%5=h2WvdLl5P(m=h=85%F>~8aU&#@;;|%SkdsU|U>FQ- z7(9jL?%wM+kM{n{;|_$_s|zSv`{I2t1;B?s-R1^loZ8GBJ679Hps)N9=S~HRiTu-r z&pPzw(m9Lc&A;f8C?xY2Yly@ey`Zh28Qb5D!L$!m6n()s+FeTF{%Ib&#G^H=0A5>7 zJyJr&0|Uw~=%hV!8t0g1=Ndd%A8Ua|By6UGFFSL+bX73*?N97@F9R^W<#D?sqa2K< z!oD#zelsMfc1S;cf}?bN!ZhU0K4s2zI94L!o0_C`uOdzm*E@g}$?W;GR}hx%WZM>w z?|6&e-DvJ<^sa8_n)1D^V#_5@LG_PZL-0v5cUz8E1M6z?gI4hJZ$1Q10pFq=S)&@{HQh~OhMczp8;f~IPoYU}`Us5GGAJKapZ@;1!rxrcr*3ewK^ zJ`taA#wQty4b!FSs^~#b*!2W@hMG&y=?`p7TJ0&e;4_`3p})qX4Kd^rx&FK|MB8^| zY)spF$SyjXxqFYc|3UQMZu)-01hSTh$OJfd-UlxCYVsj$19EODx7YV*Z*j}c54|@~ z#lSmj3t$i~kLbn>B5G-9h0Z@D4gvU~HJTXhDi_o1#yZLL58CX^#GTp6;t%#&?Fo%9 z&zsgMXC`gBi587jf&J-l%uMlc00vpqzG5-BFbvgK8_U5BnT}JwJ=LG`FaWX&w4saf zF53E%|MEwgNSifQ)~&|ECuY0HeWw>ej(uZvoXg9z1 ze&#IEzj&uD0Z;eJ(@=%F+eRa+LTml(54|NSS|ie^)GnShCiwrTOgvzW*|ct`CT#!K zCVJ0-G0^W9wu^C^RAu*}9Chn}tiEtAOg51|BUEldpy=rAa-vGGg!6Bg_EQ3vu6S@IoMY z{j!4QyFx09**koNlsE>Y7=?5v$8<;Efh5B?H=^-KqYwhf42O2PE}KSJh?{Y>QE0p! 
z&(Q2c6lp6S@_e%Sf-dEc1qF`f0v)G05ug;+XbU z7bG{vA^L_!I#7Qilo4slCcgz`FWj< znq9T$ox(g~Hxw^HQKZCWQM7o5Qe%wwG=slCm4be{+wNKR_JfOH{q=`1Dd%$|aP(5Z ztGnF0ZD--yA*J^C-Oh>*FTW0Ky_#{W4H}1E%=*(EA8UC)k6d;Kjbd>uENP?M7dsw; z>)q`DcJ2@?ypF}3HN_WxdBnl zB2^XZcA)%3vz-#PB27isGqDhX05Z&p#@h;qPx7t;^3ia(OScVBAA{XZu*tZ9LW5!V z9_Ad37c3}lso_vH(6?-d7<5fkT*#BLkrQIleDI~*3)Y~nJYO0hJR=u;KRNNXcLsuv z(O@4EVOA~ELHVx2v{$2d%N{p|dx_cFo~Y)@aiiftb`x%FROnS=H9T&}2sE>@(qT(` zwLICB@ua?EJYdk8c6F-|x_&WEappg#t7A zdKgA5lfTd%?-$zlvU|@C(Wp+~5e_9#^>)fXKC!f#Ai&{DSoN^%xdXC&9&)z0m7b$9 zck|uFsoY44{&PdHs_5G%0YFq5WD{<4yS=1$=FlQZ&^wCL3LiZh!DJVvJ7NxnP4}{a z13xn_o@H4K#(Q56&?0anPylIBys@q;NtgX>K&< z^?ee+@M2|<@wgTN=b3r&lNQZKqsq$gM+O{p^JI}Ai{=CO6w`-Z&#)X*|IWC|k~2JR zl&Zgt^Dv@M2aASyrB}L-sMchI&2OGqC8(wUapRzJ`m;Gov=3Ysb62~CHb;ahkQF_p zM|*eL+acF`0wO9?CM%PJ*NDvV(sXfDpKmFbP6;Cv!}7_((sk_ z4oW|UneChWIDBj+41=JM<4))&(_vq%k3PPr)XUWEa3ZtxEh?bAYb}6YUagaRmv9fF zsIPXEEzQb7PHJ||?TDX=5K10}(R7439rlB0t-+?tk{yBW!prU8Lnp6=#T8WdJsa!~ zoy#KR)ctndK>1Br>#W=BRE54qY@Grhy0(wcb#+cN@t#k=mq^#1GoG1{aes}zkJ8gCcBh&?8y)(%@DCpIx%dsPOtX;JsYzZ$NUnr!VO{kA+&iPWud-^)i^-`sq}}f-_XH+MImfeCG>NVC@RO-I)Hk zox;C)$#^F~1R-p8!FmIX9b!U!l~f{jN`=uLPCsGuc+Cpna8Z)tV?Eh8WQk&#|c7pw1!zUo$$l44g=Td1(@Gomq< z=;q$t>YxP=zi4jv+;m|N`D^dT9-7jMc4e={lp!i&Xl*G2WE~N_X|wg>@2@+%MA5e2 zq_CwKc&`a(L4;7otkwdaUXzyp!k32_OTEr03vV|jNSk7!M{?Phc3u~S6x7Z}afO$5 z$X#6XL>a{q;B4!YN*}UmQh&H55R*1q)}N3s;e>aIccxq+2*(n zm=|B^;GV{`^hv?fwjWek&l-6r@?GAHt}agc#+B%X8hT?9COmdT-#Vybyh{+;?{Iqr zFD{a5e7vY{@UBzJ#GQrGwQzXH9NEfxip=%^^!cP%&2e^nwM0|WaG9P$w6%@-`?A6r z)M%xgh{yVsYYA*NHd>4h$gUe7>uWNzXAtZ*wr3%+?7+8CZH|LE8a@y4X}TuceY{X9 ze}4z?y89;xc148v#ZPM(AUV}`b`O0#uO>@SqWmk+qsV>l(Sz0mrx3oho0kTl*W0lg z7h6wjfW-$Kbc}8S_BMJvdDrV09o0AOj}P7PlCGqxJG_HI_u{m08{WNSeotWq9aIrE zhBnb?CHvW72i-mA-3KHLa9$kVqKuxYmHA|j%wR?;D5GTpg81p*(2k5b8zg=+r z$6uR6OsWD-GqN~MrZ&#-cnh2Okhi%4(y#pa`+37FRhW1G{mIdTZ3=HOAf)D7Yfqmd z1}?FzF&A5OK1Pg58l2=s)y?j;zs+|oUdB2)Vfgz}Y3HVwQe&gVBQuFB9Q$hAF7$K5 zJZ`L!Lee8^%a?3VK1yqO&67s00L5UOMaTDkB=Y2|#bf;)u5=*UJrd|n+lk}%IL_rk zR7VM~bzhzI*#LAuZ@)%YH1AnbXW<1!E+iZ*|M+GcEwy$!3%ce(d~pg!}%I@(BchT1O1>;YGN^H$$S~w=O^SE3!ODr$t-`&f9 zP>9+$gA8H}M?Iw4JRsk^O7uxGb)E!h4se;(xt;=XL|&yJlTf4Tr(70f266P?PmKZB zq3`j6J%uiI$hW|qC4dqJGj!LV;;zz1U-#{w0ES`Y`)-Ig#;CW~>jBa&lHdo05m!r) z_wSeask9%%m}h0?LjxxycI7)2J~prMmqTWewymX)y`#Q?FcHS?r!m>xhv$alj}n<; zlj!;KJi31=Y*3AN;y)UzfRjDzJD)*%K#!M5i-U;p&dshz5*F@m*E#j5uvSJ z92rUr^Zv}_9pV2AX$BRn?bF~vOdUO}z*X3;@qtrr?ik($RRf5NmezZ}*3EC?!F#SR zzjBU~h0li*U3FyA4q{(@;1WPW_IFL4XHFlYWH@RjAMTk}=HF$+jf)nZkg$4)&-pL3 z=_d|(hKZo0EuOw!<8siS*6Zb0qi>Hd9QQFgtv?~n{S=! 
zOuRQxn*L^ZaQfCy)2t9?^U>1knBkyMXENCj)kJCZ8)zKBh6IM(K<6}g5I@by>Q(pJ zh~^)B%6sO)EG@}1`vFnvO3K!`Jx897W%=~}>q*y{DZ_u?`h}hfOaf93UJ;vu{*L$% zL}SvqwvQ+aGxC^me7sjUuFa#qNI*D8uhGB#&Y+wB%_u335Dm2`{_iBe#1)CfGlqnyU}Ncp8fq;T_XhdY3O)-OIxFrZBmRTm5_blmE@5~g;f{#xX&!Kkz`Tg$@F!3A!9V`_VO!Ys&-M=`XUk`0x ze&$t(IBowGb^ZOfe?9*{@&)l>2SlydWtxy1#Ot~}y56Ed_1iL#>k`uux)eMv;5y?@ zA?A}MT1~_hCO0PvMg;_#{tsOD`;^eN{-PZA3&1c1D!h*WxVS*c>`9+aLA--qY`UYU zE23il*~xxS9sac(m}q0ILZTh?ht1o7^w}BP9`?2jl=S`zj}M0u?%7XF$ep+#4nZ&(EzrKuNEa`OJ1f zF{Px@;jc!wQu(i`pS@Eh1R|duGyc2eKplXv zF#I=b1L^Vk4M(Mm7L(zo&`(l6s6C-?0Lyv5bjyX?tcSAeb&U*Iz)I6V8i&>l`~U_v zh%Zrgj?ETWa)gg%)dlY!U46t~&W;v4_-d>UWJ74s)HYXW{hrYCpRu>#qut+JN813% z97LtKNP^wR$_@g69A`E?q1Rb2hBGBWg}GW4Ydsqy=0z@y3S8KZvV;inel4zBolW;S?2!TfJj;~i92|dStF-lr+mC8 zc6~cfe70FPCNfjM%;oQFJ#i~BPac>4Es6QJH32XgDZSwWSe;FZYb@b!-sCBMgZZ8v z39vV7jW9oP{!aIkMOd}iGuZ32^?VpP3o1e|_*`|$^ZW+7|2KO5^MT+QkiLQ^n#^y% z_uX=+@mq=rXdFfN(Ho##E_#BgX9J`(Q3T^1TMU55T=9EU$FmO97 z-zO-A%voO^2?jj}((@BXi&b>Y3KI;v2*{hIkE}e$ID_c3w_@Rn+%Tr8M)$o>`=kmB z^{M|wVg3eOtic>y+9JhHMeVHiMMT-87XouuS`AQ{>QRn1Gku08a4Xm+0~`u!dB)wx zKjV!}5Vrw+_PIGA4|eT>qvzd!X6fG#f6TxdT0GbG@2ni&DUnHD9h`%f<5-%cMCm*^*4BuGGE!ADsF3 zwy=O*n5#Px{0frt#c6(C;+t;WCqq){2BYuvjn#_pJ|4p^qElr~sr8c4A= z%#D%#%o;OVpw-?3+CsRk9B%Lc=@?tE+-@f>`EO6|F)BmahRa57jr0FsmEZx(#|&YJ ze0=e8Y!}>(Potf3E*{91=$W=}q6ar!<#A#!0BKQU!?lhK62jnzDp#cL{;l)hKFlAz zY%hvj8htF2UuETC9VA^*EEA1<;@zd#R1f4{ebWKQ(BWwUg$zuPGamlJ#sHN)uN>$9 z?dZ#*`v@e%@#G9f0tRZ0a4`+&(CC@R{%4%ac?La_#aO-fPmVys({O8x2i0SRI^D*Y z4EqJMucZL~%HW(}wbzmlWv&Eu1?T3Z){Nbk0lHMg>pScg2R~S7PI}D$YnA*BS_QV+ z_d>wjH~zStrtb>c`>sXyL*k>Tr?j?YEA4&3 z-SytNB}7$pJ`(qxw8tN5Ed#`^J*Bje7}Y3oRDZedzfk@E<3y^Jq8q|By*A$wo?l|^ zzZOyF=C38wy*W)X60s3TBp5g-iJb{Ml&8a@zdTW~dUY!^X;yKklXtrI zgjwfm$1T^5uV$(#FV@-3vnze1ukqmJr1=T*7?PG)RTR&wLSo(g5L>=igA3uCTbr5L z`lF;#UowE@fx6@%w^A~{$OOnbO=qaK4VRXG<_IyJ564pig(%_1y%`@Ltq16*8-SVs zwMmqRKU{CUY$tdxo=+%y-oJ;Cv#dCC6i)pd&vCWiJWvGiN*kqi1xC@|xQQu#1C8MQ z4RpG}S#%%~jtKdz#Jd~81aBSQa|be>7hDxRzo+XbJ!QN;RX8G9&NlQAv$YMzD-(nG z7JXHk?6Z7nYgHh~zV!l+T9DDprGxNz2R5)F_4A9ref;l73#yxWL{*WrRlhg(F269Dwl%*dd8=dS_$^=2;PAlNPX4Zp^b zE%#F4rZP7M=AS-R&wSI&%LRJS|N7U*#DxgwednjJ9zmb~Jsm&4mVt@ZcgluFsrHAV z00Znv2fBNf&LI8A7U*7~-wXsl$asDG_wN0g>R;c!z@qc|a{cUW;J<(Rt>qhNQUv1v ze{EV^?+m}}kO7DuT5*w_>*T3~z?Mis#Ieh}6Z;0}nPTzeZpPxg-x0yCpZ=f}z9#m8Ub45UZ89FTFk zz}a?9xJ`b4=ik@;F0h;_7OZ~^-?LlY{;<%;8LAI&fYk$`{}Kpfd~9qC(1o1R3oa_* zpAuJ@`X1U7RRidy9Pu}m9ZM$yNA*ubumAs^FN~a>xY9? 
zn5xCQRTg{YyNcs5dQX>Pg{15ce`;6>y+Hj`dwh_b#>mYKRl z`QRMlv=afJDBF@2W&9;$8G--}7i2~WUv%qou-R7(mMw!nr&_iFT=42Pc#@**e+)Ar zD}JcVOc_fz*um@XIV&El4!B{HJ&M>I9UZNumsF45|LaqKtHJx-u`cWRah+>~zURh0 z4V4jSst@WLVn5pis1t)+Gr#C+y4WwI$VQAb2()S3A$;<)?*#$k#!K?~8fL7DBt1ns zNh#=c_aY--8^d*L47=35KMx2D4VKOvX%}k*fg}fAPf%@BR=Pt8KfdXtXi1Zq{H8AT zbby5NryYh88@S?&Tv$x>;QOA!T?rvq&Z~9BFSuN4m!hU?91m5038CLRMmlcGju0bL zfy(-NL5(n~V1O$8Y10%XU|4P)KKNa?=_17+A`M2Sy7bY|6yiCZh?#{6t1CAPMBvZN zuZJS3YGQQCO}?q5{dr6HsCZ+6JyBs&lSK)|O4)1IfZPU3$dauRHa&RspZWXe=i(3i zl3uWPmD88L?F_ScWZYdsmMiyrfBv`an6R`&fVaJ-R8c+b;5r$aM1tzQBQPzkP0KVn za*U6A0_PekFpEb!2RWWHo`IYrS5SvPX110~8g`s$8?TMJRGN@YJ3c2H`RN}JfT0AV z25l1-cL9ZXo!#cV@oVu#6P88O`JuS$b90fp#(~quCweo2TSf35+FR(q&HX=*U)6y< z1mSMR^3tml?y$sRgpWb({9fp2bOa96YnUqNwgxv0T^>;5M%3FAn9HZDy#xXj}W}&<|RCog(mrR zJkB47FAWTz+F4x;hyUC4rV$WE_F6&*6f3$E_9Hew&>sfc@tz0#Bip>j039ri%czEA zy1=Y^Z>saYTian~FbzrH)Ktw_hye(Jdoq%K$yR>7av|1GTs`%m$2#C*fF-i+H9(EreLLE0b95j(Hv%Z3sE|UKCk>2F(ruYgsp9)*-{VO#62gBZ z<#)?he2hr^bTdOm=$*;Ptgalqc&O1H25fep!YZajPv_gedTkX~E0Vzaj1HjPI>Sxp zc8(RjS$F>2y(cWcy7&F~NPe@Vr24dY+zgsT%=gUmQ2Vvu!?JO04ywh06t&v#aih%q zf0U&XK$d!r`j(%S0IY5@Y2N3Dp~G^aqWWhAfBwzJ&vWJiXX?sOFaqGsdbBm< zwfO~~{4rf2*N1MWZ#&J#Wq!#c`G19!2lniLt%hSm!FRqz=ARqht?EY(fjLBx4>Wvk~ zNZ+PWIYA!TU0)tM3e32N>r`5lo%=RlR|xIQh}9(}DJmV5_g-(z`kcdeF$E?~j0LPz zk0W>432saNG@)J*p@Ex)>KGA&n7`{lPfW{ueRz+fCW4B?TG7pkjzpC>?3nc zyNfSQW}LWcX;b%k+)g#{UA6mb4O^eP?KGV<2<_Ah+#lTZLtJ0Zfd%ax4$=Q)SqX*$ z1_p~CmuXGX(NVD2MVI$M@|+v(OB5?fGvoWtfVdZFzE|n+42fGyQh!;g8Hp{@TX?~@ zfA3^LTnmusRd77<3Gb1Ui0&th>X%G(ui4WF8{ELJMg`~JdlSRmCVX_<7H+SVb#TYv zN&jIb3Sz`-zDShbI{0K3w%FCx6;bJRqz#aqky{@)pRs~rgPD>H*Zs0y{HAsjh>v3+ zH|dkqsr+M9lCVb@a8QI678W|S2jRMSpC50Gat}QNI_US*uX5-9T-s;;WZLc#IgiOj zE^qrzTkDAXgtTlPzxP2E_a^jobG%rmOW{1Q+cpj;*NRV^q823#z0ow0vc zZr%5pn!=%e>+5x``8v5AxDOIT_KS*`BDaco@`N$Ezy47TF$fL68Dse6QT^WFzk7gR z6ceCXWsMuSf3yHI0suqIg}|cT?_YuYUGUd6e{PMv$+LfI`3AaR2(Y~Wk2L+6!w;~? z>rdu<=QsdTJ)7?DEmP??U=rs%c^JQDZv82m3pMPXh{RtCUw_B1t4rFg^)?wJjZHL- zh;Fmb01GBl(UA8 z&elt@%frW<$8;AbtwzDrt3i0wNp32!%u4UB&jx(ku9^>0cb4_=yA{!*h4({10H4TA zl6A=jp73rVII?Y<%@APHNblh0RphD`O+p(i;P|nzc{V zMfHB{&a|$Otc_she{u(n(fHj2%ZCJy@KaBgJQq*P?h@TYSG2g6?o0D-w2qar zaz$_uWO%l}^3pN0O+#ms^qto!;J!#iXKv)~^xm<@Z^%`MGba%i*;C1fiP`!7NcCJ+ zM|QvkzO(vXW@{lQvZU>C0vdMo(Xef}NO zAi$>UV4*!eqHnbPJ!ZiMyNZijLhOZkUjku&$gAc7;e&|t0Uq;KqZ=`2Cab)N}jXyH;4Qpo2 zzcr7Eq4Wy1)qtp(gQ$Vhy^j;mVa-=7agVeXnU&MFkd<6fFa z#Y3ipf6BK*t{UcNs+*V1bLn>OoRs5b=aEhZb54>188Eg?+aKlkkjLht&dm*rKOXyD zUe7v<;dogkT`xT@kVQi?eFkX*Q!iJ%2nTCb-U>1}WX<{6uZS|y? 
z-4``=N8lhyJKt@`Z^!LbgKh8i5X~!L+T5)zKpPO?j&S7 zQfHej{E$&(Qg_`+^R5e;XgnI%F){$EF1k|LH#%;fS&V%%cVCJWXydQUaR|VD68|wF zx$F3NS_u?nVem9n1zT_c+;pt{H1?H(Fkf6G|J3&lR9G-TXIdYx@A+^>-sL}*8@D(9xuENk(QtIW&r=+B8bIPU2 zTI2^joDOrfz{Q6Ys2y4XB%khT57StAqYkm=K%Uo~0p6rJdKmk#Bd#cbEgWBr3aBfw zwoh%Kvx!Ql{oqBq=_Hw+xv7(W&okPh&&B29eptHizFu(4niE;wvkJ0c3}K61E2KGw zZJgt-NIk3`?CnT>(vhq(6QJz9PTFTcxqXv=#8#B~OsIkT)$nYhFihm2f9++~q-Xni z9YTP7vmRFE1u=CdFkys{ zyBp3lwOu*#lA;8a>9vs?Uk?yVm`!}4;IsNHwER&rg)8odGF1^f&i$u@aHj#01^ki0 z!3&v+-pZk>Ks+(Ugvaz3ufHmN2jWHKp&a`|_%EhY8V9+xXyuM7_p1H*&&5#d**dnz zT$#(q)Asnc#jIuaDh5QUP*$Pq^Q}q(HB)5r=Xqxfz|{?}g6Q628w~C?M}5~6(mX84 z1rVPpvl@2ve!gB4yWU{De=5r;{)ZfA+NaTRbY+HT=BEfC$k{?T9yp6Ak>4z zsElQff4BguV%UB5f3WB@i8(`D;-*LU+T*b0|A=j;-}c9~tLGQem+r z#g2w(OJzgPWU!RV`p&o%20wHH{rT&*o}X6exXGTy}b(NM$4A}%}VKKFvW z7|msd7WegOx7dRr1s?B1hB346vR-w-!E?z*u>l;ud?Zbb%d(Z5lNB3gYGUO(wM%$E zodRa3I?`gReC@J|jPGS%l+|}e$sKv!RIVuHWnL=LlJULVGO)!oD#EP>Hz4cRF{+~^ zyX`A1dD*+x7&tvWQ_cJ|Ke94UPg#R)^B%`xKFEA6Vf-;&U*1}M1UTXCSu$B^I+SKO z)-haw8)UmDQ5i3L@yAkNk%Ev!&luf10=8+r2A#7@5}&=zpljW2#h{IN!A0FLkwEt# zW*+c(L+ea#mj-GBW4ZGY+uA<(=Z zx#gWNSb3BI{cgIS6(k7nzVSq5gzDFuIYTY0Q=gX@4@W45>J!l(%?^Y5W-&aKFyzVXmtocf7DVPn!)=JlNK(6J@<2X+-YcEt^D0UhDc=ag9 zu~Ro{+?UfLF5X?xPXF4teS(HZKp6T07Er zD!v}dCpR6z#XpCI>lbjo-U7AKyzSL65j8O$_s>PHSJ23kG^yIZDRsOWS`%Jh<*<7- zeuQI|ZY$zrk}%GEVed7}3u5O*_hoM6pZsK$oRn)yf9n2D`L?odekMy2$4i&qtIf=*+vAG@6E*eb3m5vpujFrAGu-Yl%q^T)N7t122Q^DC|P7y+ZOg>*z5&Y473oX_rtCLdLg!Ka+bdr4pD9c9Hft3pA- zvMocOT8yKfF{5IH01MXb2UC2Y*dede#;A%iR8!F0)*^HSizS8NGC=@TX8goYzB^0H zsyh0!52H161b=osc}A|UI`hqz{=J(R_djyq>NNw)UJMWOaVU=t7cWJNiPkQp-oJVZ zOQ;G|^tv?UNjDjZHiWuz6N>u}na1mp_V?AT#Ocgf=BJW|DkZrXC$B0f3T=MP&Z1Bw zl3)z75dIJd9N%0vl1Gdko#qRn!I!KjQct*5KDzHm+Iuk&IWB_`Rj{D9i6Zm^9Z!69 z3-YbyyJOhDLaUGS4_L>$X+Snl&5pjU_|=oT;OaU-0X`B7{})pUVIpox@NBxF!+fzz zOJIANx2P+dWly@9KPgt{KWl%iKc*-PC9T+!*#U=h*0b6?hlS7j%1QXwn8|DiYYv*f zpN)OXgg1BA@788o&jvf9mnI5vd6d+|y2mz`!Ov7`8I*&C@NBkRjT1GlM0W*>2n|fm zKn3Qud8d^mDpZc14881TC*3?Y*swevy09?od`*Q+%b#^SN0dfQn=UQb^}49z-_G~al%0R0)4ly<9I8{< z2ftW8NA9muXtFh2=PJWzb$GqkERM$=*F?y%ObsS%a?Y=Omf*E~7`5@tP=~e4bJLT> z!dN&9EgW=Mtjh_T19wdnC}+xlzQQj#Hj<))*$^Bywk;Jg~< z0jzjSwrmJvmMM049H(1zbDm{a?Zrz$hejdKwWAcXh9(+JySe7&vRAsXgC}04ica~F zpK8*+kIOv!ym2rs+OEKz_NH1bZ12=o6%heO9x0JINlNCGZ_^4}{@NbD3Zt!D3T-MlNVf*u5cIFdN3HO`w5OuB~V3%s9WjGBvnZB$E`ojCu8mq_Tsbc1XcRVJN-7g3I%pp593{ z@5{yb0qqsJ%n7Ib%N7#egMk-pD|`ovu*5F2HN9gSqi$+994RBcUQc@zVncW0{F$wE zF6uMgR=EDA@`H%k*!q$xMYW2rk49mz;ODua39fpY9-?KU*oudrny0RZ;~^yKUmlzJ zt5;W79%@Q}4iJq8{X?Kv{0Qo=O~~QrWiH{DGke{c2PI827a0Q*LT>z^0WKC?zmn(k zLI$%F-iTffkf7Bn?C1o7gwXVLOWQz8OF?u5#qT~yp3Jmd8qPVETwB56+5wn!QJ}y4}JHcai$v$d;^r>=jUf?XdDX#={`TN zeB($^u{kP{I%)JA?VDDBKOax7|OnZdYV#eb7r4^&=Gb77M}8Dn1T!PwN!lu ztLC|=c=tw6UJKeXT-K(qOi!hE(UN)E+~@CQ-_WkS;;24&ydRVzH5-M@Lk7)?`-+-nf?=1Ar^j!*O^$e{8RabY&qMNE zY^Dt4&kZ)ZA#r(fhOGSS!3_xI!wKW)X9%({B-R_`rNm4AcS4GdZxHO6=thgYjS+V5#T5`ue zeQE=NklJ(ve*RO0h&{|S^R4|wMJwvK`_tAGx6wFaEg0C;owE3Uf72D42K&oA8XLe)6AJuPt!_Qg?0Ymu4Qt$U& z)nAQ@=z6!QP+}Y*hBixKG~*8ztv#|ogwj?%Qn)Yll{ZRC}A$K`+(_I64-X*C9%lP3FAexgQTG?HL}N~Lg!|%%Gm8U!9KQP_KQkB|ZORYp*Lz1W$ ziz#MKwmXrC&o>Or(l^~>E{&v_F{T1IkM1?X-k04yF?&Ex zVeB2p8BbaNvF$Lq*!9JMI;doF0$W77ux3nM=R;fkev(uUwAWTE9eRVu0BSMRb$z!! 
zh9thsxI3uAp|Z8P&EEHX`Y~r``kTXer4XK*Kg?5jk#7f5t8$%+2rxW;MfodtR{Iu<_roEoe-*ST-&`u zkWQI@a1S*ae#8b%0d50U5RRY6>W_m`hXC<-=2G{`u=#I+O8CGrW_8k$s5|xXD)k3d zL-XA6o9~-vhQw!#3xxn7&+UrdG<%<(#cn@Le!+w<_wZ?NE8OL^OImR|uWgm=qYyjcytJ^wl{94%MiG!3r1@7%KqkZE#_SCuE5fsGzr!l(>5*Z1!^{Cg)m&emG zw%X6dUMi6soSc^JSJqB&Z&-dNLSxL3B`^1Ox$sfV(|VM|oh{PEyY<7Y#e(6y>3FKw z4S%FFL~+rg_wHDJN0^s?33cHK;7M)Z@>;+yF0j1ls!}t;3ozTVJWGhHv#iVZg)kMJ zE&yKl(DJEL-K=oh{mGn}lYuXsQoi-1RS&&{*PE;>HWtsnbb5!2xb3+zPOK*lE5KTA zlj#z0`i@;0oT2M@=AY!IHIm~qhjk+A_NR1Sw#Ap6dx3j(ZR5&ku8Rgm-^l@GcGk#t zYJM$aXtbX&03%FldDc2Y3~pMhDxE(xu5s+%7Ot*RG2GZuqQipVP2carI5*dFU$~?Q zRhIHvxQtu*n0&`S_BK>@nqG7GbY6wFUXB*}u10jvN0YHk0Tl3Xn-WvavuA09Sj=U_ zj0?{;DVOQz;>|R0c<)pmX(K<`BR;=|lC3`ykbEkjS;9algl+-TAWiz`QXbf!=5y zz!WIlUsMu+lk|Ifu4e*lsYeW>FK*8*^b74o9RDbXmS65yIs<#7rmF`hLN!M{a;Eemj~y-DcH+ zH0_B!&n-Piq~^Z92p*o!XwH>Xpf#h4TW8v%%wjqsvfNxh_3b`WyZV9fKE4G zeYfKQ_BAGY>9^Lv8268ea!_&y_m`E5XI_nw?z3c&E5~z%mpnxmyC{|lxJzp+6Gwzj zp%iPZZM2dO&30D4DBJcnbal#dX(3a|SJYyb+~95EiwdtcVSA4pGmkR~&=>f{ixFZw zZ%|ck?}C2aXT?JOQ=%f#))DGEhxq(uTcmw=F*#MCH8eKD)RqcnO2y>Gr!P8->#KYB=SG@K)61l+0a>p6DbTfm{?_;B!{DAjf9q1DM|AsDYa zb59m?C}=}js z@nKpYf63b|((^i%o~1kvOjUM2L_p*M1}>ft3x)($&CY(d&P>^w*fJ+*Ude6G8{E)7 z;7M0GnTPbWT9me~ekAlLu6Nh)8WG^n!mRxiHE`0^s z;@fxA4!=?uwx00b{3S<%e($+hI~q=UiAJCbR2ubpOi{mZtJnt+k;lA~{(@%F-TM@Y zbI165^!cky+8bX;D}AA5-859toZU+ZY788dY+*e<)DcSD9v?_}9b9!xA_!c)FRWf` zpNq(m4B-&MP1fBxWO1HuO7LRl0oxBzZFVYez{22jHy={vHdkW>aOqCEsTa^qKf3l z=|iV`OFU`%`IFKZ_Z{Ak?5jfk>j7tw4s}4si%{ZDbn58iP0yc!oK4CDAW%9{@Gw<_ zIw0-c!-ZYoy4!0Q+m)*(=|L$!lpd+^R_>zFn*U>oRHTpw>*|c9??dRfuWi0M0%v|N)>8BAoQ{=cC9LN0k9(=m z;S1=UBsZ#{u%w4PJ#>tD_}th=0sdCZ?cz!fz-AU>9p3d}xL(3&sB6ET6B#l?4BSzc z9c8CEe9^(~Q(IVwg=CQ@^f{?2o;jSaISUk5O(K3I(+GHZij&~h!By}L4Ex!h5&#MS zZm0)s!7YsviGbU&1UvblTGxLS;C@0uYX{%^j zqay@0vNxibJP+b_VtAhXbinAviko}Km^XL~Mr%S12lv^q^ii4CR9{(AbAS5wNF*7u4Y%vEISwaB0%4k=Ct_GH_8ev$yG{G(hy;9h*rA-m(U!o)FEqnSMr?M>ZCHAZ zs9n{}n-W;y4qn^kxwule>$KDu-_k%o2yo7>*J}Z15JN@T(3QoajYONp!{^+WXU5k+ zv}2opu5qI~oO`5}aWTcA4Qt5byLnM(N<((EsNl8%EXrT(PUx4pOMv7+Y2{$U7w{&! zPU%2j&|FVV+EhSO^zUBnt&Nm;Ig5l^BIu({}N!;-qm_9NWrKX_u+HGCHc_}1H zm2PI~b3Q!>xZ@S5l!55=d0i|&DS)#TC-ys%IdsBL@%VT?6`yQo)?FvNeAZ)U45x0M zm@d9&vyBW!87kXiyR_OPp8I!vYff#HoB#*1i?RC7`22tDy=gqu?H50OCzT~Fge0ku z#F%6&+1dyp`)=&AXAfggO4+l_*a<`S-B?H28C$loPhsr)G7N^_C*60;{k?yW{ty07 zeh+xPygp_==Q`(HXS?3#ToWT&O*YI>_znkXR_rA*fT1{0X_08iXm>n{aB$)4qRP}R z2ePjpl)j928Fq1!DrnrqgL4;oiAx>ttI@gmOR1&AyHcWF=Zib;XbbW}&*^PbpzDI8 zp^)f;nP%RVxZTq@#^mDJY02~+zF=`D!mMBbc6a}Uz+G7nCa#OX{YxL7r7fGc!@7Am z+M~Xl?p5}MHl16RE4nA3BU6+kz>ld{Kq~wq+v_8V&nJjt%HY|1@rWLk6 zRs;*I%Pk+}v%=6+o2kxKb9`3fCzIn4m>cT4(^panToBJ$ppDfsr_x`YS6eo2|Jpn4 zJ=XM^V)ihuA+Ot|7hT=A$eb{&`+}c~t2QKF6nrPoCzGZ#=Rrs$9Dxbr7Jj znT@vVOlN`qjs<#3L%twBFmBxxF%@ps*{?1Q2O3k@EP5CZaU&7Fk{=+sYn>>CEBCc! 
zPNlJ2D6p7Dj@W%d&~vThdwoM4)Y%f31B3w_qbM81)PfIbKdjWofas;yy* zeYra(jT%C05kE>mkIpm`9!2LoIbn|X-|SYI{-b%|qZHX|3Wx}-QbWP$eGX4cO_4PP zB>qtX>=4JEWI$Gq)}6qwRykjn+fRxuTcOHRQ~6SO zyT;``Cl2p>+nK&a_2+-kK!0Snwxlfbm3gQluf+P7OAvyDp*|h^??8J1LBGKYoCS#t ziP`6Rm>)HSF~nBK8TsZph%g8K=-HvAoLql2x4S)CfBa|)bPusC?6tq=GWY_w{7T?Z zcz;94MEt!k-9UQiRw|a>);X|el~_*i8ES6g%(3NKq<|n(WpsY13N6_nn!PBJ!#QCx z(WI#2Mo%_)^!%BB>57wGmWS@KN=EgCny}y=Xn1Rw_23@xp%S$l%(WUBkGoI((3@jN z4ygMSt)920500Ax-IVs!jJ!Mn#ztp;Q<^i^;h$a}qaGP=2yj$K?S4z$HO;yR=J&YM zLttU*vWN1C+(ayL#J*&mi ziB!Ky(#PRik_Q&JV`vikEPN?okzOJp)x(S!C3dN49wC>&1i?F8JAdME!6s+A7%iZ0 z;EA2jgH87;R*Q8>E(_FmTg=ezJ3;%XY&2RN>byKQ+SlLz2AsvpOs4hYy!Z!b&xzv5 zXww|b89+;Yvpn`rU%Mn}x3^fM`%~DGRk*c`^F-;3>KEcmxCA%fJVL76@F5dJBfZgo zd4A@~D$(*FLeVY9~o}2LmTd+43?Xb`*t= zRUb9FU$D8hNd1i)qWpI`wh~ylUbnm~I$n$PVgZY~RYY_{((m!r+rWh-Fj)%9BG#c2 zzX_d1>QfE4NTEHA!;32N8Z3Iov?*m*p>ivteGO)fhAkj=H`|+9L3smPaxuit0oYX%9`# z58f7^d0gRT$z@aCxn?6X`{FL}QerYAeDI(%wL#^B# zQrnpk{0+Ac_v7VP-e{=vgK+Ms#D+B#*fwi<8U&I!c*7N&N7Tx$fL-rh!ph4IpSO7; zBMw|I{kIj9;49_!YS@{UU;Ws3ikZ&BD!)@2E_rD+PF9I2)pxS}Rw9#HVTWpU;u z;D)S>B2VFfdllN@4Aa0ZT`4Z-pzkq?ey{c2l%t8o($*7JA*B{{KP~a3+@~4{2Bi>_ zINF4aMAYU+tIe5z@;-ummfLRgc%_hUkQC3#9S_%-R<|vnX&ZmKi-U;wu{k-%MMO^*FtE=Q)>tv>iMO$n<1+ru~a=>T}krm*Qj?pj$*BC3EW z)cupcNwkzR=*E{zA!{@iTMsM<&>{H>i5?#A*B!KgRJ+eRSvO)rCT5GPaBqm)LP&a5 z2C&@?83y3uZKL&)(mm*%w6o~e)$E(s4j0laD+#HP5mvAuyfB_(48czi@43qe%fj3; zdW3?NQF0!?-djX5voCdJRy?<;F%`xk&gE3RZJLa*U!8#V*h}EUtwqFZcoI+|W?RNc z(%0Q7<1J4SwxE?hrry~f5~I4B1jbqmKbFCpakKX)J^AY2wC_D(3g7ZvlONs=`|cu$ z#)lZhLmo#;N^aezs_<)N7+TlA4m-GZR+hB6jFT7+l~qsMpK6N^zaquNDA55AMri53 zQWXIus+u`3{FP%XTDk@`%8CuK^Vk6(pqzR`huwufRg^OUeAxC7oMwzy*r+wJey+~e zx7*0;B5Y{SQ%9#qXsWdIf#*Ncz5LXc+`qb$t5knzbyzH}eb3TvRqiF?o&JUJU``ML zQ;3`}EnkCq!G`8uAC=vnK%^t-GD~g|optEp1KulYMo9uc!j;7bl91pEE*WZAITA#e zj{@l|?CK2JzU2|}kggZ%51t7S|78-SX`7uB!?Sda+nYFX#$lI{UHao1;>n0$>Cb>j zUySXUD&8n^XrMwXh9gVbq1huLMUgFFjdGm2NBY^{0;$TgU5u7Od{0ZCL%)a=T2b61{#tWzt*ay-TD81}SuS4O^2$1cB1yYO22r(Nd zM-TGA^N>Xi{b@#YMLYzIh)52f&f6lsyL`W#AUT-}7~ZK_MBFijX)p5XR$M(-eIW%T zP%Ln>T$)lXQ^NZBuuxUvEl-#0?inCz_^Bqw^UUfqCEXw$bnT`tvVT+ik%OkivHbkc zQ$jh5fNmnr4ci0Bl~7GpN)G_Nmyg^3Vbk>}eT7ed33?)@fgbHkS3pGoP z?rVF_qoRazf`=~uZGHfxZpXDQ_(7mtC8uoBaKY3KY<6yWfX3~10@c!EgJ{UXInM$cr}AvuR=xZh`~iG+n?3Loyc4){>Xaz&i$@1ugK?Ud0>M#n z3_LzOPZnrZHjO=r1&Km6skagGc&dJVmE2jou$)i_97@%~H&6L}D$5T6-Xz<(AB20@ zU;`7^(8g0;GtgQX-gD9}hwgpdCA$1(DYMS3EZ41xO&QIZSXCOj8Ru8=J$=tkQGTr< zDi@cN8~pBXj<*+Jl^T6h<(Y*7=*uB+%F;u30U9Vun_vFYph@5k&{J} z$%Ru+&xkJrX}DE+O5_zEvuawCu@6%W17+As4+A}JF}F!Sat~nlA#&=kQNz%o#hw0y>krT{cphGkYPDjr&I$SHOj>1_PA8~a;E<8+K{zgnZ9NynNw$Y%vv zvHEph=h{m_Vm(x>sqqy&Y4?ApN^+MjoxwF;8t3yFBy=?S_#6@SrM4X*3m#}9W^M4UJJe8Ab7@y$Z~ zA_mKt3S0soT0Ym@WcPpwz0c>oX2Hn%(++p_QD#%9lMjgLUZ7SSref!oNWTzG2KNQM zF59Mmr~m#VeMQNhMJdK$qTqH*S|R=CF%$oGFqc9hvSfOgR9y0HcziY$8YAx_vAwuU z2!5*YA{bAdI($3be&}W(b0Ha zk!;5(GSuWlXw|Y6qzirBbn#kbh6E0~6N2TdY>;uz4*Zp4s2=s{sKQaErbymyO*F#j znW)6~-;Z+aF%xRvyKpN>ab|;P=9^q8h$8 z8Qr*|TCyT&I;Xnz^}%WLI6tivO7gJL@wpMC(tK$v!!fS`#xvK++1#Od+ zv^6Zm?JGx5YI?{Jtxe}7V0CcrL{(1M#Z{qk{BNp4#F)}uib$c@1_WN4b>6NfoiYJqZ+Ht z6~53WzQ}ihdW5xu!3Zb$@{_44jrJrO;qp6#Y&C2?dw%!MWy5%wXC z$N|uU_tZfXQ$;LFi-p<~>_!SUe(RVr;S60CdxD0|fm(#y6DD=CLG$lr>u+q(Tjh7z z5ArcGXVs)upg{>8oQaccI?DNc?2*n9Ihgis+JRxa1JvF9ZDU#{$aW6q`XJyGrS3-#nTi^V`NA518=f}Jbj$$ia?Szf$DNrOA z2*vQ@`S~723F>*Ec3Sx%V9vUr!OV6$-_Szs{`=*Y^p~riLW36NQ~946yGe4z2m5^>Dm_c#WsaT_*q{^7>C@5 zZ&TuPR?m$Bt7n?u+Z5=8Vd3^b>!OAYKH&Rdf}}WLJE9l8=yHMtYS=x*g^1TRad2=w zgHUMqn@D6se;#Bh^Kkf20{U7JSFy+UZuno53YjDx&axHS+Tt#9%l5d3@kq6u>aCx@!XTfHiP z*4~Of%D9VfHMYiFy~fx?=NBADiC*QGzZ`U$?|)`-tN)hMI_9}0wv}V&g{vm^>*Tbu 
[base85-encoded GIT binary patch data omitted]
literal 214762
[base85-encoded binary literal data (214762 bytes) omitted]
z`HbcSFlY3?%kljCvYmfyG|40K2jn%?Q~rNK`Nk=r!&*Q=C?sZ{)iTe9yXXJ&Q+|^J zE-Q&&te}ir%Ys<+KR@W265w%gw1L-d_tIVHUFqie@5lT1@7TT!74QE)9T8Ik{YNP# zlUx}G_CXtI%Ed{{ znKrQi$NbP8NVQQ2KeKBVhvP7if4BXbmh*t&xVtK)5HqS(e?V<}Y!|=vE0cpQ-As!T zmEG5T(rmc9Z`kSnFr6!bT`d{TZPo4_v7ozbJMO*z3YFuI?8I?Y)cO%|afMg(vZt~P z-4|A$4E7OHUpu&n?7OOmF+AxCQnkOm05Hsw0ZEFK3T|zciML*Fmyd=C(AO36+$c$Z z4=`XN{cyO})M#}w@k1$>D?cPMI2s(!m(RSz5~hMTl1ZwJk<+R-C$6(pEo4nvWAMk4 z2=8o0O3!$#_?!!=h|w>fDRlf33QZulJLQo$pB4K%!`4NK>%mWrtUTPPcKPE21Mx}+FC|E7WAd8jvCyV2YA`hekFHDTV zSzZp-fi+|GpD|WFZ^LC34GAhFYl9}*b;>`xnlufk0D14>yRFOL|NFHl3Hja}YsxrY z4|F7dS8w|8Av&{wdp&S-UDwqE3j-93$(RCmB;fe(_et@g$4YbKEN5*U%1%Hs1m>Ts zl5|;AYEt6CfB|(n(E3*l)8~K25UM2P{lyNpXY0}L+#Y$CgD<6pPNf@Td>H@zN=yRu zSQals2!&~dQlF)4oDBNMFOa1eb(~!PUR+`};4}DL<$(xJj7jM_U@{sIAq6W7C=O)_ zdT0%tnzKQb35Rdhc}w(r9weysQaK zPC@Sr|2S~N{i!lzUi@GeAa)oZHFlT`&k#9mY)QAF9MDzxMK@T?OOOkC!my-lQp?8D zDVq)X`m+%`scG8>-qe)qH59$Ku*f!CA2t5NE3ar$l_taE4UwJhP}N9`Xqjp$ zX(Yi6P%9qcpn3V#?b_qGIr3t0&%Vay*>sn0Fjg=f2xxvfeeQARFm0XCksCbpQAj28 ziG0;cmeNt7SBuf}At~N?`@9QhIQ=F2w4=c?gO9>lXFbI}(`W+Tu`P!!BfEKaI_BZp z;?OArxse^2M5wH?#pjAw$muR*C5&viPzEcWAdP#xRl5aRTjz&eX+%1Yq#o=*6p`xz zLyT}$dywUH2{|q^Q5lht!x!&Iql^lT7Ezazg?!f@PQ_M`fw`l2ZOC$PEFa5D)Uem+ zREe$z5NRm%mmPj>2vdBE_HJoiCHxl;ApgyoeV#VE!028SKv1G%SsozXCsWf+6)loi z=7?d|pa%3SPQyDE92Cl|2%Hv2e#MeIeZnE8iLGDF|MYn}fX9v+A;U8L}$r+`;pVi~c z_ryEfl{P!(6yeAg@rb4*mFql0xJ01Ao+#}!^W8xU1ja&uV@H~MQv~s!RoZAc?SC%^ zAJ^hYza?kQu0C|4sf>uO2l{m7nLJE>mys&f8hAxCJiEio)73sR+~b;Z{0J{8GhOUH z9X8RV%>T6H}I|``X=|t5q0Zj@J zgX4V0tsTzdL8G`!{jlz=f1pvdvCpVmBev2(n~=&r7uCKtc^VpA0IKf3xNZ<0?wB-7 zCwC8R7ys#Y+XJpEc^f;L?e)5O`q1~=l3(H%P^9cYVRzAHW+SX6qh%FUafOh5L`U|9 zgok$0bR_oGlqE*WW0l~PtfdhdvDo_HtEQuT+KK%rKK9dRb+c*W zbcqhss#6831EJ?Te~7BL8LKG4(IJi z^D%9{d<@Le@w%;ynPE2~{K};Y*eo(BTpv?u)EHmT8L5(YEqY@gE&7PGzwV>V1LZx< z#q@A?_}-ibleB7u$Fp?sO`)aB84!}3C@&#^g`PvUM&pOAE;Pg>Ue9;g*zGUAWP4Vu zwN@&Fi9AU$mpx{Wm41k{^K5tj63G-_eeAt-i`s?By6h3U|8#qME=7XcKr=U%>9L{h z^L2IU_2VKW&J5H%9w+7G%4|a4<-{1u4nz_#swQ`y-pX86r#siaU*&H!JNK8mtt6_m z`&#dq`V*Qbf!+MR*7-ki`hO%nA7`ME>A=Yi3AldAGPvH`LAMc$?N1LStmI2#OzQ5) zs+lW5)<$iwxdso1RQWgn1!h`18HSf#LBpN?!^70FY4}o@P={h~F9&#iRy|+K*G!mW za#zKt!hf($ZF|r6g_A(%1|S!U5diDb0Ps~PPpj}PXU?bEs?89?M%}evU-j}EQC4uD zVzb!5C*jfr&^ zd%f&c2-^X-tb8Wdj`jVACmCCK0ZeQE0m7l?;dBAU=f1I@hiv;mhJVkjphakj!X@*> ztYn^K_@k%Z$Y*kDXwe4WNMIIeKJIPCYF0QUZ@W*RhIpk>%RO2p{vG=Ou_`GZMrZi@ zmk^e+pv7fZ1)N{`xNPLIMA{CC^LW`4DNp@|d^Y=I97g4@a zLDpyP_lj)XK{&^{2d#7QES*M1dym;XyKh$td>zLvKj)%goc>H4aBO3~S%JTR4S|WJX2X!}E{GPhm=cp(p88^LkzF;+Q z(dgn#>lnVP)jn2&qE}$*+u&M3JU9KR85MTZ)idA1SoVM$x&rUnjjSz_DP%K zDCxXD~WHK}`V!sDetnRKM9k)*M-)k<({iM`kR8Xg3;Hl!=o- z#GQ89xcDc;_(Vu6dl;4e!Nhw-L15kQlYpKp`BwO$dtE?B@K7JM!{fGj_U{mqs8`|xw`BQztJ8Kc@4dh7{%Jvv z^qQ`hm%eZtf`D}~Em%u^@`JO=s^-4ak5}{uFMAOrB%&afPiB#uEAn<-f=3PBmk;?4YaUD%msGxsx5b-Cc9xfw_>0zXx0m>;- zI8IBYRkXg(BmvPhB!}C|*D%);sp_irveH;IhseN0m$UmiQ4+NlxWd(Hw=z}KL=k47 zb#0NjrHW-ch--M*h*TS!My3X$8~81(g(-gTdGBzTY*)(?WMwd?M(Q7PpRf^x12O{P z^$!cQrbLjnD^en|?UME{oF7gUgu3}gY&knWT$xK*K{gB!H?2xx_EeK*5kRXNg=1H- zN}KW8U%cN9#7T86J8X6NUN%-2e2F-2*m)T}a8Bqz-<0z04ag+uYOHCX?zc2jI%gg( zu$qW{lkns=LcqC4Cg8e1F|nnRT}$KGK&v+b>8u8O+<{oVFP~XXbf7AwE?S|j2EqM1 zas-3N7GHlnHfAZ^z5(HB)w;AvADFy%6JFW%_IggaA=?mGcofQh$-z(%nwdoF`++?E z)b-f%m~lyjBPnCWeN%YB%o*fy6)NTNE{pAhRhQsojT;5km@{{uaTNMoTTmVU+b2p? 
zSR`@&$i$}R04L>*Dzv^Bl&T8khM%gC)+Wlb2w>c>%q1qWRUF}`tCmfT$kPF{Y;W~4 zI)l~Tpq}UG-NA1-S$1QBSuL9?Mq`t-<-vF2Ib#OiHpBc?$ z1R>xlki_!>=cV{5L-m{#^@g8qU3koJRfH_#1Eq`s68mv|1siqS0y1K6L{W{6;H`(J zForI62(#&At_JwqNdhr7KrzC}c(ih?4Y8ol0JgFtE|Q9oOH}VzC%U@dGG~ zY9KDH@eO1a+=gdv{{BnYd#q{RlKfsqmf?aRaD7W@;Vd-`iXn_d*KIK3?<}sA-(A@% zmFg?=KZOT*B}99xdN?;sLfWP;+bhSYK`^OnPx7YUBq+f$-%JI#MI#zD+@{>{f+nX+ zuswX5PH(@S2f>|k=E6y^@Q>kC;%M1%4SajV*obGlZF_sF>A@aE^LT@3_nZ`8E`!Cb zQmXE{a(mx3!y`QxUBPaA}gJgKpD8ZuSW`QN00y`W+7oy1; z+D`P_fITDvZvN>;8gDx;_0SonbWKb9c+}COls_qe2(3W{`2uiwwy8iZ0@esljCrnK z^(+hImWM`x+u}w`OB>P zW4glNaIs+kQRjw!ptY9*^~pQ~j$gUC$9zQUJVG6hqO4mi>L|xNGQl zm0shrb~bHs7pKL;u{|RKJb7M=?a5(t4!Suc-bwn?{l%UOWFp66CuWwG z*0dWCjTgTYBYePOKJBL0)n{cWfmY#o@1AYJ@Fjf54SojJN33QZj4a_Z6Q*KT4+Pie z$ecF0f>)C#uoJ!~u38a&f<#v2v6%-i-1SVvej`H8wzvpa{-Jcrjh`#lU|D294KVIe zDbI0+sX%BeKoPONAo?^a;<9pa5je{dbdhGT;d`@LzlR1uxq|f-ssF@9N&*lV#neKu ze#xX1DF!~!ZO8+%sAqOJ)x$;7j`zq>ZyxvQzrC)$jv;DzR?jz0!Z#r=Ia4_a;?H6U zv{RRdQWnWRvoLo3CPaWNB=9eeAVL_~YDso;*oAYqAq|rIB06F=l6LCI(scOFOi&AT z9L*U^aU&J2U+nr#M86ySZX@Y$nyI`l{#>tjKRcKP$w95~Ad;^nwM-dgHFL0j(a!=6 zf@iXsCxwFAn2W6Zg-oBDhiSIm#oAepF5k$nXTTC(W(htzFAWq3ybQM;2M7x?yZQ-u z|7#hAP=JKL?&kBNc1g)LgBrP_d~@m_9TXkY2NBdM+DNDQGlyd{(@gT$dk$b0NqkpZ z1(TWOTsXK6J)Fz|UbXBOpT>ZqkMMH&O>A$wi>yNke#pAX0J4h+SQ=U1e)T zMop@N_+aRh3F`rmECF68@!R!T;r>TB9#o5ik`z^wN@lu{@}Lw+>dvQ2mJTdcyX!5x znArBpmHRbiKz!`GU^!D#29>xA&X$6e9B@WlFP^0UgUA6Rp7{9a-13k|?5`lgcSbw~ z!aY9#bO-TrJ|gEH?q{_A3GiMWOgVL$tqNNo*LQ_XpuX$xspiamo^w5{KbFUW!zK=s zd}s}rFfAC8h<7b`*i%7)*5b%O=Sb!<|1w*iVg^+1^WBVXa_x5P_0FPzp*%?x*}3^7 zQ@LX38%362t?}IhTHKv&*r5a_ci~tA=b)VthZk6H$p_*%|4C21fp3JRFgfJGxUfsc%l>iStJxEZg|e2z z^hRWYY5rKq+{KGvf9@RF?P|Puds3kzVVVAnCKB2YO+;iFM;DhCEXXag z1CGvo%Q)w~^f%*Xocs+7l@TDdVUzBV>CNHA*+C`gNTjk8A0wR4#J4){{neV%iCVE` zh&y36U~1_)iR3NG)*Iy@F}&sQhU|tyX*N+XpCZl1!w5k^5pgkzMK3b2xT}09ioq1T zvRl`4fn^C97MHR;hO1li9BN2gE8!LqXcYgFh5du4l7zxLNB;-j&Cw9Kv+fZe1_hDa z%J`Abe7R9zDL{kVBR%n5;CH+D(kV>|c^qzSGTbW!HXGH*@!yACFxxqD8{3&E4CCGJ z{X{wA`fGXyaS#@nSe32E(Er?nh~Xjdluz^G4^D%Z5gv;1nlxj*Fw9WZPNw{lNo_eD zAqV#+jLykyG>anJn@h~bW>y*o#ap4pgvOtFpd8_F);0m4DGt?`RCgHmm*pTc zHpfz?yPhmjVN6}WH@^_JsHc;EPN$^tjr=QReTQ12+|pbtKk}pt{48O%%;g(i0`$aDTD1A`fvVj4V~;;E*BiCjn$w>HXJm2!@6y5Loo8> z`57(E?)>6r&DDA4=`F%`BzMqYr!P32T>BrK zMx=)7o8GC%b))hbIkG=1>(gY_Hutqy;GG(32dQ6^|NGerUEW{CRaGEs-e>f$4?*i} zaH=RQ6wrtL)tz>K6zmwFXe!w93PZ)zb?39WZ?L+h=#tJ$>41QSuBt8aS5ejZHrB7h zgS47L6Cg+1%#iQ>tPG$xwA+A;1eX||&8nxLX$%_O_W_}6EodjwXY=NNhNYb&a$x9W>{UQW#Xrl%9$t^t2$ol!cv$Hvz{*G5%sgH*$?NPRSj{vpe5{B${YDA<^ zB~eZ8Ai8a%lDCMT924$^&d{z7KwBB@>nVDKJn9bHBg`Rh@3lLG&K_Kc@n%+!jYq49qpW>PBD&IqmO+osjs zJ~)oj(F{(#7uAj|g7sg7GJWq%wk!E<0gJ@?UYWJ!Wxl>p-nZwSK>kuYJNQJU7P{ZP zk4ht$g-g3w9;s9%zh)Atm9SFP{x@}L?ziydN~d1jk*pw`{saqPb8TMo4v)q`)YT?K zm&;-oM@?9UdpBE}D(F8y%3T)&)X2HV^xq3=KI^s$UgqAgv+v7)k1xyE=w^|E(S+Ygk5`VkV(f=N4A9J9RBZ(_bkc| zN3gu-Y}M>haKn*h6EE=4W@D?={bN;AkRpU>+|lb*kA0S9R-%XT4mepV3v>mMZHYmw z)Y$*3N}`47|2Zs_f;Ha45N{ zz4^q_-kTRp?sQZe(F~?>TE6W-blP`FcSu}j2*+lMy+^?QwIiohP@;5RMW?2!S#O11 z#lkx|mdasfJeJ{B^Xa*{#bMflx73>`@SSe1m|N4!1GP{!9%}KL&+DV?>v^M#bwoN- z>)MLfEroLClDCJ{waDOhMLM)$tX%eaw$);@oJ+aUuO!a$1X>O4O4}Lru+sIS`oP5~ zgFrpc)28Bdoy7QldtZ)&%G{!(_LF9HZj<7ePoy>rVOZtH*9V+KST507LG9|*hJXRC zjMT`_;o$gw-lt3Od5uBO2i^k6tsP}O3Bc?{v7#zAE=-aYz&w#8HL8`a?Ni_3wutqW z@H6!#de|@X)fF1{y@PNaXrC-970yX!oa}f42si2qNV@G$oD;H{{)DO`hRSF`iJp1X z;XJux_S-|VpZsC2q1jfioJ**ozVX`w!u0om0?o*1EaiDwMwZW4nr^0Ql@y@qs!R^= z#sRL0pDuUg-d1j&)Gs|65zQ{%!N;2f^1U2e42n=+!;(FGuCp)!8sWkYnQb2o-?_h@ zrM~>L4Uv|@7s#b^t>}DApLqn!K+_Q4bz>|Lm<72vHRw&r8ljroSUI;%sX|Kd^fZjT 
zs6T|GnIxZoD!z0a!Ii&X_~w0aA96aiDtPGWBI(1w>Qbv9VuAUwxOKn^A+cE4#YHE# zV6p_K(xZ8m8Q>%*uMWtbp`DMspqc<_Wu~f*Yu_QhoZ97Qr>b->f2Q+p91@xAnPe?m zXNisGp*Y_78_Ijn-djynN4<2OEauDwX(B3+=m?T=qfLu*o66e?meQb~&s3Y!1?!`M9y`wldLZQ4z*+F~?Mm*eY9=SmPw8Ykc>r(B6ATi<7n(pj zklozV>v^avv`*i7fOU_zWf5;0Ah*{&;(9LJbR5lL27#KHlv>5t{p+6v9haWpnGg|& zvIsq<$fjIX0qg|ZjLR@SR^vv=+7A;I(t^*fPPEq#4E~LdP`V&(Z?)MU9UggQ7dhR= z*r*fei+oEHnsw=F-Ve?Z(Q610RhrXtWSO~H46d)wnJoajCSUNtbX1qr=p_wvSBgX# zrS}pLbAJ;l=%f8o4SK$P}qq>J?RTuAOoL}@UZA)t=pt@h({@$=!UF$h~j5# zMF9B)tn1~}^a2(r?H!)>n{(228D_FBJk3XOd(jNQV_Nlo)Zui}kDsmNJFQdGO9(M`S(8p0ln?U*#mO=FTea%(-o^ zCU38h8M@mxL>DeNW1Ut_yZ4jR4TQ9EOjbPfihy$;l#FlgkZh~=FRrtkoZux#{^3+V!h#x19%*v3k`C^iZE z{EGs=&QLtE%bigymRxM-E)Iu=hqb087TYY)Y@pHNPq%XHLAz{BW^EA^uq>?)BFajM zw@Al9S@};?7_^>VZJDpjrmr6}aFo>ynezMi*&b{w`Im0*42Z;G9k+){vo%mhDXc-n z8*WjS`%m`>%|smUIVY_9iiHr_`uG_u?LjyFw^GS>KO=v!0f=x_cGsPvaPb2hRRbSR zZ2Q2*WW!cFxZawLjmW;8bj}DKMHS#9$l)*c>t)Eo{Kfe+0^jN?k3G@|)+P>@pw1Pv zdq&U(kv`@EI%q@z=Ll9Vqkcwv5Bc1YG8M~$QzHe6QXe`-#hQR-vRjKP>MesuE!(#8 zfM~U9{2C%*Z*7~?rDZAO62<(qm$={b1hOE`1D#vN_QgZV}{H0;_g_hTGD`6~?|G;p)-snJn zu*T;#*|V1)F6wN@&lRUNLLbb4c5X?fzadH(1$ewo0_JevV53{bgOPciUv2M-fx|Jj z+s%BjK>5o?cM-!n;>2=$&T>(t6KNTgxBMwq6f9s)kXznSwZN9Gz!P$JrIIR zHHrNp_$8A;gs{W8M=5+_f2z*JTtR_1fnG<;mRTmZ*I6eN=jo-%4QfVp9YIzlUgfj5 z!i@@2phW6v%o9vtK;NZ`8zB6oGrmFeX%U_YEU`bf>CD~jB|x5{t$7z)HF=F$%sP8Lwkk`D8ZZZQmwEL)f5T1gd)Dl|KCpxpy|YzesdA^+Gu~?s&RL zr-%STfJ#z`*JBl5%Sb5#iDEUF_MG4!Y4lQ&4?1!wXHnQeLZ55xh3+LHaFXYVzcx8+ zTexlfGRq{dYQOM%Z-L1#PrOKSu&kCHEzmQSo7yz; zq73$1gNlOw<>#1(mxKNen~~o$4zW7$+nz2C7kbcV7jIXim1dQ!7}LLI>6NF;{&VE{ zo0dw+=8IcL7Rqnm#2IY>)!%ApnQE;o!(WM^d-6Q*b?Y*lC34r-sh(Jtn2_RJr7>Nd zoso!&K3P0~Y<;xyJ)DAFUky$fFlw7Q?;t+DjJaWcmpjBG$=Z4n#m~2o``I71qf+2U zrHcd`X8TTB4Iee$1gbYc67n<7Tg>3CKBwYap6LQGK(mF04(S*l@bdCjKbLQCTrWoDbp9i9~Kwg2+y83QoOxAJ)b|!0h9B3V%3L5;5A|rd#H1{&S<~|vTGDzsq@P! 
zvH08>Bt*Xn|3w3=N07LXPwT##?g-#HNSh>f?x zci9S0i9nya>NWkHiuxygAL9hQ$CdiGeQe$d0} zjbuE@u?YY>W>UZ0o0Xwgc^X+8P#CHOTEcPJ1{oO6WeZ1(!Kt}jZp~EtvPz5(199~U z9x2I&+A`lP@3H=1E}C{VlIXer+kS97e>Cv~oGn8y!qvMmK*mnyi+gASxc|h`y~5{SiQZNYHzuF&>!; zV!c!S;c8!weGDA}yNxj#T{(h?_ef8j!{o?hnjXtpn|=$)-ROsGCZD()%5q5{-DAp; z7kO-u8DS8Z%x)i_vvzxL+$ht6ZP-vr9hkHkrfi;=)eSanb!}mRs^YK>PFz8$fc<@D zt<+>uWX4Z^2nYmX9p8#Z%N_X^tCuIY%;|i37cJ7BGZ|TlA0D^NbxTacQ%6yzxO6w- zc1kP;6`*rNi}D5o7dfMp(g|FFH=}0*pRgFpIul;1?0?*#_kYd~T@RKf;} zuv0REJNp_?{WVi>5BbHc3^Ga)>{suChM}zAbV=nKo#?gVI6R>; zv?7xy^qVv?9k^33dLzorRxmMqa~e%#s;wjkMKt)NV+M=dJHuu+Ca~{b+%TKkaw2E( zu*2tU=#cO1y=?7}`47bM-_tiK;3wXZNo}3Yd_Qxnft$>zLp*a%e%4&AO)sxxh~Jt3 zg9wt(@;+dr=}#~x*?@;EZD`qcVX{blvP(uaK4~gWZav#z&~)Nh5icShv4LBsJ zX}dkqa?m-}6k7Ien3!GlWN}@cR2F))D#*%OEEf5e2*g-CS{wz@NFPs)sl_10dHI)D zqxvW0iQIGY5`ab?Id_=3!JciK%`hd|o~Gb)RD_TI-2 z|MQT&`m>pR!_`E^K7$mtS%Thw+!N=cb*I(2Hye6itUgV@a-N<6sra#Tam)k zJjEyCOxA*t;Qkk7Zy8lp(1(2sk^<5t4blxF-AH#z9U7!Tq&uXgyGugifHWLRx~01X zq&pAsZ0^$gxu5sLyOvAO2iKgv_sr}w^PlVbUFr%Fb#u$^4-93R<>9M=&n}T9&~u$m z3w+PJp3Ndw+hhU8fYH?*@Js%cCrGbFbPK)8`0=zSP-;}vZO~j;o|N=#r3m${ih?th003FU!}XvhU0o^af2v06`Wd-+h07$M_OQ`5Uq{ zCieM-bOfHlWJVNShwmc^=i)dE855d{`SKln)B6tCo6T*Nl|FsfC5ow1 zE!-i>55bRf)gB0?6DyYhXmdvaqXt8Ka1i6Yka}RwXTMWHvIebI=%JkZTw2g7xN1YZ zT#~Ga9^x^q6E9!08NOMXQJ41C@^!ZFTlwM2In7}Xoe@AHVwjxvn*5bH4UEVVkjXOU ze}7+F{b5XXQ~{_Rz0_E4c5MR|&1?lqNzbs2BzEsSTvAg1cect%0`3j0T>FWpuM?*W zfh#I7Ke+k!K**tz;8r~B&KC>z&E?{KAal?~H!b>)#T51p)5FCZM;EE%d(WetLx7Ri z_EV-A8{>IPS8=(niABGJ0H?25cbT@+l7>eG)Ee>g8GhgPe&Dqs=&jQh6WAsbQS`uo zt91Q0yKgsrAcfCx8ds8RV#U;gOKJaeHh&R1mtiimOrvlE?~BYQ|Bh3ACckUHIJzK> zpyx#_CKbfO{CtsJddq`R*?XhsLJ(Y4Z87?7?fXP#+WxakMRNgcLs#*^y^CdlGiShF zE!R}qS=q}f<+V97y|**|)j7C+Bu!B2t=;0j_RCbcwKeV=GRNNAi0PY)1x&ktg^~RA za+k(>QVYN}Bf-T0zXyEffNzN+m3>3lVoA{kSnQH|6@!^{Y9uVEdNgPF`R3l`Wl8RX z7xO`}4*<$DYcl=VEMOlN zZd~}@^V8{8iojST=N@^jqCS5|6O^?^=6fj5v5HfJB{=2KSp;p#d432mpEowmZIgg3 zi~gts{!e(2FCVwgv*Q_OFMVCB|pV_)Pq8-{OQ(rZ;@ zsDcljIc#s`nWY6zOnogUZV^Zxt?OTDa=RYF-rPseQskl$O-D?8(W$lSOA+qfx$}`# z=YW-NcHu^bcmbJ8+chvJrNC~dKs3-J4OxP_22)nA)bJqZ`IlEy@mI@_(pT^SCiQk# zGJ{FOwK<4O8EC;mwkTq zcF!J9QosGeU=;B%cFKx?HV6!aHI=xTEo@Fl)fcb%eTM7ge+1St?bt7Z_i{x8n<`$d zx4r0?o`sU3#sd(~Z@Fa}ua&YEznc$xq!z3f!DG`5Kb+_%F=|OJ6TA`0^4jIqhwrKa zu0li~k$N)}t3e)>52-8Vcl+QgE~^~l2xraZPb)|?*bOaCu8l{_%>|!!;ywaPC~7(u zjpb+)6323#DFz_=NaF0y<09Gp|1tB$ftf$B(gI|f+oEV8zYcB=--zYr21Ax$)+gG_+m{=9Q# zP!NU>TSo`VAjnaR`B=i!lUxs*LU>y^>UD6~U{Uo^U^6;v-*ScQE{;f6e&7pwL(7VB6t?I&ht zLZ-PjR0kX)1mr?gOV8eA3;qFh&;i@eUq_=Vz0bM$IP!Og6Cw12%s|FuGX1zVyH zq2#(|^uhZu1Gt^|6Da|kGx=}6Bp_%2^Iru&{>^yGDmyiKfv+6YXRqI#DWPS9rCK8^r39^Ez z*q+wKq_>(P$O<>7n`qY3l($&2{B$7%>d>nrEn_*B{)8O{)6X_Yl5I}gcDRPV(Lxr~ zvN-6?Q6gU`e%=8fcX(_z)8RCY7kEQtZ$m^b5a)y6e@s_D_He$EW#81sC5vTMzDJ$| ziXR9&9@{NV;dZVrvCyNo1$$?=9w2W??($%Pwq&wxU$(3ZP_{t~@ea<2%DnAp{e!TO z&h@`E1=_kMSGBDyd2_B@`JXBf)>!XyST2^**xqW?gg%ndFCIAjy$~he#AZ7g%*7^W z!c6)pCI3q5!}#o5pK*2XpVAu}b(+kt5WPa8_NF%~{P)6SpRw-3qx2vID^2!ven1V- zJX!BPr;S|7$m1CSa-VpBA9=TPH~5n)hCNo+1eZ!yftus6=nnpu%b%_#I>{q{5V6vu zioI72HA-<|#BVRX-+SWCPZ>_D!u{elS9)c5`ecCrzkhrE4S{?uc&e@g zpk?4@s+KUYQDM$7dC0Jd4PD09yCZgtv<9c(FialPR?ka-Y#!A@c}t5- z*pevAR1pJLl7~U3*N~I;Ywh{Y+!>}|AiX*Jz2zi0%hsf4l=Nz8v+>LSW0mw8BNAxA zFsK3O4DvA`Kr35GGV=QU!?j77{DWt>&3uivlC*z=6F~8gD+RMKU+n4A0xLg-^VTZS z|G6H2xe%Z+xoy2uX~5`08AC0Hw==%^_P)d4C|CvV$5ooyuQ)yjQq;dRME@*mh~xqC zucQyfa~QyVh#?sUs{jy}f7d(y^FbmGXkB{TRR8ZDF~Xj0wr{gptkGc5q@&##KM9Gc zg!vdELF;gPPN)sIm&&y3MW3vcmWf)PYhv&PW)VGcTyk3yM*z~O%+yw#7JY`J7kT4{(-$r1G=A{pv5i4xETuMD5S5Q%eMW%qDn=g2x=+EvGfG--xo=)vrba z{jP1Nx$Ms4es;*k`Un15l2<5sE| 
z%tD5FZ2$|vk3R9aetk7^U5xepU%qkrCj(`I_rt8`pXVw)1~*>VBKB$$oS7HxpV{Vx zM9lYTmUlLMCdnDNiGPg20zKH1vnz$h-odQDzg&Fv?&d6@K|hvl=tuJ6{!nT)UzB#i z?Pnw9Fr)2B_b^jkp5Bm`B(Xqk816vG`2KaTQ&<#LvB2!>B zv9ASrZ$z(=m-4MN%CtYOzbK1@-*xckmC&?1;C8F|k|@K_{s0V`jPK||KLO+wOYsbM z>9sK#DpagU?wg0_;zT$7w&LpNPr+&3r$_Xh=O?N7OQjV9bYJE0;)!(LjNLmOSxi^0 z7;HRFmy*oaCcTn~Jf!WcC3(};Hf3>tUFt3j0GVx@(ZaNS zfSD5{Uwo8VZxBAQWV_%xFze`BS~r6z3s6iL>~essN3jO!8q$p^SrC#K(OtbnNT?oXixE*d(-;<|sC{cxq8 zozGDOC91SVX|NVOcF`ll7%vBW{cbq+ark3B7AG1C5_cV&xsy(tPyAge&D-`HRMX?Q zn}^;Nv+r1NOTpH#>pRiH(bK4;M-$?*IMIU1F7C|+o>zw`bdhsrN*SSOs?oQLy_Mz% z2!yB0w2lJq4no_E>@Ttb53S}8Be`b&6WP(sdg)-swXs15eLqa0O#2q?S6q;hB7$3! zk%*QKZ+)^BI=*?cyRXs2g1g$Z706WbHw!brvem0qTY0ZLc`gheop)ckLo?T8KYU|Z zI{f|gR3u2m6H;1?Hj;Q2+BajchLXN1U3|m+>Qv+sCli)*!sYhnCDgRQ$H#37rYAo! z=Ax*HHQVQ|!*3>a=79!%<{r4G*sxu~&9|KhUgWbY|1>WOxmNMaSXDx8k6P~?N1ij( zf{7N1a&?}@hx3#5^p?=XzSBA4DA3;bT0vMU>o?TT6fzEll2iO+cr+i$$BWb;No>(p zez;)4+*5FRmnOlVWbwFe3{#|{0~<)J-p3eh!T7b@ z2);N{k!%YVLm7MY;hTL~!@Z?Ql)X9i!Z@KHdN|C7as=%bmq|a6xo=^I_okI#4>If4 z4-SG~KDqzAeq%!-Y|``LV(Jt8f|EBy-k!BJ3gr<%OjZ?)i6E)14=k0}rpZ^bzC|FR zr96Up(UmWv1+Q)&XurGq?%!}OK$23rCz!+i9j#`{LOX@;831fdozv#f;RbRD{w84F zRn{yAZBOZ`p8}8{EWqE0eZ}j?y@H-ENn4DrmKgdQue%-PM?O^NZY#u~HaT|5uuxu$ zccvoqsI`LAtaV_SnC**`;UQu(d4)xMG>1Rx6$BGBbno}oc4F#Y!2ZFQe^6$F)W>mk z6)1%Q7HWfG4QGN;#i9T<>`q}QA8<7$pS9AKNDmPO@*N(#dxfB<{D2nrs=@ZS!0Wyo z{nvF^yLgOvGRv^|0* zc!J=F&P`%iEdVcAW8P(Tm4XoPo6(}M&Rvz}e!&GZ(^`&q-GzoyXdTmZ%5m*Ovwf1k zSrNQq(i$Ni`3VLma!=q;OSh-J{l*A{PmJ$4JcuQ7$sG~vo@72OP;}zSy{FaBm-M=x zK~J;$UJrvk)Uig1qy7{FR*zPH}^cPt!^W5=y9AOgaTQcumUL>~7K> zM8gDKA)X1eB*BwE$caZ%c$Fl5_y>%jYmbtBMBvNlvs0Vh&|l-ny<9Fz^$X`*rWNd{ zrFYp)J*!o4f!j(7T>D%cl?HTw>TT0m1Wy}XGYp8a-R`Dm!{-s8Uf~9l8fZ1AfzY?x z`X7gs#nEPXniJ?i5}wv;sYa-rKCQkdppf?w6m_nyd!}zgYV?W^XSAL0_{3d>In2v%Ugr znKr&+3;T(liwuhRJ#L$6$|6EdLE9c;E*nTLDai7Q{hx!aSr~Xxya6D=1sKCgaqdfm z9LiMkTScKu|BNXFmc7U1e)uhnoCU4|er%t<#fJ-qukgp|h zu>cjWah;x?Mav});1a8#Zeno0U_I&~rxEE3P3lJ_edVsb|4XzkfmT{XFSlqKa zs4}{Ru@5s8-2#Mk(7(MmtrJz}{K0if#K_)ay?r_FIf(Ru02Gqq zJ8N!JfCbsWUVQ|R?IWD@_E~xi!OQnL(%cuu$W>UBQC06Ev{Fp_23gx4`sq>pj{yH5 zLvB?8UUwzTjP~HxmtPdP+dl~)@-4w}X$S8SnpXBd_A@V)_GaE9!a6CA35Ui3e#F6| zQlfBs8)<6rbFIKpj3P_YB6(kytC99U^ zsABN%-3bNat)`0iYy0gE0on7b^GvmE#I6= zn`NQSjs>W6UGD2H@o{+tD+5`-y+y<{1YGCk_?z_9 zeIf}mrILGY+TGS*$M2njD-Dv7Gt?5DPnN)+ioA_F$ay^hXjCshwWadKEns2CwJ*#J zI<#n2()2iFPe^-%-D-ygu%x4a>Vduhhi=_zC8N<~Kre+dyT^KDK=G;@~qA#4Xn&i{6BQ-hRx#FRY<9pc3*=*To6ZLbVHqc_tFHpT^fusb^Jpl#c-MnZT6LiNv!BxyimP=n3AMw488RY z4x_f=Cz32Cf;O>g)g`1Yakl7dWPDPvrn{*-;9s}lChR!mR%2;omMcxwf37Wh0eMqr z0lw;b#&a3u1x~qbeC_{Bk^Hhhd(kru>3%SA$#vjF;zOLm-=h{;1ZlR>I@)HbNqkfw zcy62xN^Wd1N9o@@2JpN2wU4K(;9)JqDQWj95|iaeFw#?7x!aJ_xJ+06W3s~Tg+NDz(WXN1 z9HP;o^{jcd4jQhcSBHqXCr<8&&N@*x+s{_e!sU^0G3}IyescmhQH8cZzWlxz*gIfz zjn~4aH)_cza3Gz7-?iH#z;5BtU?0V$E%i2T0dOO>k0#hmo5FR#^<4uSPi~rhbmVI) z0Psc`&q88RSmVGZ^Kax>OaPJyYC6x3K`Og->k1+5bCMDNh>k548iN@8Ly-%Kj|;it zA(^}6lV%@>MRsgzu-+wvBvb{$fG+cy??`HHK?TZX7Qs8|p2(O;{*%MXY*&{K(+71Z z_zVP}v{3mwBV!~(1&Sd(dA6aRc=AjVWUKHWBr+VJW(E^=h=w^Pft6N4RnHO85g5{b zEr?;>LioZ6J`rG9qGHPjnWR9dgk0{fkyGzRKc#Y92wNC`zMkB$C(~W*tQ7zWqKk70 z2(xm@ZKQpDE7Y6(^Ca_mQHbga{q1L=?K zTeW)_O6644e8uId`;m>muRhEXr_6vbVbz!^1wV)$|G+HmP40ca_k-RLQvmWov^NVo zyEvW8W@7k)e@fitL+(D&Fh`xAWdv_-jU|~>rIB-?kmq=~!vd%C>nk&{qlgI$#Srh% z5cT87C(B>z%|t1zo27B^6i~|Xuzi_kk$sf&SsxzOeWEEG^tLqi8q{cGcp{=E;QzDr ztJ>9arO{MGT-ez{WiFA2At9FW+eM4%~4tV)M^ihhRxvuL21%;!*Ty3BMiY3lVw zC5;6&GPaAMqnoN|aph-#!feEoRK=tn$0Cmqd`P&^LhBNm-0#GCVoYjR>F7#gP68W9 zsA(QNo@)D$R47nqG5l)5)pzrLlZ@YIafK%PPC0t&u+96+t@@TlH;cUizEAkxez|5D 
zP?1|7rBEi4X+iTpTBH8Zl>@j3lOR4c!NmgC>RM|!_!!9#H04Hv4w`ddHXA4!?FM-7hYt>?eE z--PcVju(%5$=~<&o*#eXy8muI&!p4kiNLGIolKb$He82zbVQTL&@-}cZqM(sAJnoN z4&s#$`%WQ#4%gjWdD4#PE3*FMZN$rE3KXrqM_xC#Ue=iU~`sq0GwtKKEaIo z!>c7yrM07ot8DG(@F2@T0O2)ic{M!sIejhHYm{o+TTZv@n=|S2c|=D*^6rsXwefY% zaRoBabhdv+wyWI8Jnh~WOV!*wAZLF6EmCVBHow<6N(yZQ&BcKFSG(iE0_^sM!mb?D zrM!k`yMkNYXsU&5`rz!##RpRC-L?c~)fL8M4l2@xukXk&zB=>73Z^kmvIPXVSMMlY z+7M0+(dmkT`-4SLT`$2wFOI%>vUfC;63it zO+<28%$Tp@IGI}>{C6kW>&WPijlrxAyXk#+m{R}b&UNZ4@5aLYDVRYiClg*S@nUwQ zw$O5%KA-yyut<96ygAv`E*H_Sd&h?~HeMmVhc)^vCI=ohrS$Us)_MR+;TxzORzTrPD1;(5fol>b?`fn48#rv)H1D8RPY$Au80cgty0J(%{f*ab?>K|5O=zBjLojmf0L1-~O) zaB&1o@uVZCO*&H<f7Kb2H0a1DGe@<$!Ibht`AlH?+XBDFvzL5XqSf!fQ` zBG@-_;+{pHp`wA)WNR@ePl3l9fr{{?Wf}fz@llABUg`W`M;jh;?4!Ot|Gt(1cRk|_ zuo9*Za{%l4#u5dm{auEPOvFO79At{Jp053ecA5F=Qvf38E#J5{QQ%Pvr7d14uX9r(^t{B&G%n>mwP03SMI}r+o)39afIpK*iVaCO-awn zr-KgmmAA36rC)bZUNE5*p#$#_c3YZm_LMqcxuA^H_0WwaYnepxpGrIomb4~Z1+&4( z&geM6ngjT3BTX_-6e*C8bMdGHZ}l)}IS=IaW$p7x;>=27zInq`H`?*QtHK~W6SuVz z?fmcB&PmjAI8Nh$W6l+ho0hXnqPprcvpq-b!j#=i>Qls0 zWQn23oRMHQtQ5ms(H5N%()>g=1kw3}+@KABpJ-o87yP5aM~VKZN}3>Q;=$iI0PD;# z`aeB%2NG?TaYS7sW*v-sCfMVK_SXXL1x*aAfnA5Zhopp?$+46UMpL1UrA$7AlRSZt zwbC#|`(*y3s*E>YiIFrLYP#q5$5Mt+Uln?thL~4zt`b{Ay8Bd>TpBPHEwJlabYy!M z0r+D8LQ9iWcn&^`cl2V9Di@(Agm!@>bG|>+UUEmsD8-^}8uT{C{qEVAN@38Qz?2hE1SWM0hdqG-Gy>r~L>-$`ZjyC%0-R zk1JG49%;neP&YNpVXBA#)xlR8m3by-BuGBjzzzw~k?5 zj16XF^j4{CFPUw#kqm9c;(pjIR=uqWApIdi`o4*)(~6ejbE*0_a!!lk8I5+LvoTsw zT*fX)lo%*X>}SS&yFWyEXz^Gq3F@b4LLH&#>RU;O3EBv`hQ}Pra8gql-c5v`ac&6S z9)qQZ>y*5|En4+pGB~vAxJUb^ZPN7F+O%zQTkZXq?;`F!Y1fv%4_w*+u0aX$X0#*= zA1EBe0fn2}8hDCQ5ZtJ%8SaK-I2Ml)4;YY5Cd&_0*QOExO_ zD?L>#Hu#DB#D!v2hOa7LarLOJamTZfi{x)p8?J=VfiY9g9B$-hzY=nER`I%P(X+{Q znlz}8r(#W;p<*65_jC4?RPVVgol>5W@}w;joKP&5L(Tl;xw)1W?DxE9j02U^8!~z? zH2MGN5QQhNGAj)?-VROzN`WCXoYF<2m|AnTL^ zJHPc9ZY35lWt@;L-{_rT>T!Trv_PPE^& z+vpTPpeSCp9{0HC`Wj3ra<=`@8~Xvj8H;$N>6mXELOk!B@+K&K2H||do7>$SSxJN# zF>0{xlW0S&&9bzC6o@pWPyDXxK}nE!`?hKUiTaE6tv|GBKwsbf0#kn9+cEmn?yM%M znpiC1rG-7WB{XH-Nl{+#5{r_i!oLTt;taDy8KLveCIajiKQij=QX5VfY#}AM8M_U( z%P^?ybfQp5yAG=ll|fV*+tpz7s0*xXcCjOyoZw9G_S|Ph8skx( z{`U?snd-@TwTfxe+#Gp-f4F7Ydh~oVo#XZ>Ac7QD8j8{yO(9rwjAU56UO23K!6uEQ z`xyma8H9O~E&2gCeHd6UY9m$`KRPWjazT?}N z;;>l9dO?Wj6_%(w!gdCk-!u|6L4l<-I+WRWzx^e$@A zPF8~SsCe2Q;7-Y>Zk8hV5VOjNrjBKhz7Q~^bK6%GA4};T2P^`;*t;;ZrBg~!dQ%tC*2gj$Q389|0=UDN1;i6V3q;}CD#MJ$9%K zag=OUX!e0L;IkxDYSSOw#=IRBcJ+BPLv`O0htrFpAXLV`i4BNAPDd%C|9Wu)a6~K- zfqe8yrnKXzSpd=b5^``M_ZFh_1fzC>b4%@Elm3rZMCAMlQX^zi_ix4ilTgHg5bOFP zx-Dbw1moBzGYu2HdduiHL9m|~Mxfi}#%TVxn*KhbK2z=5thld#c*alQ++O_HPQB$V= zXa=4k>t(@ll%!atP)`WzQhjg>Gv>el8fAqQ1ZwjA2m7 z{miytO2s9QlO$+;XCbSQ8!~G5&+B=u_9Xw^2{Wqy`zu&TVBbw*^jBmq81NzDL{s@= z%-+YoWDQ!KS8ify*|!z{n;=6k+A^J6KLXpSWkan z!xIk^phI9NjsOblU%yY(cAaz-vw4}fN5b?P94LYWwS)fEPjN{il}Dw93uB(mL!VK$ z?@52UUwB(4bHR^h_X@GEq)X>V-sGT;_X&rvzSkH@=%@qMH-@7BIbwLaD*xiA454R? 
z4{R&0zf%BNHirnk-GBG`pWBcQ1BS&7uQjb=MiPl9ItrxQFqNY)zQ$5b?iE)pfZw)) zvtkhD=SYX66e0JVj>Ihftnsum^#j)@VjM4w zwMpfa-kntcA)w6uKUoR?e1xh@fJ~-B&Gpx;j1fRDZ-af48c430KjukKFJ~8P5r3jp4uklU^_!AX|7{ zq?noX(M(R}o4S|Wu`4o@ew`|CX3YAuvURc&s|NPbG8@6pU*C_3J-8SNJ+<(f@o7R% z?HF48nGEEcShI~z(Rb&AAH1J+N8rXzbH7oDMdY--wb5>HOFe{MC=kh}mdZ(N^4Ngj zPq*WB?OG`q4uW*)yK2mGjCs7s_>*w&)BYUCpNF8Q3KvBz$N*qBCVOjuG7Ju5WVu=v>lV)oTm}p-qoHBO zMkEu9gjJ&`GTJj$tR5Db|97X340|1rh8Xr3uG zEPH1=rI9>zkki`I0^M8Qk_q6EzEy*p81hX=lcrcc&J|IRvgmyOu@(bxAuQ*Mvv8PI zm0ywyyiVoN-?WiC4EsE=anQL&^@$sx53&MKO4>%()O+A%63PZlIZT9s-9P>IkHM|_ z)*q4VHwyrNqPG~yiU;DCXpY}%0P#z5Jp(c9oGM$|TdAhfm}aCAAINjPR5(6kF;G&- z#%j}xZ^fme_RGg=r^-sNcudz`YFn&$k7SN)E+3vf`1FE24*(AmwCzgh&}RhNTKe%b zBD^u-u+%fqCyd@mXV1Do5#szOm4yh9JYmEn1mcSNi>mi@k>t}z1*(lxuO@xzrf;K< zNDLYr#&|zBC%O{6V@E_p0VW8Li2~rEL-4arE+-LeI;W9 zV!bLcfozW$#Gv4_em31de8_K(a$MR!D*PRl2<`TTJtv5GoQbjc&r3oCHtm^27Qk2= zY<-TndCV%cIsR4HSx|uQ#jjDuXdzYI@vkkkVmfUk{1hmy+6=-+%S8rBmCpq}eB2&j zx!P_~`liXk7=}e1UP%Uc77A|+n3J-v_FX|AroencL2eW=lG)vnR=5&S6s z0LX#Ki0)0*`cL+R^z=^`lhtP4osZs};RC38-0wacn)e)`mq_u0>9j?Y?!`@8I4We) ze69A6qDy5?y>CS=R;m{$!@N6S(IqBZwp~JHQqb(@!jCq8m2jp}_hf3G43?WK*v#fc zEjlS#je0L7E~wiTgYlW;4Sep08)+<6ixSq~p~!Sfi3@Ymz+pss9^4LpydBb(f;4PI z@lW0-$bHi)A^(iBiIRf4LhAeJaJWBvBqrPCKqc3=wF~ z=(2EdsyY(|n~U5FYV;_dAiEs(vX?BmazLY3@|(6y9IYJQ2iH*#B&EQRmeP7airsDL^y@dfaMHUMo5^^?uXq;}U`*auyTto3 z_gF}v|8f{C&068BEiIj6s!rWnOS+`i+v*-Iwnf>k@p1+YTZIgg1~zWj|=+a^{!fT%||(2>}PVk zcY7*DH|ftojx?!k2l20E^5$KEzp?la7U3uGF2GkiEc9`-uH0dhO*3a-pl_VEB2`6~ z>TxyRbAEv{elnQO<_rk^Q6jI>V7H@mhr0Ii8%KM)K|Yl)RBHJ0Ie(y_em+DHV({By z2t>ZZqHB9pF?dWjn-{0PQGE+_K3zXkD#gEU?yRk5JQfrBbuczEm^WvBHC6ZyyF|G) zVGhR*St$nd=*tY}^Cs@RY$sW^g8vRnpwU9%ip?v!JibS`y% zK!ow#tIIww`qZB{-fF7`06zrk=VpEt@7ix{+1^6?mRP>;VWcdrz|YPI;S+PFlt1{i zc%9CbIVc>7!ioa`bxZzotxFYLaXg}D7#fC0(ar3*L*&$4PUjlEZA>WJt>;0!0v1Q1b!^(dXSW7tXZ}Ud;F426B%R1xf~C<2XF58ZO+yMN3%uiO2AwJ%gI`Fq?rV*+I>tPFp}B?#@Ww{-x*2NzLi*pnkG|JBRmD`;d@u z4JUEcP|S!ha@&m$2Wi;{`;lRwjG(-%b-mnv|7r-21++#Ro-2Hn5^xZ0J2!b{BC#FE zXLfzWG;suEh1^T9UsVUWU^w?grehs@P2W|HsR8#`<}LIaaRwJwg>LAMCdkonzIIs1^N0@N(P0cW#O)UL+Fv`qBQ?#^Ef?pXF3c`!Efg_2JA&1RGp& z(E?X8K=74a?=te;1&}JT!L6Jy0 zU9Lk8biA4SRchkgqWlDIT+s5G5k5ZZ#z)VFj(ju9j=LG2b)>ctJ)y+w`*JkM6E14x!s_JEunl<`=Rc{arpP zhh)vHJT$V>y&zMnM?=$tDJ{@X1u9RLL!U3)+VIXKS;|+*$O`ZJ=q1#s#5VX*#c0H= zY@zq)Ziu&#;Y!~5K4Ivm}QDmGBM?ZQP9QTJ|TBe+pOkHs|IVxI!ugAR& zXOqz@U9exYS2<)u;P(?20DTedvOWKxPeK}f*2UHB&F8ty-GuyJ{@yFf;CJ<0vK&?@ z3(U&so3R6b=pW%O5UH#RTZb`M6jwdwN zX#0KXTV4d{>z;$At<}MFT;DVu<|*X+AeFp{nlQ^&kDfQ~!m$Xfrx^4xD1Eb}iF-Y^ zNbdrajZxiwj^of_en5V0Y*yuCE$tJFWP1te#!L*<*(ZB%bB|1a1EMr#dG2$BA(3cq z|>VduZ0EtKHEI z`e8&b(R3m$DcC-(`vdYA`5;;>5{a|=O*00EwP(lbEd?1)l&wuKTI^6+?{tghZS)A1 z$*lzP_Z~K%puxFO+(Y^dR=D>j7*wk}II%51zqY8WQieBFjbl?-)vO=KLo z>^eWiyA!(?F@h8FV!c!mBl)p-qSx-Q^a6K?_BrTiM}XU{0*~>8uP}H6%_~1yD65|& z_*er93fp0)Y!((P{~~I7aAR!Y?_l30tTz!t_KjgwaANumwi_P8sh7*EDjg_u5)PUrGYFbj~=_PoGP5Ox#frui@ z!gR&TuQEIpndQOcvk^;|H`hj9+2xu2toVa zWu|Mhlzz#cr?Xu2v1V>mE?KdhguQbKAMX94Q_pnSl3{q(Aau$pJd6WK9azEBRcr{V zY6Azk@cz8Ep2YbTz&Mj4{|!y6+At|kg-YC zu!{&5YCXa9lAd$w_dG@`a$|>d!TjqtZz<|9hCQ8+h>x3*^H{RbO+d!^b3N3^5>KAM zVl0!tTuz}TaQgzrDPIrpt&`Y2i|N&t&A@z{3Jk)@>_ac{nN;2cMJ5~UQr;)J;tF-d zkz0%=&3z)W+Zovjs-#@luN*lwyA!86aG?{&VaySe`a+_{`!n+XX4^n9g<_X&P>+o< zKL{0aylfRnn{|NcUpZ=sFxKG)5>@xJ2&=MP@Mc~8fC!EhL5Rk5vj`d>e=C-NX3C@V z8kYMqnbB1lc=B6rW7_j30f-yVuU7DDh;#)+*PVi1B76tRB#83`d)21>zifn^=M56- z+7N$!7^j`MKYU(IRdF(m@y@~afDoNbE6;;1`t@RRivQSbA#>{ywVFi|#&+O^*?jjF z7HOcb_GqWKUj#6}W`VmYPCi)%uub*fUL)a+|w~Zo(APfYh zB!BVcsbxg!aNOwtOc13)L!=gKis0z`EGTRQL7{?alG{mL=hh;$eGr*7h~|7R+t-U! 
z;e&MqqUR-vvODT{3~WBO{d&tMcK!jZw!E;xv1OC#RXR@vuE8-Ck$uh-H*M zNJG+e=JA3(i01>A)W*Q?#0w#|kiyZDUv`ik-U@?`v|d48n%kpK zwd^=&6i*tx6*uYVGMJz-3I0@AoNSSh{FUUv!v0f)?&aHrVXk*`nMx?5*ydMV5(sQa zrGlR5>eWW($;R(qd^@fYh@(4e@)xd1@R9vd8;2!;x6e%qIOlD`z-!GN$3kCO79yQ}RqI2Jz zYQEJwdYuA4%XOxMW4S9i$OK(Dd^{OVc=OIy-d7|_ zXV4((&Qas$j}&hOwjMxQ=Z?l@xMaUejV*>WWFDjM&hyR`z-ojQ%M{daw07Az(RBC} zxI)H$Zee`$l5noV@$`kbyh>Wn+0WO=h~375Z>T1tt~ z&lqH&ob^=E%6RQZID9PXQzGbjImohEUaj_-ngSeKrD2uC^J^&}#vFA`yQdQ_3#ooS zrXCUWcB>na^Hq~m96V7z3C*XgrB;ZQ^l4;FtD(`?-`ul7|A)P|jEbX6+eV*25=cmJ z4=zE11PJaD+}$O(yEN9pg1dVN!QI{6A-G!_cZa6oE8g#&$uo0iex0?>ud^0E8dgzN zUA1f1y|4QcxQ>YZn!{h2V0RTUb<<3E{OiMI)=sa3mw3ORo!1BZ9>2}HNq?A*y&Uy1 zCMH+M%lN{gZJ0U3^#VKx%qrzpVor&R27|IL9L%M-qo&7qjAs;i!eyWxP;#4#FPAXQ1+v0?mdr@2%O=Wr|6Y zQP<~{)0Q(mDIXKK;vad#c)p_X_O;!+cas|EdE}k(E0$;40>|!#sAjTxOTWT=9jc+3 zZFyu#Fv}@&&C{T_MAa@oSPu^85JC@Pg%f{Rs5|}(2Dr6o6cbXBx_C;!qHX6Gv~R41 zL{O>5l4mJpb+M6YMV_B*d>Iola}B-!GUj5juiSxQ*>=gSlrO+%`tB?)ogX}*aZO97 zRUz7X=U(tIciY^&AX+z?v1Mb@g?huhe?1=bey1{ za7CntT&hnb3`pMI%*c#epVU?al$Wrh7a}YUjCvW@RcRb?+s#pNEQA-I)eW^(&HPZi z*8tJs`%z!t@}}{Ob92u!;sd|#MWoLI(Qeo zA>||a&qWe{HEKOge75{eE1{!`B)5AGLJ;QKsqvV1gAEKjQni&f47BKy`Q5_tswQMP z=2S6!IXfsNfVs{Q(d~ka%IITP3;u)`w$>`l&mv6i!}dt(2dPB=V8osfoWv_au6s2(R@t=K<2{r-fSbHi@l%Zyr^Z4h5@Yj@ z@eN!S*SEu#Ym<<8B`&+QDD7r9g)vFxa{UK&PEl&EaH-8^yje=j;W>m?A_48#5E!fB zmD&Q!tBfzU$U7yS@txcKl8Hw|Z8nEX3_VlQF0~^thpcLwiMdjTLH_nlqh21<7VUX- zSw|DrGK4U4TcMZI`CkXIM;5MntcdQ{Mji{vaIl>-Bs$auT7_R@xd%alrnZio#I` z`WKPHg30aIrLnL|3-;CeU9nrc4O1>XwP>N+ocY*DP9h?XRXM)YoYc)8@Y0Rk&%V&H zxh8JNvIp|cJb8ME8Mf6iI=9vEGNA}lAIo@)LP_6NOK>uGmFSmTk?ZnyrCjOR>2IcE zN9oHW7hKIhPdxF3H#{~A%=d(9(x4DBAUU@L+eNA%YENJtwEkIgH7qa-KXf-wJ2^&H z$eBVFfqOV+-#C^(JuCBFRY_RmqwP1+PZM(^k~_#{!ova<5$xW-OwZbM$5IXawY&>6 zR*$ELt4|05*iSYir(ey!pTgRme#F}n^Swn%*silz7DuJ3VHg9zN z@RYZFEj8y3I#mSkrh|;jEEyVfLg$fhVoamNWp!oH#OqxwAj=O$c;mr6M6vc65Wnjx ze`x?hg)u5EE@{Wc0DnlMUq2&r9^b)!X9ZdeQYpRXzGeLaQn&vBB_H%!D4%ZzDW&0J zAPB4#HE}V>r^{lx3G~bRF|l)-kp69 z{Da0LG4s1v!4*Xx)(3FAi4bXQj$i#=N#SwMHP6+vGu(${3p_N(@Of!&tCO!l`_Tr| zBvroVEuBOgUaB+c{#J3^&M44miD?OKzV4u~_fT8U*md z-F|_yf!~R>ipwG73yK9@?w1y6WXFuwd~*-89Upg9SzUM3g#GBfpXi&sI67&(-Q8{( zlp0M0BamVF1ND6z)#5Btmq!RW<6O{nZ>kBCCN}*Bh8B+Jc}ZT~&{KZxZz%B#URmm6 znTr>vbGYvj+Al*28~UnUA4!uRr(^8AI?m5{o#p0Luh;RfEX0TE2CO5Zo-yHhF26io zH;mUB#&r{kyB?|zAN|!DSn&uoUM9I;6O!o--~Vb!-lsr~0tgduKZ4)A147&x24W}^-^=l!vA^tA_$Lh7O#^rG z!x!h7ZjwHLn9~=v=~2BnUoA7aA1d4fC)VtJ9`gqIh<~B#?ez{PD*Es)ZZ`UKMFR$N z7i6e}syyPw1x_)=dswB$v%iK;h4v(RQmh{Ts;FsZGJB4k6Oe^MB^?ji%Xzh;`;rFDOK4*a@j*oZrg{@NF%B23~hqM$GV#hufz9CTM9sBp3ptHDx6_57grgw)a`P)zvbr=_G zO&+}n%^Wt?XbU7~wx-_d$YW{~;SKaIi-Gp4dovP|Cv)VH7xi%cHt5mgSE0x6_Kq&N z_3x9vE&`s5B7qC`m$r*yd$VUqIqZu7z3M5jydikW_i|TnI7*{xyFUz2 zd@a@6|7!K}Xj6ED|4Du{jc-$}59?2d`r~s{Y}4%tUk-Q$%ZS&({)qaaUy^Q%gcoy7 z)J%hu@u3$-xu4mTf)mx7-_Qf?X*o=mA1YbCC`q{$mEFUZyp7Ih=Vl!T-l(4vJ}|zn zqyCn7=u*-Gmi8DGLir6JZ)5+$CCsejjXTIm=m|VMN>`yi2 z>~Tahpbu(Ec2Up}I8kO%N@km*6n#07ZCoYl?bW{D9kvKySGrkn9mm=7E2*rO%2yt$cFU)Vk=vBC z-;%#0jtKq33qVQF3-W?KKM7_hTNL*_43BLYCYV&BMs?NP^YLZSGNY5Z>9_1eTnv#T zkRJVOs*oCshx$>@tvX@4Vu5xy#bDxEL8HE^Kps|*ascTfE3S76Z~$x#S`uI zzL!BN73S#~7CA569p41;BQo})5a=6$T{ks$3tIQBcBE6&%r1hYtT{b9+In#d3cn?rK-1v(8Idhx{Adr>Hjsxu~E1M z{4LL6(Lc^m5@@t!9exkfl(k)@u>45pKa3iTFWS*lQKi#jPxDw35;-@A3r4pepkBd}(X&RkXXSd<6-q!v3#BDn zW>e|%1mN3G3CNV92p8+5@Z%5Izz=X|?eHCsUl-cIZ40JNX5YG>))%8E?&Vo6(siHaoJ7y!GSg$l4 z(}lEpNirAet=P-#&w|xQjJ8sf2g2cJL0#WuDz#ug9?y?>?{L1{!>Q)W3#N|^!-#RO z9=B2{D_xKIU_~B*$SdHa&BU!&7`Z#pI;UN9#nQB~7mGyjotD@|5-(g})lx7?ckMRJ z-vhB)l5y1UdqSbBWmn$O^qYyzFMx0oAE3`awL2NZr^7{~Qs_TbN?61134Q=Y(rMKy 
zdaGo0<0!+e5)9@%wk|b%%5Ku(v|MZC18fgr8#T78e7wqO+fB#bYj2t~s^0HK<~ofw z(xfR~-_GhWCiv~O^7)tw>x2RXO^J9~EQ-`K+Tebxj6Au_X>c_*b(9{gGWa;0%zO1+ zokLnyBLmv$nhSc6)bq8&lU_^40pGH=_-WcVzr2G#ZgvkTdqLJAlqpGUZtFXLBwep_jS|RhT~CeZzcvn zzHhv6=3(o$sca<~@)BjoN?BY;U7!!^OQ^C7zfbQZMbxd1_cTN~D9;*XQkZ4N?#$Uc z-PW>zSod0rL670u{O3&db;90SGC<2j&i#My>YW zg}3^jm`I<vt4!Lm8u(nzP!e$=+p}{@6)=vN(aAS=*z}Pkhb|4EuRgi7)HVJhEdCPF0d9!$~ZHUKA*I{S)o;G zOnM#g7HZ=c_`Vs%oB|aOuCAc|4op5dUL;ela~OzhT-^g?W2$ci;9aWsQ90v-6J5(T zdrX!ey7HW^@qR61r3OxTjiqyo-L*V4I;MrEvO)bT-qoP`4aQ%#9Jub!$Sl-amYtd1 z(?Eveov#Ra-NTgg)h9H>vY?^MQ&LyVI@e}?Vvz7>@J@IVkKC@_qKQgB>y=uQn1nU! zENB4puxaP5%rnT)&{rS7xcx%LvcP#0MuZEnu*pbDPSMerM_b5d#;z4#iEcaK;doT6 zR-mXN+5#=DYPE*4nJ-u7R6Pt#6)4ur8{oknpv>O)fsL&{xXM&ZG$jD!D$ON#JYRu) z_tN3mPA_EbT(V4;OD@^mGU0m3F%lhRWs)qppbLGo-A%u&ZAC5^qpcq(9xfYUW8Ve+ ze(HKX>ct)$X3^zI#FND>zc{d5ZH52UI5O$(F-i4q2#N9Lal=oqK(vKhbZhi2K{o%D zpMoSNJ?0TEs=-|=n*d26NaH2tl5{e&ZbMn##p3<_Ij_;=7QLpO)``j-3^FTFLZWfB z8J@bom&aADqMJ~%BxAHas>5}S-A%w)q|j2xr^ongps1zk=zPp{(Bl}|P3-}Nm!x*T z1oL7oMl?1WhvovrLtN7)LV#WL7XSS!o%Rjqb&+CWLkUH*=$u5uzANnK?w?SQ!P{d?0)Sn@hZi2@4;r8L)cyiPADYr zoi9u;Tq(q?sVN-x*z_*=_K{D$#oTW*rB!zpC z!isq)QZn$WvRXhpXAs`3e_XszvSsHP&(IqKRT8tKIK z!zEsj?1=Q(jjb=q7NQ*0Nn2EAhv8*|ZFW!?t8b3F+zz_W74X!I7CK|$V&CeUm^-U` z@dRZk@NQ-9*^X?|GTj4It*KR|0SdKItM!M{kYP8n#h06+`50$Y>)!t5_~L&B=a0!R zr`BJIfw{9dbZ3Jb;bp8)C)~;jIipWDNi%Tn4}Cl^Vn5XjCy-C7UED~4dw zOtr+k7wweH?|l)CS$skuT;C2uQWcGRGj0E45hcTrB0lgWs7;NW{QmQ``VkD(H`HV2 zV4fdn8LI{#0QqijZ?2O)9_O_Cu8%7=shomnc!Uh4oS0X~^z8tj=W=3y=<3A|3&5gxod(AP(D!?Kf^pHx1YD1RAyE~(bJ)O-gc+zvGy zwKRASA+8~H)LQOEPRcK2f^}m`$M|soas|^2^)u_aD0br=En5U$qZh1}xoEvt2g}(H zlQ5lNJcG$Z+xf|%l+nd?=CBipU9O>; zQ<4jOg);ZDf;Ecz+ac;-z-ye^urK`W_VTyGa?V0M@|b|>n+LDUX*Gy9q#!{p`0J}4 zU9Zcx8t1}EiKL@=z9`LmYxYLZ%Vy(lSe^Go#83sf5#vTu>ZZZCUkFtjog`oz6`*s~ zOR*g$*e}2dUmTBPd>apMhI#9eEbt}zuf-qg!5cllQ_iYCZT5ympA_Gl-S60e)XDHW zrV1vFFOoh!o2{nTtS*;y^N=86J6dT`14038*3bYL z$^MYw9PtcoP!=Re0!Lv1XA@av$2$K=rdio|$lW$>lw97hxx5b$WIXC^@&)g)FHJK*fOfa0xMe|mz&UK=V(Zq+5yYj@4%0@()2R&FPS%!a%_d+WVeUTHZi7eP{EzA0+z0fawvfrbq>qsa|QhbSfEq5voE0YD3+@)5PL?ISs2(u;y7U1a0a=MOLaHX|?67Vq-qm2vYtsqU4^9ZJ z$raqg$;VSl1o~O(=;>B@dCV{3kt5ii!snIn$@VBcveyzf5an@?D#RIl3S#xMdV6U( zv)srjLP79|gN2PZyfyZy$p>>vv5EO$iVWeLk#i_Sl{D0&SEmLNOmfr$k>8Q_gkVkg zef{t-PHON&E0U;e&(fdYQzm_WmCinb*S*z3)p)AkfGZA@5vv@CD^?_1d3VPpo64tD zs8;qvNC%1I5b%Is>#LI4$3B}e2&NOGqP5qxyx81Gr z2YCl{1%OBx_4$u2rA(5jrTbXzyfg0-EvO&KlzoSq-r&2kwp@R|@)I$Zl(SMh2_@oK z<7o>OAuRnt{U$r~i2Ofa8Js9t-oXnsIbClkY!*N>1p+!O@v{|dwtBq^Z602zHD3=>oFnd(y*?U~qNn`UGIz*Hqdrd6IR_J~ z<1y?d5Q}p^nwLH6#t6TUh#Vit5=P)?m{=j^Wza@W*YC?N~PsYVKiV&U#Xz$EfAND6gX|AfFV3Y+j9r$u$uV$uY8^>zjt zU|>WxH=YHb&ep$dk~&i5yuU?T-OxfFlBL51Syf)-@K>un2qU z0FBQ^@l*bP^^%-)L=f|Dxp^25M@?8IIPcHwo*iX^UH0VNP6YKvKefHJ$dOHN^j2ZD zm_F0uFWp!&@VGv_EDFR~$O*+sA9KbE(QLZvl1(L^?tZ50qIa|u)Am&^UvOtT`z=0) z%*fA|vOVDXs88l!wgJ=)F?8OA)&>*RWj0ILl9jX*-a9p>P@A$>wd6HpOlQ+xIQyM= z?EAM6U94OTNv-A^t!2V9M^@{FxhhLH^%~3lAt{%O9i}mtBT0V_#>NT$JGUbZJ?gS$ z>^_@`dx@sOvSybf;5%)Fwhq+V2bzJ$SzRCsSp(X^-5faD9(5 zTu78>!34}at6#a&u>T@3?m58 zwk;a$)_t#5gz`?Dgex}$R7;oqm@lh#Gg85->*2jo%Fd4u?LX9jq;_ZZooDg0^o8Hj z^m>m}Sma-F%K{d`&Pr~1q@L&-l%0yvD9GOi1f=WpOYLB>jCvQ2$332DSq%)ON*n*G zGl>#_d*b6DYa0;~xoOP_@IBr;nu>sqn&gP+Fy>MR&2dLM3E3Uqe0Fqgc^5Mw+|J^Q zb{1wmw|4%m<7R#OayKt?=Zf3)KzY&Ywx>|}6qMG`XW$Sxe@R(_|lbZwSlzn&q_2n*kLGu78o>xmX{jXNh@IqQVu$$n|l$5M;&FOfH zyUX$@jo-8^w}XJYIHO9D@-I*ibEE%Ql0)xv)ZQq}R+as{VT*By0V`HrN*Jq9*8xZG zQ4?bwjXT3+9!lEyZP?(Daj|I`UoNgwu(m-GFH*2Cxuxmoottz9v&u;@5HsYU{2H@A zPsCyM0T6Qr50_3@V4T~3+iDcCJX!j;K}Mnr$c3;G7*?ftJ-7z8NtuDjuxKJI^bSV7 zTIs^sd^Eeb#K%?wo5cZ)l@rBgQm;B6Gmze-GfqX;22O3f}X3~#h 
z*(GF9@j_SQza@E#OGJco4@`@L?D-cEFOu%LI z6@88@OFTK9Ts;L3pWF=FhQ-1AbMM0g)HZKk$Mw!(3}Dca&S6gAOvhbLFiFP}#urpP zDo$35^*ty~YIm)zo6~{L+E?szWt+9nQ)|O-Q%=q%J9Jy{*x9 zAg1R9vH#rR(rv7jS`{vXR%7R@3aD9Up95qMi}(EJ$X^*UZ}F$5Gs(rgC%?=^F4iJB ze8qc)RE5{daxX)pbtULAi#UX%^1#6;Wi4Z?Q)XjrUSc927M3IF>}r$}y!_bV7wCKB zH;)gW%p2X|AKCkqSc8>cpyw4sVXzAC9WSUFwLh#Lnb^434K?ANWA?hMcCTuAJXObB zI@}a7eFAYhlK$?s{M1Eg2gd@;Dh?MNrb+Uc6BJ@T?Z^Ybeur6(a?^I{XRvNxn0Jy6 z+T}+#z-IJC_ZE=N#C_F(YsosZ3X8`@Es6G#o

    s}m!q%TT5W>F!PJIShu6I4x&}_gY7a-wWhtVa&)SaeRK&&fjg3*O#zx4P_cn#vYEXaokrQx4Ds5dH&X|Buv)LyqKZz+&UR;NJD+T>StB z*Q-{}Y}XZ#*jTCAO!Uc0ElvhlmsYLtCm$B1>A1CueL0R<=sgn1%{!hJpvZay6HF*J$46R_9wK z0QKqGWb}72d0BEI?8J|;cya|fl9nPkutcj(5Sj3^MVUx>YL#ZcGfv?{POQ&E`uipD?*v$+3x|MziBhtPx9OIZZ~h# z9l&9nJ3aD*u%ve=pBIX~%E+;oh$+!yumCcX>!=-b(#;duvm0}!uydq0fHruOs$AaCN zI_w4`@jn^JEioQnGq8A^y+^N4C7+9>#H3N|SKc>H7*0;VjLsJ2Hb~6(U%Obpk&smc z=Vht5{YG02`zibld6tDvGu0WbQ>PBcLxPxcYpP_npAjLYsl=m2wPa6r`WyVh05Xp%s%1%xe^H^Y$yIH|Y6fuE^=Eo0X0l zwm0*3(`TNtF9O!DB%oFCrN(5UMvJlFQQWYNEs{rZma~`pxOtoEYsy=tJa1B|`0}gG z$VG?^YiiD-P45#Nv$VPOcM8(~%mbP2gpV+#uuN0+GF`9aPX<2-xID!(|9YNbLW}v) zcrY%8!HX-I&#MvW_=wMx!USyt5mibwSwW3TRLMN{F^<1nDIy*yuU;ECa=Bnd>4S%p zCSVeynVgWh|dZz5o&{m&-x)C3h9D zpa0;qP6OFh9#Z}c3U(Byo+&qg$Gbej^=d>+_B;#j%Tcc&lT1gSuLgzEXn#7Jxt%K&!9&2V(Q$aOSa-A0cop#1DWw<7#TT#dOHo z3Y2)~5LI)b8l*|&eM9Pl8c1M#k|7yQ){v>c<&^tcrASq^p(kZ1jwW*eXbDU?PO4^j z7p6BN9LIi8E2Ta)2|P1__RhB%1#iSq$=Ni8eur?O%*JR`ZN28nUsIW_wH5=M>ucj^ zRkW|$ET*q%yXRfKb<;^77Zz<%67n*p^(omMzgS-$Qp;BjW+W6Vyw}uCCkS zt|A}l?5UV1LEIwfZ&n5il-tc<{@L_Fgm)#)hC%N^3-miJ+T*xIjjFMYQ!E@R*3sgu z8O(;mpeuhp&mW@@Y=?@q8JZ&zzmfX}D=VZV{^Yskosl7EJz#Xy9!>H3>_ge^30K~) zZ22E2 z6Fr<0v$|F$QZ;MMXmuL>?Q0j0mYehzs14}JJl1bVe^r}r+Km7Ky=4KyQFg}!F#JLxGCg|{*2(8%lkaTbBv7t0u&ZPYsp)!4$knc` zxK;Jy74B~kfj#y5&QXWS79zY3+3Jbm@M`C+Ww@JG;wDBfj0d-DpHIGoZUa`~PF<9L z=jOtZ=#zzgOeRiyUGBdbK?CnYW4O4HQ^blDcWSS-yf955hJ4LdOtqIcY$+_gf50Es zGPo6~Yt==iai+B&d=72kzoc1BCXstmHg%$q8|zssIt4`oxj=sd{x^}T7%x+3ZQ)c?obx*;@;b$Qer{Li?KJxf2{iD ziq-VpYYpbrBHp-%*`~lHP+nV9qGG_LN|~w(r20+^q@lzeC zs|K(ldJbx5D3G8GBR!~G-7llfV#)T*YR=RL5`JQV_?VRU?!o(dt)6&Z%K*T%lgN(r zmZY?UWe<|r1byZ1o&H<{=wjuZ@f0(FA1>AE&xO^^SNU3gs+Qm{482~Cqt=ifJJ>x! zU(bUKCB|2oj=pwCXn4C~RhG{*Q>ruBB2h*qm7we~`Ba4?6F>Br$MS>u1`?0GO$kZk z0aR0-$XczYFTHAA^x15s)ON(L3PTT8(r;}z?^AFyf{(@N_>YmnjBAa}xzB3~_q
      W#WFxp%CdA2oxrOcAmH~03->9panS4YFcDv4bcPcNAilEhjhF@J&Z_V=mn z(R8UJ#ZE(`gR(Y;BCp7z@)ni_OGY4 zMUu|;f8X?xWyXLuk}EK%){wX<{PYZvtKIKFR!n9A7oAUD$;`aHOTX*?mSy`YvVG(V zH~XW@CQz$D)9fow*Coq0qY6sFv|AOQKkR3g?#Y+ zE1y4#k$+o&ppyX$kXNUL=V^Z=um9={0T_9C{Oe!-?1IDq{d=E!XY&z{<-gXU|2RqU zlc&?W(fax~N7TRX>}dxd6F!!4kNrQBGyi8hbe^a4d%!07>%SZz>FEH&`=eF=`TP4k zmjkjgeA`|i{{66j`;7kn=>X@A=f(f|0+4h(nc}S3K7;$`HUF0nKK>HG0sh}!0+-E3 zsz&l?2A#+2ReuoG$$vW;AApeWvgu1Dxy1i8a7aj>Y(+K;sU#c% zDcVQ@o|_#f=?W5$>3_LJ^1#kD61jC!nl2XNKRotBKFk)=)BPI;_)nJD{VV_mR+XB? zPy|y5FT^!&-QNy5+}ct`FctqI>n8i#iv_3z|M7G03jC9!&QQ~_BrPDtAFtU~5r-r@ zf(I`59~(9855`3Cpz?qsC1v6_8{y8}vz;%0!! z)*>9U6fJJk24ibDtX&lHRCW>jmy+Yp^|xaIt~rtbbQDv9vPrTZ#s1(7QskflA?yki zg=6ttOvKfK zc$bg&evhVIgSkMvu`26T_`iIQF9E=6m_+?pe#Jn8Id`UWE)%R5-Nv97YdKq5$iD>B zL-%#+koj|Ve;*wJaQ1P)e~W7;84aK#@x{=hhtL1R)n^0s*(M0fOdRdWI5qX-G2`({ z7<-aOk&f-)&bi*9giDv9H0Vc%;hW3c@FwEYzuPNC3FN=}c4ANAiZPu6`t zF>KbF0MV;HPBjyVFauTIo{zN|wYM9@kbQm)R9uO1jB28Q>|kp?As!e~_D|Vw^8$Q0wvYD@pze5Eo zTY%3unA&BJ*M~@_SD~rv4PoUi1dxWIrTI*$v)?Nl0tD|GS<=7{w_@JvO@gZVj`J<1 zWogP|J*(hH`rWv-XmT-^0`-bC1cv7OMdjEkKni@7mY|jK3o*fi?B}T!hV`n z5T0h0O)mSmKhF><4IH^x#3@EkIYu*avlU>h$b2(W2V=_-kJbh(SB}(hLgM`Z*Vuc; zwizA4u%OV+xwglL{j@#ny5+67#}5Jh834{;F#Bp720aG%Ut7*dj^L|ozWr8Z3Qkmm zUL?w}`%`RjKgVK%hV9YO}G(83vu$dyjsn!=%1R5y{%B>RxyMSs1LI#D1; z9Wa#0D3$CmojwLWZRX~=EOzwfw^>9x!IVm5Sg$J)@bUECw+9(-%kfK9VIT4WK3Mgn zcg_bFzMf2m5gr(`7}P#_nHJ=hty*O8ch~la_x09IRx4-ORW`2+6mzFm6VJD0V}bm` z6pLgI`w%4)px!0%Mh#*iGK7LT(fEV@c|%Y$5!H!4>UP!giZ|D0kJ_9cx6Q({#_j_$ zPD@LC-L?p?P9A%CQSi-oucRzb9n?Fc+GNA6Q7k^so{J%4H#i`5%$4>XkD?lp?NmFU zLIQ$VCku|Q?>@u;K3}Hk0sd0oPd6iJ7`r@vk7wyQ^&4z?u=F6|0)BOIE{ELQh5_ty z`>G?5^*~EWfTH(8_v^`ft%g01g*`xF4T7V!1mF_uHBK4$72UfuxbhpPA&OUsWni>xnWXDP zSKHoq!!6UmQs~0`j@fvV$z-7(y6)cnd^GKL+1#IEv*Q3*R*!B;Ph21ho$daVh~9HLilw*?~2t)#T$6aN7lJ#;zdIHe5y?}(gY!X|1I;ky>7kvd9Rhr`>C%A@f>@WAd z07##tAMfJu7Zx*U(DX}hSnJmCgLEATd=XeFONhXDSzBW9p1>-d3Eerrc z9S0BQ&mrwTewtr99!|_?73-s>${x!zf=I|FoOdL&ZQw**}Gp|W@Bq+VP! zy3I+L)F)qCU)mU$7Noqo*c?!*XmWg@zTRh7KWc^g|F z0CJ*KOC!pQJ;{UG%e|R|Tsc0X@{;*-i%Wc7rTs{Lo7t6AjV;Yl(FX-GmhOyb@^49`J2|{ACr^O} z(y#-hWKPFY44aUX1zp~zyTd>%K%KWQt&`r#6q1pNcGAga zi|guZ+KZIh9m@j*a>ytqO`6T}_E+dmAF*qHqMjG|V%@P*tC*eMOreHO_+d2S=Wz;& z7!DYM6uCbB{9ze_1h@WzBT%EEx*|S*45c{=izWZNC4T+q=L|ZH6vE#k;y#DAT@pNd zPUYEKU|0QWQ*6lV8ggcZqd=*9M&-q$GKRfl6Iw3knLw{c>9|cl&bW*C%=uljR@h%J z%Nly55+pKP;g?3zoc+a{ANCv^z>57NRx=2s0#?iiw$Glw}N8zTj1&z8s7kQl_eUm!+bNvE+nakQ! 
zVwr?fVy}Z)Oj!C?H=E< z^>8Dvi?-kKv~xPIY3uw!L$|u#KD)STfv2@i7jG~M5CTTTR^2wZo-nv9U1pfpTdMu) z=$Uz4i?{IbCurl?`cN76;8!2v5ffAMBY?!8TL?9ailfz7n(u`J-lv_EsOqh9_0n!z zf8M78aM(U|E8gsY6V!3Z8D#J#plIE=VaRcr@`opn>a2H9bT;1!X3v4P{M@q)Bb~-( z>cofpNn7ysAW#r|!{`B!&ysVQLf7~6)kd6w9}B{TxIk0Adnq7@f#a;*58P6mBbvq9hk6;gAUhUE8|%690f3Nn(=qG4 zb>TW)wH!a!*h9oH1YFElaexL5eJ5bs{GV&m`)kA}&)Q#9&bx|s3Ik5nYx{uNkyV98 ze4z^MfD|KdndvP{d@B1R^|HssjDf9NRwkQQt<(O(yj8>$IuYOdpUB+GiU#`Di?vbr zw?{a?>4fq(J+UgUx1!b?`GW)u`((qEgUJ2!OBs2{8yq3O4BTAii~_GpUJy4QsFiB= z;q6n=+IvX9VId^5jc4-DXSJQ_M#Ceq8cx5CCx6~Sz86T$CJq^dj4-%^wuUu!`P*^s zz9rt1+Fm98$_e3yTTLig{8@Gu0*^6=V92ld!+7pD64r+}EKmJIZYO*H##S%Ai1BR6 zZQhTWb-HzVi843M4R6Sb|HA zKR6t>;9ZUb-rw)tR$jjKmMRl&#^Z6T4|~V0C`X_l3BF2WtU*XGD1yn$fYxtizl>ru zbdVsoUO#a7aA0Y4F&W5xGN&lfVs{jgg(lycbz*Ke{;&qBqKnH$sHho3MvnQU=|B<+ zU#H1o5BryHGhWB+(jW*Jm|bLvpKTmRv9FF36(h!ezTxOpBb`b)7ZE=l#>*5*;(y!l zD*H<`%sX5%(@F}MC_R7>0&EYfd6qv*_$6hyegk7V$g*qdkL|PmiPf|_0RvgHufMl@ zEfEY9;1Mn^F?z;d`u0mLXNoB-0`eglre8w_w&6|-3G_;uD+Eh&uW@L9*Ti&moiOmX{eCOT!>OtuA3|h}PhggIavvvUd!oADGg=4)p z(0`{$&jbyY5oKsx5}A_`ay!Sx!(#Fi1rfO<(W1Z0?)S9eoOYmW;Duhwp7Hmsr+azSFp(hd2Q ziyC>Zt;CoB{ve5-?jjrgPmRg|f z8;xU-sOz$w-b!Esow^Q7Pn;<78jNc_UecpiSSq65e_Bjo1B)s9eRsQ0f2K)PdbkfN ze8lG)ubxqA)zUqK3LPu+3w)!Pt}>Oua+4>mQv%RK-qZwIjm`pgP)1q^ z;%iigFjuT@6hvB|2za@I6xKpc-lI7*K5$}{0#?+=rDoj4%L7b;Z$Wp(zYM0TInO@Nx9 z)oT;fgcD!fozEE1qm`=JG9>?i&ofco%jz|S@#JaYWk6>;w}F7`B2>u*=VT*;$?&D? zrHqzb8b^gBpDu?sZW`WSJc(E8OAHO2HMO>n184$v3`qqalbnIz20nR-`_ls55{L5K ziIrMwD6*;a&d=T0aNnzdpU_(I8rfw(pJMqAxMwej4q{nMT})@Q#UeS4U7uxTJDP$^ zTXLe#hpiE-{Ob%RO%9FLzSpHC6D%Kou_QDc%oFm5$1_>BTDnm%WqJO2k^HN_ zJ`1cDz9hC2RxI**Q2hff1R%s988{Y`QTs-*9VG+^&FNhBWg1u0*Ap%elL7qcqQa=W ztr!|5^$7AN>+feY5KepfUP4t(2w0533#;u*EWK)4yzkZJpb69)O8I*oNwx zcp$w7={t!7#^FL()_E7xTBQv&}gyx?c@Nru`) zD%qRn1(Jjm)F%Bt`Y8TPeX{3s!5FQM4#%;qmimlh)vQ0@2hm=YAySGY(el-s&f3lu zn`+&=6tuaHKLH&-ju5iNo+x3#rH0GC!C$u4I2~?81Q0KitI7mdnpalbbBXD) znWYYR2U!@C2p@W=M79UxS=5zUM&yQ-$NuU>L;HoB`puN`2b5Xt%F0`q!|}>>Oaq9r zcN1d%z}R@b0u|Vn?0Qpm1H^nVq@)fB~Og49HePI7N7I>@jjs|BtOxlt9)scrd< z15n8bru_XybII%SG>xtrG^|k-xolTvLl)I0&Lp|K?!s86f<(yk4CrtUn3glyZGQq@ z9=(W$fPV39z1&83#kmEX!4Kd8y2!K5)ODxBpz_*S8bX8|X1`~|<@_SvUJ>9R+#805 zR6DOtwtfswb9C@F5PE5tq{2#8A zZhrjpoUgUX{#DgS$YCcwCLfrnI&9`D4tnqGmE~BkG*%Yl)aADoShIhLbVM7U_{KnE zk}nl=-13cA>v&1xDxK3&MDz~6uHZ>)a0K8|;_Bow3DtM_T5l$f{CMEF4C_Q*-V_mE z=6}?7GzoFC4NQz;qA-a>GKn0J{YGqM<-+aps1NO)VRMU_E>=(h7P;R|kJIkvaqi@k z22Pe5&SL=5+8kH)7BBJ#IrI9^+H}es;P4;44@8djKBL z-$Vzk)~4FS$UR!G=HO0YzbDPzP2?Pr-AqU?7N9BCKw~>jG+c{gjOPdnPy@wJH$F>a zFgzhE)IRm6{8j`3T?T((Z~<+quN24SDn&Emk-BAj%qCyY(R+#Lbc9X>Z*=Q$^z4Oh z*{JOg0LIEi>k(M@n0-zTQ}{-^BxO2B-^VjXh4Mx&5zvnwqA?~+VD$omwtWI@7LJ)v zlEj2bb*z{s-R48aYs8)~ z;F_5~KKMoB$D|{SuJ9{z_bFymSfB@+Hu2CqZh7w_;sI$U%0i+IW`w5zmJ+>ki~@mS zMbEGIn5A;5(3&P*?0St!8NJuw@ZR`YWh?y}5#m2**a>DjCwu=H(L^lbeyfhNaplYZ zq3x}r;@Y+?(1hR+EWsT@g1b8*f#B{IJh;0BcL?qf65OG1cbDK^P-t-1Lf&GZyHEDH z@7(wIfRAd`RxQ?)nsbgm`sm#^fUa%SrUa;>KdU}ph-!S|&`DqWUI=G`fW5EF#OiTx zneuZoFTq{?NRkf|;5>H<2|ki6imzm<4vLI=eA-L`04O;5*Lv$eHg<7DCi3@Z}U>E&5ES%?*%2Bm1?u%eCD8}J|N`e_Hy*zqVcueyfhK<4=?LJ z8I_HpCYu`JY*y|y6=mdF1pC<3I zx*3&tHn0LZW7WRY`2EP(gGZKrzd|)W`p@^uXl;b!JDekAe(6AM`1ZpPu9T(2;R(Bd zJ%E3OwrV)sI+MrvdI%VsB7Xh*3j`8ZAV6j7v?jl^p5I-MS4A?fo%cIv^@1MAcpKM! 
z-LD%rgab*F)V)F|X#Pa0fn5iu_50aQR)k3V%1;QHlG8+p>`xpuslt&~n-W+E0 zyB)q7KE@xmJYk=c&hEHfeWGD#i2_-MQpo7fKUp{7exxO@tL2!(vr#^!4|ANk)1>bo zW8ccGHK2dUxjFlpq=!oUo}U#S6K+kGtSkHhMaM&8v&hMjoBI7HQT@XtT^Gb;C>~-` zKoSO4Y!gE+w&3PPr_};wXrq8s0TA!*YZVzGfoRu-QiB){CT*0F5i>lFMY5RB%(J*)iG!J0*+HP#T4{D{BGvLKAU~<|fQONXN@vl}dDbttR z_XbyB1CPAVlph*9Y|va5$c;wm)DjPTYlTNx&BNc1j>b?|TTL#R1!~*_rBL{z{^(ad z1ujCiore`biKtcj{d=8$2R%mL?dZxnFbNH%xYw#%6vpW-I$janhe;Cs{{A%6G!K)0 zfUSGD4MEU98zDbJG^vj$l6f>pjDXrI)_b5*VDPU*h?Y!3&@N(NBd#VLc z*t~{{d8_I-VO$g z?rYl1tCgd)dB^+P$X#6mh&CJl<8iFz^wy5;Sn|BkFD%gaHG*})>X$HUj}NnBWwGKU zh3j>rfE0>vfOz{ANTHL@C6nb(to%(F7M50(o(Z%a;1<}-V^lAl%&Y+vjFKVpE#`L) zt#V-L`1S}oHq|n~>8rinx1MS;G>YQ2E>xs~Ft)Q>gPP09scMlzOBlP^lDx?<>RDO07q`4yl$!fv?XdhNhEWH-pe1ZYY1Mtx4^;t*j|WmY!au{HoavZc0*q19J6IK^ zF@U@&+(`kj8}xN-G8z7!5bZw6Uy0FN4|Z$5n}*?P8>&r5#QK&vY@_)ZLDD#Fre%&{ z1T8L`ZAY{KGAgiI;FJkgVIxiwvE35y%oj&fGLxxc2O7#k_?^BNO|^Gh_$gQExPCdvN<<$GWC{0AF&aa#;Nc@NL(3`m$JT zswN`K%(nLqmLNnS+*83^Hfc;?Q9`rEb3&sIrAHZ&1QvCp-cKZ*#oz^2mIo5^<%4qr zGxG2$J8|sVer9J9VBSNdsANDf7f{QVq z`{f%ev~urUqa+IuLmS)l;=OH9fg;8VzzA}y>qjjnaHK<({q~0x;J5Vev(W6V(NnB6L3V& z2MHwlYU7gdY3p?yYjG792kpsZ-THrqSE<`mIr85_>~itBwk`=3f!1UY*OTq&S z(~GQa?o4;$uvq9e*lgB?xb2Jp)#l_(ZS2A%J8nJ^4nbaIMeB_R_aEejCVy!Oh({BH zxHgAVmSsJpfHKp5a6CiP_Pw#qXFnhy7Qy7^QT07lyy~pCpxVD2)K)JwWTjb)=L+_>$HL4isz~0QS54%2f1Vc`$7*EZy2o& zHY_z)%a|t#b4Vojj5mh^DZfw@F<*8-I{i~E!@$oV6^dR(ZNjyb168oOnlhiQbUqjM zg3DdEgb1^{PY~JpVy%^0rbyEpwK5_gz7SFwh>VR`XjO1juuD0r=S;lREWS$yi0u49j5c-iApliqga<`uq^ zSBOLiWVXWkYMDWfA3Q0!{)p^%u)fg!O`pG<>??vjnAz^R-*s2!!KBvV?h;tC>7FM$ z6Heyni-ip`mtm8^+fQUE7;{Z7<K6PJaer>y8 zA1OAeFg*UD4*_5Dr^_uUdbFVh+04*9&BIANkNgzAwA;sbMONc1}GR8;;$Jj$TqqT=pw^g zeWe8o5ToJ@>LS|;(E1f>)nprGU+4_+|9%2tMl`6z@8vcA9BH$?H&sUNH zUNYYSA6WAdR_5`#>@e2YSZ{EYCFvw10S6~ns%MZqP-*}q6g;NM#^r8-YS0&{oGG^; zNwC&1y3U%~{_ba6{B0fQMTi>!22vFkt)1~4Y!oWlYyHHTs9j$6!?&+VVH?=xe*zh> zI048T33^xI^pD8+wQqi=C%YI?5pe9TpYg?$582*iXUj}C>2a);UHaW?aNpM*BR-RDjcfb67+-C`V7FtMr2B7oUEj zlpf2TBhO#|8|OJ%uC)5Lf9apBvadR3`*FQopC>hdXad!88gGM@`HN0zg`2s$!tL+f zxrSnek_Llbrfr<>u*LEyy|w8{kpmJhW3bFi$2uI87FQ(Ea||N%^u*{WzPlqDY);uE z%SQ?3)3kdkj5>&~88p=aj)_$dg=nL93$N}v7Er6daQbT`iAk}iYxo@yP*kiXH7F0! z13e8Z3s4AQghVV!{xMQg&)DE+id2?wVXJJw&2Cdes~Ntpjq#r@7kS)YCL_E)Wp6s9K(vvmK-Lb3k~>H!L%&yg|!WFo*jXio4C zN$p?MOx3XVz2{WA`!}Uv_EbC&r2nI zfp+HD;>`b_4gHRkgX|vtrn###Fnj|z@K?Jh3wpWZ-<&{WS&b)tp}o6j*LW+JX?{`a zxD(5I9QYt{EnQ{t-~Zog*0Vqs|M7$jV9-x0P8`! 
z%4i@yId!x)Ryg)eg~Xa9 zO0onzR7nn|7o*mVRnyqS(QbB!4K1} zuPFH1+FmZ{vKGT(wO#+_B;QOm<74>i*u)XSz9R2KB0S%&;ON2jBybgq5Oj(XFim3d z$Ot{g;C9#&y0KrnQZ-ItHI6^Laz>WxDWQ_UtoOXBin#Igqt|Pg`KkA0A{~VuxTcJk z#%dL{ztGahYrP#+O65{UL}K-6mu~CvyQdpvuGiw0M9a{=9{I+|Kk7IVL*-ALy1${Ck!GEq|F-+h4>%Xt3O+%3xg=SUG7rI_Csr=%PE- z>34LSoJW4bd*V8Rgc9%8V_Iy-v5*Z(K6HZYA!{F@w?Wj>`tW zSyzp}k3CnecOP+|Z}2Erammn}j?G}+x+9Ip2xK|)boa7&7WeQ0z6%bJx*ey&C}diHd9$MFH;=L6ey z#LwrWgVTElFSZm(v?6VanE(gJBtSx0vNr;M-^%wbj29ZLCh1TQlP_R8q8vrvS1NYy zcB&6KVe&2;RDzWD&x^& zGD4ubR23TMU3J>)C6L*fEE5bIO8!b1eoBSqt+t?%NtjzR0X%OL&Dgdy(kAj`DCGn% zziP4g&PqSr#gr{3yR9R}(<>-vx>g6flk2_9RYqM%<)B@u_dojttR$Si2Yk+2*;bCd zlhH!d7=2%W_avS)7}PUoF97!xcsb0 zONdX~tAtx%s_#U#rjsYS4!`Z{cO9bV7YHrau{BZIi9QVXiv)f*#bG>oFMXkVQ)R}b zsRsD0B)jucxDGl$M$fjHsBJ*iWs52QsE5eLXh&V7jq?9MEeD!Yuir(=d5K35D93Gb z)atlXl_Q%`(JJ;+rcm*tI)noT0x1W~uyNsZgY2vz)-K!&;d`(r{uC4x1}en6^{ywe zw-7!j^-+{vzFb&BS635dwi#%+h2zgr{fckD(3hZ)dfXR0&X(ns^ct)P_Q`~rZ;7@q z#j+5VP+YBVo2(5{6jvN7OW(~w8EX61A5Nsm^zX%^^>E0Ey=E^`s9r4JT%Q!!^;2g7 zp;3cO&oiwG+n5?CkSk-*-%rA4+u!9azSh6ZI?dc;i<={S@hM#ZN*Y-CjJp!?@Adnu z4Gf?2VGPZr$xx7tR;h%yJa2d^Hz02Lexk4|BbH5^a|K&_rO|m=EQMuH4WQV!r7Mxs zd@jfbl3EjMQDTZeTrugGhQWd;?_+UCWSg6bK$iy zKtsdmBX2CWTq*^ zr8Fj1HK`I&6YiG`Q3x2xlQz(JxxZ;4fX&a! zAB_dzuOVRLEVbWJp^p3U5k<0iO9rNuQLYH7#$-&RVDto8gaAG&R}v>7FOu+6;Aw8D z!Eolg8lOfCjvFl!? z#r&h0yT^-xOorlw?aRLAx2elud4grt+0x_$)5KC22L`j{UFYbkQNh*bz3o1*7lLI> zqr8aIO0fk;x^8SXnDCukX5uW^ePy=~Fv&f5*q?9G%Ot%nYSArxuhFo5czM}a{-I#( zxoHLlmTBQ0@+*5HAt}XB4o*rSzh?b!&>SbM*ZiNyb{`qr<(2Q=n;1V>^4gphEew zY7VyurkEAC(lz|m18=C|i4d#tEq!s}E&pfZjo)^=8GO6J0j~%7ppp77-XEY{g};0H zsw1^Zr%X7%bfcs+?mqraQ+x1Ij3Q$_%N}}ZWu=emd*LB8K4e(`W-Z)|PNOJ5R&$8u zr%W9myj~S-!mS_Nd`UpP{IX%==ZK1cY^fz+zi&t!?$J0P2Ojs4U*ou_{ecY9ZF8-3pHz?UQOsLGNg;LIk1ZO1O2(a1`jrW<-8&{*$&>Dyr4r>nApNu~P0Zk*Sb z{Y#w4Vq%QVXnZaA)HADZiheiSr|yFeC79F@MQp3%*d98&LRT_Ii4Oj0Vjs=3r$=V# zm|-#x@aA_cXQ>u0wiZbj-{C46H7Sbu2zI1cm9A@#slBHb)Zr`M ztL_-l9on%lxXNrPGj_~#y9Z{H*U0XH2`SmP=9^J*mP7F+6yTf`i!SbE7TO?J#I4&3 zi>gysZftoNIi$vaBy+}{igFrNfHVVBBcJtR^j7HJ!0)cfGg6%M8lrJIawRlmV{RYq z?L~4y`>YGP6D%=le0WD4j0&RA-qxx+s1|fh6mWc8juq0|H<0zd>UzD`@P$M?-;RH{ zJv{oXp>PPcQM=Z&CgS#d53h%V@K~e%x17_Fb1aYu=mu_crg`Dgre7<4ruhr|hurlq ztS8x~N`AoiPo>&o&S&+ja`-S}TRi@YJZW^g;HPZc1QILm@{K|_F z&im4Ctmbop+5Dt2h?G=3SNgn|E1o`g?9C4sb+?$vN9Wk{U3_f1*+b#4NOjJJvoM2u zXiSw0QYda&o}9Yy)Hc+5BsSEzXkAYCqfJ?^=Nwuy!}bbDX>W&wk{EHd`@pwt?~%Gm zqe=G0-=yyir?9%a+K3pTzST`|JIc17LekA+Q2f!)O?93cWItbJ9HS)pnyqIRX+oJ4vIq+oOf<*G~37XpLs_9ogxgl-w#H=`A1b6r}A`-rW`3 zFpSSrDwVG42KN`fyjT_g#--=gQQV(5Af6{Zi#KgsYy^9^kwGwttN9;%5S~Yy{^5B zFH9iayjOyM%{+0SLhGska>4cRE;&VU1>+hn*La9?rX(Jt#$E94>$%q@zDE|Ik|4o{ zt^N3JlK~#OHuT%4K&R2MYKZ*#)m$UjtU}q3vywZtpw_o5(s-ufpX+ZtgyzNbWFC7K zbjuG%PVZgTOWFSc1PW8b9^b<-;RH_0cLXN@hl`6f#IB*64;P&#ttJ->>L2%;l?C~)^`Le=zpza` zk*roP=tfarU)+@lc4jDXtneu-sP$yurHkfVp02Ffe4ex3LaZQ~*h?FOMIy;uk$$yz zG?6pTpo^ZbcT7+O$xYr=_9)EOBlWxs^g3K64k54kh>UxznN;`k)w@T7I>I}W*0$n! 
z2BnFpX=flTbaS}wyKI0S_R&nV9j$|lP%K(;zBD?TQ(o-Nod15r+YAh}fDL-+qARh_ zwI}K8yFW?6BB>UMXwY*@nl94lX>5g*z8K$XK^Fq)&XKdin->s@jx{b%iqIC3dvbHt zqbU|a;e*ZubVE@V; zeciuc@W`VB3+&lNOO7{aSLziwQ64Y4tn|Egc=mqf{R-WF4cjD7Pc-&CWU)Svw`f~z zJ?O#;f&q3WrLp1jx_@ZBwy_8@*fyg7jnd3g19`Ao$qVG*$FoCXk$z=>^Fa$QVCrP$ zE~o5gnS0XLq~n=;5rLx`09Ka{-P!79s-IoCKURV^z(m)54rIA=Wp(cS;DrYzp(zeB zT<2fQ?5gkz+1JD)$?dM34MwXlU|Hbf#d3JxDZ&z#^Ku#W7ue;DL zb{m=`786*25btkn=s8Av_zcR}RqaQXeZM1N7bo*l1Kf}3a`QO`AnunGUvUB}UfLNS z?k#kL(c~Gft$xhQcdu;cv^xIU!JP9MmC1EyItTDB514Lh2-@s`s}v@z1*rbN!c23| zR7=i@egt@;p0U88HZPG!#p3kH!)s?A-hcDr-(a}2Lc*~#I7Xf5^0s}3I%Kg_k=Z9N z;mXc`Gxwra?ZsfnIxNZO))KRIEkG2DNIBeqIvk=rMrS8^_5lo7m{%Y!ko_|A_`ay4 zDox`6_JcoD2&3-8 zt&as`<%B2L^uU=ak>{ZJL})<+30DAeV?$Q0+z4Oasbv}abW99wp7Oc^WXqziH$)JR zHz(`I61C$c;$Y}LeE-i34kSUxP36Hn!RMRqMUiWRv!*eEwg^>VUnr0u-)n0 z+=Z9JcG1;bc&}CBiIP?x%xLZQ;dyI-f{@oV3%-M=byd|wIodLqXLS3HhoZ&3DUMnm zvsddE!pc z$WZKpvkAz)KX$Zd3;8)}fo-?!S5x1}*+TE5B+IEYxh| zt|A!`&7)wdCM?{KWAC+2Tv2m z$3XeYmb^AyaV*`gu;keV?ilp3%@MIN* z9wfs10r82)o7OExXI`09_K5~11P|g+0HyFKNu#Ty9!FWj>t@Rb3?)T_P)VruJOwo; z_+ak&hU_!`s3yRWuUho0kWj&=1d%i5={pNrIa&1G9sa1(55>*3H+Rjby`K%P*trl+ zy3P#3N0T{1cF;roxahK91sW}M70By=_MIoTS5WE&k!KylcW#59nHHa^WG!-5Bl$km z0JMfqi${6*O!6(1v$mn1KEl&Zzm0wGVn}oqc=ec2Hlj)FT|SK*8 zf7~|2Pg{L35$MR#0*}v`+BNTsPEFLhkHBZdvq_@%+KO4#(T(xxse;6e(M8(TG%V?| zKVaI_dUu(ili!Zw$TVZuoMJkzcm``U@M>xwHyF}$brKFNwuBVfm8Nz%0Od=BQ<&?g zV8kMFJLP(#0Rusc(ei6EKF{v6hdZ?%%PG}bX(SK4;!y`i3w+VRtHZ^kC8A?qSQb6q ztm9se7W%Pb@-ANjlc_18_Eq=Zi1Jocwkq?*Iwo`VYF!sbuVtIjDs4GUSpMmZoawsn z9+R78vVL*}Ge4nT69=tNCDdKJxXDW9caPW?!u{_w;uR0Zf?j$_93xGUMM3=WOc3JL zG(QX?Vt-~!L#9t#cE;t{b1%~)?EutP{=KkwV0^6$=7}F|U~>yu7Sv#btbpnmtEpfZggAI#iB>g%DJtQMDezQya)^D%#)E` z2XPB_fn8TlPYYCYe^3QcP}h>se!*_^^+7Juk9weQBUmU5MkZC6jp5ACmwNCVrwC*; zWVB=2j2yP=F=sn#$@P*cFa~QK?@zKswM++ko{OTCY{~2ELAwED&hNo+Lcf5fdG2$^ z@NLMcotw?~hwECvwHa#pUiIuj8LXT~%pS0R4@K8_emin7dhY8#uG{EwAM$C`Yk3Az z8k9nUX?XO#2>L7Lr=N53^T*}Ovjms^kn!McZUdcLR_XX3FHh!}m1h@Q*7m$7oorLs zpTt?t+J zU}7F_ObY2J`&;d=`3NS&)A0HxGwE)3)i)`EPji7r*<{NsZ^TRXU?dIT$*@n z!G3FfwpJmLCTQrBH=|TARTh~24GhT_>$KEOoRh_J+YJoliRSvKLKQ2BT@e)z!_BI4 znGt9j>o!7`(L)}T-`4w_QbI!F$w@o!$YEgCEHAWCr_x1(UM!=Jd7)a}tMq6EkO7Lo zP*{EXWWj8PV02Q@T6$oUP6?A8fN>|*kCAEtiO|2#7D$QD3_7}d8Fd@+&JWY3VV@l} zb1meHVBYvDy768^YhG0WdVJHqQHq1X6Y zAlnar3am!F_2D)676L3VwYkzMok!M~m=8DZ#cwKC2*J15QJ!Z6NVS4`x;5ZkKdzZi z9BD_ihV_*o`m&DBz_UeA3~wD&tId(+`Vqw>12+=dJ8a79_A101hROzQU*Hdh@=Z7y ziL9Tk`{ljg^qFtjT7<;b-nR}k3z6Z}$4IGGi!et2i#gPr@ryu1U>0=6k(pngop*jyusXqPk|JHyHrV3!Yd!Weci=YWPM-`_bzpGYrBWx^}4^E8E<{#Q#wAYi3TT59ll&Hx1}GkfW5f!FzLQb zE}ku^E2dVh{&s%o)YjLHeJPgk3uK(?=9>d}5dSzO3EvZhn|PZjaR>V&%R97?ycm&QgI zO6t%uU0^yZ8H2%A>q`>ZN9swpy0zG`&zf%cu%6!iEtp8BG-!H5Scz$n$c4_|jq3sI zvd(O)#`V+Wwkd3)hbnP2u57L^;0@2b@0U&Ox*ORRtw#NVIXf&qF3KsH?B?v9&IU@o z5A)bo)hLV|zrt|^Z+SwQ>Ktlhhgm}J>!f;=7@<1 zl=(6z2Ot-?cAhf(1H`Demm8tM3$VLwu#hTx9~0`?&odJ1fmK;*X*;^~Pq~;ctju@o z3<75;hAS~kdo#WXp~qpM+U7|<{9wpcA1v{Aex*EA=jqlULvmL({#Z{9AZfQhg3&bm zb?g#8p}nHzO6XQPPddxWi_->dVqH67QbMoiCmTUbjL$V&X1z_eT0JQ`3NFCX^Sbxv zfc_jUwmKKzUM|?Q@3{@^u<4(?*NxRLZ!=|a;By{LpBJdgIEk^xU(A=zT31jlU+wV( z3Ym7Nzb4k>FdG)8K?1M3V#Y{_nz;Fx z4Sz60Lm}ibDdB*fwi4=wxIg|JJCMQf&H$AH+KpY{sA~_rwOw{p?0fGuWz*U*rx454 zwGlylwowx6NLD}!|5$a(!rP#ptMh{B7`ek(^uiqbKpRnE(sUR!n}OuYhUT>{u(Q1V z5x?kgvhCrLp_T7S{Banv+!|yvmQmN=;L*N!42~R*8)6jN3P*BYU^ux52%V^Vl;08h z?Ucgp7NiyP?NkvDT#`qIr)a!H0|rJW{7R`E{tpjAc!Em4%u+(6!o!ME@OsM7B3rcH;ROhvAp@ROs_m6Plmu1)<>K_U8m`4 z;es;zBMh6MYFuxfdwhrFiNVMH=156ylnbgNiFGahg>y&&6WqNWK>)wQWuLj^m#-mM zdWj05%@@x&!z!r?Po_lsUjaIAY*orivU}#}%K$mXLR7o!N8q71CtRW1z 
[binary image data omitted]

diff --git a/docs/reference/transform/images/transform-alert-summary-actions.png b/docs/reference/transform/images/transform-alert-summary-actions.png
new file mode 100644
index 0000000000000000000000000000000000000000..3ec2bd86f3735e754fc59451ee91607540b76707
GIT binary patch
literal 142958

[binary image data omitted]
z8TFJ8t=2DW#~#0D<=sFL-5SlcT0httQHpr{EJ;wk4--`GcFrC0c#ugU)|L~OJ!vo{ zTEV_+R}en1O6`8Rbm{4dwbYa)-K7s(OXlLc$vR&}@`xoXz{|G#9XpPfHSiUzhJKsE zt}|dR@o27ek?X)XLH(i;<_b)P3e4xqP` zIwLalMOl@jskspjhpzGw_u950@O%$49=g{pJ|pU`ey^ERm$wdn9by(;1e-$|5|%N; z%u|P$<3~U$W_q^ohV5}LXJzss1|-Vt{fX7qz)rLDSB^iI#IFa=wEVALOF0t)OyLI~ zKE|cs2#3bqL}VS~6L4QGwuYJ1Xy4cW6Em3!1tK+8hZp=qV=8+VHQC9%8D?NM+iA zp~QIAvUnM0+s8Ke&OQJKqC80z!`>oYBPWZ-NAf$qPa7s>$*8Rcy<7+`A60+(>m?Oi ztws4NiB)vDU52hlCku*c1U9~)8JE*&Un{L1WxHU^{Pin_g*=Npl}?8=$~U@A6XiEG zkK+@}S^K4MP`8u)!}aPF!0hWWyds@eVOBokcvt!%^S_)WT<16i2g z=FlTpmei~C)Y2B(qUY@)tHX3cAM>;Qn|k!92X@1pni{Qa0m^L*t>8n(z&#+f=2xD? zS=O*hqE{JsT($el*krvdL>_ZBvn9x5P%XZM{Y{4HL+EH49bvaJNZbM7D^izefm?(` z6jX7#by-k}buOF~!<|pk-&z&qz~fE$gM|DXWhJZ1n8h|xn2tJH{8*8rq@+aSi%T+z zRE0^qu_t=kh9&fqdHATvb%fp%_NX}HZ8E{d9bot4i4<4=F!iO*$eG=yb9TM<^k)A_ z+Xa@Rc?|bxse`nBJ2xUAw_;y`)T2|hC}I?0wc|?FvF;+$e0;VjH95_RyI=&!_|!GV zOG6le6>3xXYXDTgbqFy%-G7PJ(EUMOk!6aC)8tFa75I@YL%&+&9E&;(sYw?(oUN7q z$PR_JFR}H_^HB|e8E`b+OTV3|t`6mgf8_9PuPBXUq%kDplP(Sv5!ACCnu~fNl1_Wa z!Du%f@u+pdK5nNg+@Y=2rKO!sB%xv*Au~?pZ#Ui)?v)CfuF&H4(vOp^FtC0^v&sJG zEje%npY1u;?z}c$iSn1qxKjTd5WQ#B2dPgOe@eo3$^+&yJf6HoiDgA&tlsmX%Hu#T zuhC*{q+}~AEkWBa+Gck6km(Jf6-BS-2z46kC-C0%G?1TBa;VME)_Y=vU1}>RxM$@F zv&LwRphmuwLW*8IVPC7i(!z3hIsx~#e`zpE6L1*2jFUG$@{V0$H5@X0o?g7Zhi=Oa zC$_OK(YgDbC){68L^?9`yxDhg(M95Y`u=t*kWYoXvWaP5dO05X+~dk6$cQl1`XYaY zFxBNchwC?q9t*X9M6c%y%0ATb{D2+Rewy)h+HRC3C~HlZoWMA;-O5z@(Lem_NQhZ) zDscI~HEQhg%pmLbQy`lsBnk+s)MoaO*QKU-xt>Q<3c9aRXkY#vj*x5;QmOW8-?~>3 z2I?pN@nRGght+RGrg}?H*V}7*V%p|2*TJz~`hI0LTZcjYgEEr#Ly~U!xaXLyv!uso zD=aQZbzM$U?Mq47ec<W67zL)eVt};H(i?h1*zzcoTh||TR3$q zk{btNQaY5jW2LvcKXzp3pQc#>`E^&8pFQpbzFs#CsV7rDp^8{7*R8F^5YZyZQrJLQ z#8jW!_CuQohX=c*;^4ZJj8-i;thV6c(w8Ic?UX~ez{&MhFBizJF{^j8K}HLjD_)SM zGPO;<_R$(ew@Ph_WU+9WzEbSQbT=*Ki;tH=T}&8wnn+By0WPi%*RlOZkobV}P^ zlPzd9sL)%y$BNaQt@ibdSx!{k%GHVK??gZB(;QqOtLA4qEMi%4aQW=H=8mpvMy7Lx z+b}Ao1}O{v3qpBEeZ1{lIHX%+o$VyYtZhrB7Y6Bi=+Ky+#ysO1UQK-K%isHr^DILO z4t0m1mZZlNl$&(MC@I%Zws_Zp``CBS?SXnW_-g=!hlRqTFMyC`LzCsCE=1_J+CJal zqZj2?fs;U5IK?lQyxlhU(yj1njM=x(mXbhQp|-m&2MzC`FYZ~+jD#^Az1008-r@@N z!&|L8o--@dZ&{u(Iq$)w-WVo%EPP=8Lwi+ntKAi?W72m~mfO?o`sfce&(#!*(uPRL zt2J)qdK-RKO1~x$;|kJLe)l^FM(%<=l-)?C!Nd;OO$>govT(-eZo!<|Oj8%E zxRPqg?AC92VXrwXWgHn>=>!?G+!b(J{Km-L|JvlnQm0L{1(wOK(^ylK8Z`~raw4Y> zvoJK`J!l&4Nj8+sas5@MK`lx(cdLULJBxWQJjqxqkkt^%4HIk*Ue-EvoucgN?o9|W z$`EgEW`P=`Wk4*wvKZx=+DD%|+mJFxAIJoD$CvUw0#?dtYrKupudYlf*0^E2GlN8W z`xa0V&-Tpbd{L7Q0a3O(?CkRC>2qy!=mqdX-q7am~_N#x?tl>%ZUMMM{T| zz*1Ytg`2!oy~L-#YGobozabd6%GWifLC_N)pGh|#epOh+ALXS;MCWIzXjOhx;7(G1 zsT9W1Q7DEYHtDMM^og$%yESWccez2yH?10_#8sOlidKr4Ly(>S1(SM;?#* zzQ?J%#O|9eyAH)QK7Fi`oLVW#GAo6_AJyMWbyV_kHOTg8w(lz$dv66M~ zZXv$UI4ac-t!GVu-n3@UgwLd%`4s31*|<-$P8R`GCu{ptsv2mV@9(V9^v$XkAYyt|upsOCn(RFKH zvvF(bR?skBXW&Vn7Ha3(ruq`Mk)&|4izrz#2Me!mfp(_bU87o!`s+Rpi~O$E(B<0^ z)~fb?^s2LKwLNr)xJDYEqpwL z%UPXlY10|UX5Y?jfOxFwCS*W`ee$5v6N^mSG7keXNEpOX68&)V(3^)KK zqK~{AtX8++?6Wr!m8eX+t#Rp%k*@5yLygBjBb2dSW!WKKR^@d>92@+FRieBrgHlOn zhNz9k%y!LE-CSKWm!_u;hnDDskImXQefHFnq1 zeaA|(DJdzg=;89G%7nA&{H&jRta+WWtK$}5KG_1xipf(c|6C}dFW-bh=D-`A;O5~s zSm}z1N}gBe*OqlWHX3sR*{>JC@4la9wklY_c41YL+TX2-gvH8A%z6iT3%?ZPJWaU# zyOeBlmNc=hOb@I~4(vg$wB$kNC)$D;?Ghm62GY-VbQq(EH=`(cEo{sb(SPpBGBQHiIeZy~@lF>7`${Qr^t=S^T0Eg!EUKLfBKI9%epcC=0)huU&_}Ejo!g`Te_`V89F-ykY^OsN0mep_qG1cx+Q@ef7!73ak zsAe*=xJX2`sh)g~>X*7&nRWh`))5PwaFOLddve1hq_tH#F)4|4((;?Wu4k-y+U$fQ zY{=5fGWKa{;{3N8Sf_VQ1}7+T+WMUxH5Dl4zV48FNSVI$caq2(7n0phhs6Jsn6Ve? 
z&cB0|{BSQTxzocUm%<7k7EmJ>FzL$Rc+V*=-TlfW-(S{rnso-++v^gcQIw93^{~@p zVbg7!v9bN9{lJ$%8s*2ISH4{rYREN%)KSG^?|e*l2x%gVu}ST!%K~Ll57RXI)<2Fp z&jZJB)}JyT{~cAi{gw%@Q{+49^>+j`MNUUGs6_dQwfNX78ZnFBnJ}v3qeah>c z8I>&c|1`qL^}lkD^}YmoE)_wmw@7Z9_=nK1wiI*U==TGnNiY6RKDosF1aiT?|F)zo zu@;1hm8F!aF@#^TE!8xcf8Tif$zN6Y3n`x568uhl;-+GWUgYK1mrg#nYtOdi`4G?k z*!~}E3)o1`(z`KlJ)ulLT&8oOzX=!3H2j0*hg@zeF#mA!aw|#yef}vR3Niaj3*g`L z#N(6s_N}Wc|DUFfEr5?u)>?Oc_fPXCn-(x}-0$uAqdiq`^vQ8)SbO9TA0d~k2uy(Q z*;r}*89D#jV(pmwuXWds zEq1xBzSK(ht!u{)i5mMc6Hk}KiHw&>f^07 z-(9PRR0A;MWGrAXOu$E|K6vn;dmpfDO9*N|C^9dwQf_oKS{zg$=MT#3cd7UV-w;ns zPvn5gNWP%vHNKM*B=cRoT{94K;7Kn=k zZMLi*L~7ryW9DCafEjehHaa4DOT50QSSqcHoBe<;gYl#L zv=HEeu}Ga>6?I;+=gfBYiX>G9k%7ZZ_VP5LILETPWp~|T?+q%w{e0|IFAn%`Ya846 zIg=piXXrMRJc>3v+cRp~dqMdAmQY?F?n8u-wuHwT zMU^bCQ(P}d4h4TU`)?{-YO!NPXKaEA`yi%0CNjo$T6 zt!3?%R*vqtb14-ih5>$lSrh2XijZ}BsC zWe&Vs?KRos3f4-bJ%J0U*rM@RV(-to z<@3nWdo)5nmq?>*$ZwO{uSh%ibQ3|`J^P#iT0psLCut)Q5l3CXj>HS0=bo@naCP`= zQBtpgul70^14l6DEO<~vx_{?#*|X9jHxkkUvUY353Ebl_D`f76G5wsw+%PhCC)vQG z;;kt=r{?uJ>LRuMDd1gCbY@mr-qANltl8%RfY53XqvKqP0opzE)qyeNFz)5*5IDnc zVyqmVi8Gl2&uUX*rd4^{=?3vr=tt>{&I8+FZ>Iu9k?tUW5t|ynLjYkY0kwfY_ge8e z*f>~4m$ai{jB&^u)5A>PG6zmD)Z?yG%I>=83j4LbPZ=2J8DVg7`gnyA=lrjY-yO@*A#%_-md83+K8U1!9b)hmdkH3&wfRfHV=c}VJK5PjIeqgygeD z08|lxPwf+MU+K9PaoP3^re83;cPZ6mk+Js2v_Y6kG!3suw!B}Xk#ol+wgCu(Oziv! zGW~_n2i(d>@(*(v<(VGfHZtC^+)pVq#@^p)x5^0VLe8^85UY;R_RQcqG?U1N5UkU& ziii06W&##a!U8#X^bb9vg(o-~SXf#00lSa1+fb)Sd<@dLvwUkln!pMmK}efojfTuL zWPGX&`R6Hdbe>GVVS+Aw%m301+ziiH*mVO@xh#bgO443jvvW)y(v;x>SSB1Jcdg7f zMB3qqxq`rIRzt5W+IovX@;v6;leme%bbNkhH+}n6nA*=CU}(bE4mKO#CBigip8=lF zQNgBmwd#y~@C+5d)RlN4Crtd~_la3)6B6%C`DwjhA04lHH{%n-4B8xb(^Z1C6n)I6Lr%OsLe)FNm}9{D?g9$qJDWWrOS> z{0-2GKVb*vF8bkhT;88iJs8pkgKQaa#g#+{0&m!(O#~EL?r-4wY^Mbx9p{Q)v%A8S_K8App5$ z6*@i!k&UOtsQ2K=fn$qt@Qk!Nk5B*(EZ#tuA@`olv|ed@)<@N4rWx>IxUe%1Xro(A z148(ZBI4G5>XSaS(~=$Vp>fzBxcYu%+T(@Lo+sQvl(_DhwszuGrof6F$%0C^7-EvH zI6}WZ87=~QsdbX-`4z9Lzpu5C!Xbe5N0-X>tDN=5nPst($HOq#W>Msf#KB^^o#UlT zQ8v9dLtkC%nRmSyQXMd{LDyQdbchM$I~&dgS_`z3$)~SqpSpLnzK;DmoCQd~$l`8p z-lnwK*nY#Z-@pw_l~vJaXXu)S@;nowSj5JWfH+aA#!nCT?%V-TfkL;CO%wNheNFJP z3TxI&UWT7pDGi!!D?%9b~}Mwh|hg{n8T@nB%j&3UL-T8f(=@o`K2|`P^gv0MgFpNPFZJy;$ zUoUxuSOy!05=@GS!e-A(vY2q$6^EB^$4Gyjaw+EVdA)dwNNqDXd zB0;53`gFEgKM#gEfp;GY9yPmm`_R3H1*dVF1Z-aMo$|mmV)L&;_LmV*f!|Uv)f~=i zB?|qr*>SE+P8~Tx8HUhtvE~ZIJNYXIppn8?GtjM_oR+kHhT;HP!d@^d#I@#k-7yuSmELP)iDC8#T_9 z8`RfDUu)MCGtCv;&?Nt0>)Q;(O}5_#xh#H>XT3_*i5-|ynZFUcwBiPrs?BoT{01*z zYS-!Rjl3K}_I_20Mkx3yUfNhWOG0uZ%QQT_OTAB)z$Ms0XF|r6r39lM7`joS`1n?3 zSJ3f*7t20@DnY}e!A@@beQH1_rAHXVzeFiw8M`tE?OL+mIT|WZF#}TFTt;g_5eGZy zXS0XJJn2Wqfa#d38lEdCx6im1pvZahFM>=v56~i;c{C+L4pTzIJXZ5(NFO>PTkp?) zm~V5tLbE@m!%HXi_BHOIgp}}be>!sE4mZJurM9Y2l_m%;0vZThw=bi+G=ft*50m(& z?U!^RwMadK$m!K@7MvkX*KP`}cn`fk!XYrKw+pE9Y}z(a*|gld^m{>sC-I~#L72$W zhrKQJ>qa>j2b6Cg8)b&dRRLSkmkE@{D6dGG;KuIIN0)x(`!408-M%ly67Hd zp`F}aZ6_INt@F3iBl?!_xba$2V?U%R;B-TCdVDeE$hnI3@}N>(?yn5;a*~BKxNmZzR3c4qL(7OK~z*!S>KT9@y32ltGugpJ40z( z8$@Tim_(WH@$cb3qil+^?88wEl>2AH z3-|w=42>eIthZ=HNrB3R+RNpIJbvPVwwsZh7EfQlvHaWs+Tzi9q4`|plaH39USR=^ z)1+LdNpj}kV9s1{+rwg+SC;YZ1CvE})p)==wm#U&;(MZ^;{}Se_u;VN>4itK`;oU? 
zm6^P5afhV}=0Bf7P=*4lHt zFcm#59+q;)fommQ)0-E#uj5jW%q}u6Kt?{PbPDrfk8uP5r|cgn;OZ5oKVh;v(yDjo zfIV<1&#qyGh=kkVAzHZM)priRPlEJOM%4=DdgWV~#>3W`1De_fv!?v!sn-3euMAYJ z1Az4qL4eYsS{$^~`hM9&>(0_e*zrLnSHnofCawnlcpFZ4eb=fJj?YlTbP&rUDpc1d z?WG9^hZwN2>!fTS!mM?3(b4?W+yX*X2g`{8DXO`r*ddWD{wHDA4#WvZLE^P^NdLYp4c=T?^#@-7icVS@X@CNV zPRM$e=&8`B;U<^Pa}n$BpLz&WjP>?E{#3gVfSsgq-M64gYc|5tum-Y+CztN`K z)W*(dm-bbTgZrc9GDRFPNQT3fglGZDyr*%QNi^NBbpM z;`=$e^uZbzE>Cgo{ z0bNgEh5=4;IP>3NX%vxEAxN;FgaX?mp)AtK)djoW6e~QY=NxNZG!+YDEycMT zS6r?}T-TC|Kptb^_C7cS?mj1{V$&(|U+1~VYcHg7 z%&Ku@s9Ujv`$;0hlD>Oefml3Zov#Win=`*G4M>)~uIgwW{%j=#z3y+HBO{GaC-`6# zib3U|j@sOXQCZ=$54h=W+S_f(O}dU~MZlGO*2}%7mu4?8JTORH8dNxJNj;nm`-OJ> z`IX+Obs?s7yn6Q_)_QfKIxOjtMyl-bhMO0t!cW!Hv*8GLOpZ`D=JhgT=edr{+LnPD zEa0hrsRSO_gxjrT^$6{~R)#71!Pkz-WyoX(j0li^V;i?Yo63PumL+G+SAPqd0=sdZ ztu0k(?^%Zsj+bddA`;$~e)*H1O>B>n&uq z|J^lU322LOY#sK@eOrNoR0BD#qWu%11LWY0+yo;x$ zx=1D_u_3Hlnr+Id)BTP?qK-;3X%S$us>54pT#Lv;{Ac9}6k|G?w3K}Ep&Acz6_)DM zM3Qbu;lF(do-HO;ViH1W=KON&u8*#jXSJ(Z8XZR_`?#Ut&u|8f;G2oY&)IB-#@h~8gOxHM=b~I#Laa=Nt@oYYz7^yb}emP z8y6eDDT~(`ihHpd6NB^|qEh9^Ygz8}wcWYv+WD?cG3joJ( zSW%qD_!dcf=TS_PA~pAmJGE+>4+C%&?Q^=~>-~8UuE?p~y=dN|Ch(%Whf%g&X9-VxtH2)t3bE_sl{AC8A068i!cDxhCGYw4BnBXnz?b}?N};};mN_Jaq`zde8coLSh#tS7VVh0mbpIO|ih=nQ>qcRmUn z*>K3RO*AoQ*N53jAaS<3=uiuf zK14J+G5yQxwr9vJOI692S3`X1o;aX7aNe;P`~ygv8PoF5zbMwAyH(qvj|tI0s0;M9 zcpUqTwjQebH8)`lJHzR`11A6(?^=KUeNGhBk)g!&egoX`DsrQYoZd4%hW0SCRtnG! zL>Yuxi|`fE*OfcA4;)?1K&H~2@k8&YW-Wvg#CAt+YalyjH_LuYPn}$4Zx{fp$KjYB zPy5jwtD9rMIXIl?kI>4J%VuixQGv}uLlV0u(|PTuiCv_Ymev5v>I^~*Mboq-F2UV>-f1dz;9=puwUt$2 z5=PZ$r2J1Z<<+&jDCm45;#lns7yBDwp3wKSR$eF2nJUXlDjFK;qjkA{J|&*HFNQUi z2aXX*NL9zoEhMkWSMi8#!ziX+;;_qst>Vv!i4xbFfsu5El!TJ6gTT&AF#IJSP|pb2 zEb~Dx)^BdHWN0h}GBF#OaFxGhrG+V6&?fArmiTp`GdX-H*Qel=)I}j&T-?++zKiySmwj%;S~536vf0~IrIZ=S2_LX{XE&}nFPCD zRb}oJ_|@`D0}L!D#}m39dz-Y$nIhCsd#BBz*q3Sbo=EAM4c!&-CX?&oIb*x7ZGF|} z--i{HLhq};T#I7pIgv)B0-Foq%-~)>t+t>5+qH09^JTmNtBt|!e`|*=Y;xBuC)}ou z43+XBAl0DFT>C?ro#ZDIyGZM3o@;Tp-U37dmgsq zMLNek>nubW4EPWF`X9E2+b>j64;1O1fZ+?!QF-=ijZ>r4>G=3&lh(2r`-hZhGV)RX z7MuTt;+p=Fu6kzkv{2O>jYCoV4=Bv{@Wr>{xV0>3KK1W^1^zFL0gRo&P?7ofrpLd= zrYQIeUyE|dg#3;d{~9qhBY@TUi`4y>PW2y!kpN>wU2rBu`wub$aO1dNaVhToHHZIp z+qGZXS4az5tB~KO=dTg90-&BWQC#ouX!oy;GVT{k`2XJ*1@L+pZL`C*ao|?SJwXXQNcB(0t`S4@~ zAOj@+@L;H{JPHl(3ilq!L6g_Vn z^WSCs#Ubd@i~)|Wk3A)`qbO>c{{;&VeUSn18hEH^X(c;C?TNkV1ApA`aF&8{rPrPg zHyPFOxK$XNuixNoF#qvlPe$+YxtQ_%`nzgL4DRTl_5wLGP^y7JbJypjut1(TH_u#Z zCQ^2~GRQD~?K&s-=J#8~1%B*&3d`Gt96RZ6q-?9HWWPnr&1Q^%(d`JV@V?HKoXKJ3 zX zU|>`jPufn$)=0o?WfDSjxwUdusc1#g?D${fEL7aEsR;f4iBA)RZEe#q0@tJ<7{R7y zrZQ>JslnqXqr;X`Qud34mOh+f<$Lc87}FK}^s8}Ipg%Mt-O5h_Cdf>lJ?j9fRDQqT zuP{m!?@^JNL+EFl<|~djzN`wMtuRoQwrMwO_j#p*gWM{27wUI_XcV=@B)-QQ^)&4F z$!f3rxb;lEyp@c7nNcJL_f@iCtW->*Bun|>sX9Os8#TJ1q;IDoYsj zo?W#qQQ{HNYs&DeZxSLTyEF>$JVJV~M|R!)(|GR} zzJRTOJ3KM`lSSV1OYJxVLd5M{oxR^}v62`fT%vjxVU@b0<}fNp*V0s>ua-%V zLJNk=Xx?u!K_BNccO|`uQzLr2Wor2B56`hD@ebgM`FZXJoXlfTZLo%QWvi{G--^LUA34s@Awe?x#1U zBLo1hfO7?BT z$YYvjE`piC8#|6O+2uwqtms2xG|N9d+Ep5?8(c)VcBjVIr><$o4}S3^zr$+h=y zlMvT+E_pVRy`Op2@N#thdPbUcOTPLxkQ3S^aL%$pWsWWM)Mh5i7>Qj>x}(Biy{%Z! 
z9E7i*@)SEqMDqg8I}-@)WD~S{U3-qjZoVbn$EF>a-ys=k5-D`DR`18y@nSR2GTFE| zjKbQ~>_r`T)#Hj;)7|Ie^4(Xiuy)X{!3Cd_>8y~AKc^s{ z8AxR>NynRVRiz?HbQpK)Pd8?C2TUGlYhaZ=MkonUeTQ=jfN~k9tjN2Lb&6l1j}`5d zR{ru||L)TY7Xt>=S9wakoa!5WnqiA4Y^C?Nd;h0ti<&$pu zntpK-##O@y5f9osnJHw@T3HVB#F{I^Pcj$74#^mE7Ee~7&xluNuU0R|t!aW#J2~FF zoLKwR-R0juA(Jhoi%OrBcSlEaAcG!6*>JbeFcFeXs_NY?@*)jkLKOM4UCD!MexUDe z=-fl&!E5MvvL3r(@pmtsEjHQlAUv}dP6cbIoi0h!oaqSaICP;&#W@XYd%twr!h;~kBkj;7`!jaAdn}&tIX!jllplHr zoLwm&4~o(+XMaxj^1{3R{0NMgI~y&&rU}X@`Y?;KJY!k5D?h%;iWC#rRzSAYdRu&f zk6k>i;KN6uZc(5*_k>5pdrtV%5ACQAzMOYhi?;~wJS6K20RkA%i z503%?aTkHpoIwa*3r)RV%pyRJJA=<;G3qGxk!1;-*|H6C%paE?L8?2KdGS&Y?|1BY zV||T)R`r!aHqWd^5&JzFlr_HhDF(f4y?uARC(&izelpUQ@`5O>5b(C}+2v$;P0F*0 z2-_iznwrfM=WoXOZ(%(&lhD({tiP|aUyJSuHV|*gGFS_*Cm=y{r+W)ua;cKOxX`+m z?c0u~DXI{h2R?uk_qX1|`%s7272Bi1Dul9Ugy=Xr$XMT|!R9Hpiy%SPU1z7^Fdx+8_k2|#(mH2N;%8HNO?>{!2>#_9v;oELk113CpW+Fm$F~`W@i=V z%bt%Y4+!R#QzfN2x=0o~`r=-^n>9F>x#1exn9y@EX4%^GM8FBaXG>*_!5=ihC|>4O z*X}NdeA^fovitqVjqGr}IM)*R7Id~TsmLa|qRC?s6bM))Xvl%i$t@F&$9^KVqp43nKJL}n?6QZhTm4qRGtR#7L z%g~{?47_3_agQfP;<;8JVWcBa}Xs8qc?CHHtR`2|!$d z72{mx${dp|FC`!s3oTH-3R5Ixex!93JbGB-cGhT0Ddy=Z7F(@%bkGb zk31#V9n@mD67YPLq@(&W@&K)4XIy?uUBhbpl=kYjpQ;>N14zzet#5koQct$=`!Q|R zSL_;vcw-ARC{{T7aAOz&_6;EXig#XSYT_H)%me&Kbfsg zU0_G^lx}Mo>{mD*lZt_DqkB@XRTB4+Ye-nTcBHF8pNlT{(wQhr{>q?trsQU+M$BuH zL$ToQ8;~G(W$_GK$EZq$JXG5DTi@QsKZ{j=rAG*YFUgE~-IH zMyUeRxw^zAMz*BY_sadj015niDL;M^k3*eAmh8L4{PGJ}g+zu5q1Q33Tkh^fgfL zM-I5Tr_JM^dRTiC_k-4IhETBC*=6o02%2#X4{{Y_F6b0H+Ywu^6yuTKlzc;Mg@0M< z!7()B1%qjo>KT_OHF-3Mq*TEJFl*({UG`EE$mAPX0b!ObdyGnSJtaT==qGIAHPq@EH!CA zm)-P_t-G7^WDTi?G4tqzv+PVCJi-DC4uiJnb8`Z^w_JRa=rS@!`W|gD%G2-F=OPcs z#^sd{s=Jv^FQspz=OFz>fmp8y@&;1Fe)mVT2!j#drgEV)ICZpdGI(m9yW_UbcJ}rS zU?wvCjO8JoZ_q1AR*&yMzQb?6@e0#%ixV7tba?EXs|$*9v2Uen)Ma#7Wlrgb%%A>lW;?KRNg`&sR68i zUC~mv_B7Ufhg`uMrXmF=ms5Gq_|IKkIQ#*s_J{F`#bVM&lq^m9 zf%@DUp2j>M@o}`UQL2{$lXdlvMFd*dyG5U>Ei3+Wo!>?6ss7lUqEzP{o~h<{WxrS2H`*~Yw2^5q`vLzhY@-_c4fBVbdW;??3zc~u ziL+~4_bAe%Ax2BQcfW&T1v0b+LuF1WW z%GyYcm;J zdA)YmUNV)${pAL0_v-;OBMhjB3N7XV#IFza&Vt^D1+TwXiTo7>Jn6k>KWl80BbBy? 
zm3+Ld)B7x!u%KLFcW)n@)#Z7fQdXk{j@`LF=|%B~E74DX;gdpUu#lm-k7C`e)wqqW zb2BE`G|jXtP72F=N99asA|i(3%#!#+3r~TiNH#WH?^pSP)tD?~l6jBrIsYr{wVAAx z9($T0i!gUj{6nwH z42b-Q_x|Hq+wlnf4cN_s+k!Di^jS{;Y3jGE`6inV`6Lc76z^-* zVQ;|LD$jVC`=Yx?-H}02Gb`snZB043wva`mxe1#iBfxm6a)(U8$UbnxgK ztGk(IOi66tX3<}enyEU*E~@HMD@vo)}js6x%Hd(16=QfJEQ>cE3jene`pJ=)(R@2<83xqh+T^x^zmMst& zkeB`MHL(2M!tNL7HDol7Ix<#Q+`3iYY&=^G1H3H(euFwCEIj^(3t`J4it+hUyjM=_ z0wa3x{H$#Es~FsSBCnEG^tj*IT{@SL!mv2#Z;3|5-@ys4Z0oomS3XHiFV5djZam&f zBe8}uJs^ysqg7{Gvvm?r3+}gxR*SjzO_n?vCeXAttC};6spT$JqNm#O%s$#VWgKm@ zS%b}2jIS;g8pa%ObNa#oS?#@J*)Un0yVMsgd8$c|eVr5kVm<6&Mk)Z<)Xh$-Y z%!nFY*h9F0^00BQbP@-0HouT<^E?YBG73Rrd>5CxDpR0}m+KH4!O&-V;$>y?=?&#Y zNYkcbHy3Y;S>iL!tY+=$H*Lx1I+;bWr-iSihYgIvX2dY*tgxHZo%^EYu6Ocq`|ESr zZLG-3FAr&%zN+zG{`j`k=)(8+@{YekrglFd&Gf;-G>x?sZ6Z0%rmZhLt!0fawrKYo zjhY|#-*mD570V0HJrLa^B;-crr@L3q^cp$H^a4A-ZfcKcjM9XU`0oUZzaTN%yYAr~ zD%M?FmFfkmtOlR_GDt31%VMk#)745KR7o_qy5ogXBPD8_b?b?_Yl7IiecUA2zX?Np z9Ym9CY90)!yZg@*>(^PX2){?Qc!or$T{NdN-D*mh`y1)||I9BV?iUb`%U|<5b>Y_# z>2!g-c@AQ5{`YnI3vZK_0?>5&7Dv!OEcsuC?K|>|Zvemff5{_#+eaF`rW-EP0CgF5 zfl<#2Dh56o2S>5fooN*=2APuIKb^2aGbVu7i{RqDl2%sE1n_4@H5CS_fKb2@7N|A+ z2~4Tm)#2fx$JhFC`{k~~00R`L~fA51}rMSGgkfP~H0e>=b%b6sY z`!BiGf{)&|s1*Ov0yw2A($0zD$OrwZ8A{EY`pP3WBDYN=J&pg%$#oe#=;p`k$oXwd zzkYV$NU}C|ZjYBTCKpP{%si$!XNcTr66Cf;G3G^CV`jJZC+Me#SjTMq`WpXdR{u7G z<%g>BB~&NY$ipzp9TDRvaTWAWYJAI~G&PA3QTC&3nwfu)8UOtOsi_s~q+>#O&Rdze z?kg!zuJcmU{`y;$lYZ#+LwtIke)0*Xb!#8C==i9!5?No=XekQ z`-1BF3{~x0)Xq<0nt#`3`_FU+V*>k-{cs;Y)gKQ5a`b@h4EwD%FaIjF@l;^!0^9qrxeV9?RSW=A4UI`s`%?? z3y^pF|H3Qct}N5LNur)sep{}#tayK-KHcD0GghoDbpb13Ik8e-7_9!`^2Y=yMgzy` zC#TS_`}^awChM746(4rFwBQa!zvw@+X4PvSJ2}^aBe5_dpSD!-Lt|p=CG?P1zMkw} zmcrH6&}r-Rs_V;9g5E1rP^w1+{}x4ot^oCqrs7|#84V*lXE*wn{;~)t&7`%aVtR zhNhInmQ(OH;Bc%FL|CX@?XXVU(~5HgRO4Q(B?yf@ypQc2`jC)0E}+QXYun@|y?3?S zZWNHQERcuJjMdmFOrqNlt$hD%>@@fheS98}${q?I6nsB~JHAJ8Fs@N)xxe70nk{MQ z4s@5AXQk@AbN9*rUGGuuJ}|cZNcYb3po>czzV~(9VtmSrd3_s;w`DmXu9g_%_+rD_ zViiR$-xisv4z^z+M&%;sH@C?)Pw5#e({huRl+L|+aaX*tES2$mgMyULTLhe^KYHlt z|9V|+&}Fd+g~Mvh+Ua=8zQ|O+-UZ66Q!5FA*<`sLd1qdnY>#XXv#1PbF3@l8&r~N6 zuxRC;zIp$njWJvc%ss15_46z$-TOwag#FE%;HW*kPJn-Fvg0#ZD&Pe0@k4_jaBt7q zS2!bFIAtv?=)}Y!_Fd6>e1XAL#9X$SK&v%jqNAC--~hin4?g~Nom2jL+Yg1bnWfqx zMhc5sQOWA`;v#nmYT^S#M_HeA_ zc595Qqd{%=pE;k^5!mKSMSzIGQ$j)sbZk=PMjW=kP9b3=-7 zk$V3Ypyv3ca$`WH9#%5X9Et&@y)TeQQEfLF*cm~bGw>7p-{S*DIT!wIrW(P+s6;Vd zq;Y8`rwecPzP{fBos9zYOjneJI8MR$o^;L~c%Iqwv+8PdF}?#^#f>Fw6FtY3^s_A! 
zLBP_ptu%@qznWRrraR*%<5yVLC4$fnQNI|&7l$Ffx_X5#LUp@X4jr52J22OBwsRgT zmw!6NyE_-|w$js)cuURu%qt+1`D^-nX|2wEShGHJ=!4$az(F1$R6gW$TAjmx_=8A% z$*@JW%oFFKuSNdP`H_`!W7=ZTa-fmmNCqvL8f)(HTU?~IN_(a20`eo1o&x>noen~Y zp_;{kOn$eWWYd{+%^vm0T5pVsYG^jxi#hrWqx|&3P>^CQjY_gUW zF`ev=h6O%y>tvcu;n-rjKoW*g-^6j#Dps0B%(F4y|LOhLqIe|seQ2$x8r93nxev-f zDCbkb+0g--u~L$K-YDb~>rmMj%N?EkSdJl_Nz)@(-HyMoGlf4NAJf=y`zp^l$N~iu zSGh;vnRJ_ZvGPiOXq->r{G?_Gn_b_$WXc%;Ki&=>hx6!KlXTnHhmLd!fmu(lribKTp=1vSE7-Qvj-0bh zJXcF^9P56gu)oUDHSqkhpO?4GC=yJs#~-IhI`m!oPHK1gn$4{D!HW1e=O<*-tqJk0 z;fRj8zf!V@EdHThkKvopTh$hhuO# zMlBXkHoUI>OCiB2In#_h!z5@ycV0d6FpYnvk*2yxtw(3~^vHt1SHin4PI_zz z`sHV{HY{j@tqlX4Bj@$N>+OCq)G`dbMGn||kA%qM!`^pv6yIhlh6h==t8J#kttLxF z-UUTa+6UnkWkv}mi?B5ONF$dG7yCgagCY6NE7(Zu;=ya4|7;5%vF`fZj-|_O41k0a zjF^Y5jrGed*Jg4afGKOYK4rg4MvZ)Xr!|py!H-Q$C4d?y{bmMNgw#6kI-uD=aM1J- z$?diKI|??ttzB|1MY%|4Glo#96bsnnl+z=ET&n2j_s`F5;+&9Er}flV(<@7ZvN zHTTX#GF*w>Hnf!KR|OwmkaUy_v&D%qB@HSiHCoWFVLr}aKH2HYNaFYKN-k~LDV)g{ z$Iz~Bp=`LSKVx(*1kDuAi>vgNJy&~~FWi)ZNu2P(4v6Iu?;G>vJBBvPCWnc!xmP&R z3z7F!=FCcH6zU#4l)dh>-ESJJ^ZV?!L&fEqaxrSgpd{N)=O(Bu+D_a#Xwv$we>$|U z2`yS+eS~{x(S98+OrYxY?Cjd7Durpw31>-zktXLmuV7+txy!lR2eyTUyD-(*#sU3` zpn=McIY$8<_73mMjs7b4HrDF(tc@~dITpfBn8V!b5K@{P*)3%br_CcvQC4U6HS!Ql zY9asQZLgX_r2XDK_k)(pV*jT&{SYQl!5%ir`4cH&pCsea^^z*<4YG&WgoJXBR~H*G zyFVn+Q|^>#RYGpAT~VO?Zpl~Yy2Z+MxM~H;pZj+Z%97vvo4fRy$z%W_nmne4J%ReO z0zse=Qx-ELE&+?};`Ug>I?!)*B!;?oi>S*CsHAupzgDY-U+V)@#Lxj9Z`X5O1_#T( zoddEyBS443_#E|eqovdz84t(vm7qNFm9raW@#W>^$7>pkN{st*(!B)z`K5Ta=tkhn z>2Q`!-mdtfK9YK;EdyD6e0;e@&!a~8-b^(+;7>qYcBaHqxIqhFgm0Q?N~?kPPs<*k z3!yg~RG?=!=PUQT3L=TQ!Dg84*yD9RAB6;XHd%`UZ7o?PJO7HsZ=T;+5kXqUs@3$eH zXsxaJ{6fuBZVi$rCojvp6@~Ah{<RKcPIl1?!S|{t&d9tMzSa$3X=S^%c^5Rm$*J z8jR8&XP|p!K7AAHnCkm^5F0F&gi0KBI)dD}zB$%}ELKq`_e=#{s}_hh-&D0d4W^v= zfR6e-f6aKcV8*xCAT`C(j|{HROqOYPnNU;(As2{G9lKV<7LVvt#9YI@`O!j3Xx$T; zN{u~sRIc)SuM}VV?^%K3pR2v0n4CRrj zF>6Ja)Gcq-+(wS1HoFwi-_CvL`|_G*1!l@nUV752OjMtHfv$;~$Yq0(9Z(+6C_VGg zxJY(Fc^CRsURqW*2q9zRx`(DxXbZda+V6 z_A@C868pehdA(s~zVt`F?abB6WAXJD)VcFMsTgX~mM&0FxCaw3RYD>7(~&q)>VJ2Ex>M=}#1IK+~HMNC_d z7%-cJG%YTYW7Q;+dhcEd(1&vt6Upl$udls2ytTXcXYgIFNv9X6(pjx7QXf7jxi9~e*vg6$Qejdry$af^%}N<|AI&Y{ zxV$7EcTN{{9SUQ4tbjM9Z|i-ud&Y4({&_4zL?Ek-ftJ#RAGvA}FEBc1x7IVhT|p9t z6HC)5o}+c(s46P#evw2NcVJ-qvIQx0Z9@=2!Wsv6M%Xl&riT6C_P#Q?FkaFZho%Pq zcxl?%$6|5ds6g*4t2Tu+Uua0T>>1m#+}r$d5rysPe{^9wEkM9*TOJ&WT_cuObf9H#6LMgDH4XCv6k8-;@$-w%@B!tbFOc zz9j+(uUv=ID?oX{$K&xaLn2WLvl02|7&dRPvf+Zodj!gZzUI8aC4SwvzJFzcxbm zP9liyIiR`;gpB7Mw)NOT7jc3Y@8R*imGgo5sQ(TW$X~qva1fjNl_p&9#w2&<^8=8A z5W9b_eA17x!LK7yHxw5mL!Ybcv(`2^3u60*Tu=5H^5`fGp1i-wv&+Jix4{Um9+zoE zD5}BwM#TJM$+ZriA-kpFw-Esxy9p&u2UTl9j`N++u7%w~MRb@l9>c4UcP{2d(&}wO z^NoRp35kXR#PP<~pU3nM74)j?y46D1%>^e4;1!{T-}d}arss!sE~@gg8CO~lQI#qS zJMwiypU{Bi#quJj%jN0aZ}#qw<%pp@cUCYi)XR;=eMo(yMLu9g*BTO+MKajC11Wr^ za$`B4&nPfnO%osXK#jGP$~Gk~*%UKYlTvEBV?ru{XV{mSZGt9pkw$F&{Zl{LnuHa5 z81qOH!BNnCP#OcMS?Y>W>ujjWIRbfF&Y9c}c9gJ3e|f}9P<~nR6o*p3z6_`PDE3Sh z_w7}g9KMxiFD|+QYOikJyqcnUuoMdQu6VR%CVWT4-KreUh5C78qQ2S_KMaG^7?Xh(?wa0g+oIUa_{tX8df59 z9`C`RuIW$bu^WixAr@l9<~>RiQJ^=^E6$E1u16-Qt)=xWUOp|@;`Pp&l6vM8xJKY0hf^`^b+>TkQc()%(n@D^l8CzZr>NH7~; z>I1Ddv<4AnQA@(rcO&5@`Q8`D*x`*bvb;$WMB9gZ9^@mZyL>0q0RaJpjomj+!}Kt!c;!{n@zQ|-#v20#N1@~CSrr@|_muZs5fdF2Gz}8rp5xL_ z@gB$2e!Dt3o_7^`&)-BI?mdtiPQo)G{WR8j(O$x}=*;+d)86UCYWo%heMSiW_o?zN z1q0AkYCIkZ8+-bvqI3u3vVOy6)tnvHh&J(@PMUOcdVYPSGe4@`bJ;_m*g)P zp>b6xuIX-~$J35+uyz9N+rM)mkSex5NW*ff&4 zHP@|rc{>ZS`hl7&^3B4;%zQs#NyZaTQT`7*m~F%G^3vU8Ld1ak%7i z^|D+WUK3 zn=du2I*#B`GKXzkDQ3>DyG#PJ6t&D#4c~Kx)HjA6v$X{F(%w@au6_p5zj>oM9tMIw 
zFuFV=S3L?FA49=@^!j3MN0c#yMm{xjIaLaTGY7N?9I#CyAap5DOdeo%H?n+rnZj<) z9P#|5V%{){j87iuX0B@M#2X?kEBh!+D%KJoQ$x9Zd1G8dLi6Mnxv{u~C^IkO#Q}9j z`NUIB2Ph}#`+zJSXxqtprvM#7z{LtP!5%+k-sunbTehCFj^)Y~PSKrp#K=wDUXuG% zj8ENp1)7*)n%hiuz$GVF$kL=6hRC#<@<$(%@7|4Qlb$;^;Op9;C1gB36Zcy5l$Dgt z#X=wW`u;=U<8-*J`5To?0=vFl5@;SFv24XhD??cw+UnOZ-VQAJPMdV27i$)ty3Ikl zGR`3;lR_Rj#Zo45j2V;{by+5m&w6c5v4kfD%bC+e@m#corMYmDB)gohxw(M_H)i!F5U2cYdm|PMM#VH^`#57XXAI)2^s+^`(?)c zy5NeJJW6d_qpXFq??|GGL4K+~+xQ{eX}#DF;QkVtVGD4loU%`#x4)lwN~Q%&-Mh?nHkk}v!v`~)to5*F|0NSj*8x?t#T*-p@cUXcL`Ysqw=2c*(4i4sYWxa?9F0 zuYepZhx;)idtS2R5Zvm|YAH%4ajjcISVV_Y{qX4ncDVFB;Ckg&BPNOj_GcPrYd3mA z_|B6z@EG3WME31xG_m|_bwA$hDer7jqg=f>xmCi9>@s7fw1uv{d9U2wIA-%~vLH)r z6;yw!Ppvl%-&HTuQ>#-7?MMs`VP#Di4R&0wJ4RldKt!1wyv}s5i$z%r5VJM3Arw$< zJ3W&@Tnf5>tx}%IkDBjggm22r z6QcB5(zd;bm(7~Sn#oDrrjH-kmAbC!%rQ_OW!092=A;G3_}<(c?{kHjhSiELblOG_ z#If6eb7Wec63Tg%M6FXjG-sJ2jCTT=N9~z7^;Dsoue+MCZp!LQfJLdu*fZ{Wbs9~D zo=Ak7EyL?{1aHY+*pGW52jEntwKavoCmMHd9rzImOwxCVTT2IGM13P-ET)RbonFiw zUZFdfsm*?HuCaK!GFTgd@WYQj%Rnj2G31w$VUTgC1+?B&{t>S8hDA<}OdJ!0&MK!7-$qdM?8V zX>B9LOb7`H;c(dzjrpLi+#X=J+^zwQtp2j$xHffiIsmu~dko!=lR3e%&DTwm7;;8l z8&;nJfFs{}xND(tx7qK)yX*08+-sUipW8;ph}W>GRU@~{T~cRm=VHxrep(W`bQ;0r zI;CMsvG86$bnNmI1E!UkVW}s#4H;on6+o<*)5H-;`6laMx!{k&@-#zrjvhD2(R96~ zAtL3~jV6gH<;yngT%`hKYBgxPBxyUVC3Q$Nvz_eoyowTpHHpYKG9ZC`nqR9)%k0a` z1<~RdZj)3zyqr?I zylkDSG`i!S{JGI3Ujx#FY>QDe5TF%#V$BWyJON?Xo#};J+NB-tyn0pMlbyPLZb*%} zAv3yj98e^#Yr;B-c0a3fs~sBN2eDm-&kpz!7p64{lUZv}>ZVU;1-A`44Bblbsb?_- zWxa805u53=4d)F^X}3sWvty5_FS9qxb0}!m^hLYa$c);sTfeo`%SjLf$%Pb$cj$zw ztw`cxhD>f)Q&UlK(u`FjN-^OWKQZp(N^F(L#5?rK66XtG zXs~daC4m!SsAZx(M;LNs_*@hi^)eHOPdXzsVNZLqN?lWoAo$EW(T_?tw#9+q~4NfMliz9#rFo$ur_mvlqc0;x&a4T<0W*YHHWJ zfam*{lq$ldg>Nd&2S=N(FSj&eiOqC$Qro=OiN_0bw4-~IA6I;xN!N`@;jrCQ4CXxR z`cKkr_31sM6HZPON~|Rl1z>UXbdh-^FL>-Toj8s-4mOG=oMSt~Wua`Jlewy4W0Pm^ zQSY(?i^J^sGk(IeB0T!?id_EjWXiVPFzPRl8KXbFdEF5FCI;BXw1sTVdWUf(0Lu6R#mLm_oP$Z{^Q9|LxGR49q8q?KBV7KomuIl zw@ks5^Hrv_ra=?SM!oDFM6I@wFO=_@6hu`QcRHY2TI#G=DavyHds#sn<-H^qR__c_ zdcu`*_FJW0G5>}E0`C_zVHiOubw9-=2+TVuH~q-DD>P%RsNW>UidMakwJs)3KtR4c z#D8TGjz=25t!}Bhu(~?u);n2_3gxb+Os>zN9Xh`>-SOczvZtmNutyiy75`fuE=eC3DHs_1-3_0^{bFW^OUr@0l zGR49yj|c|Z`v^WIF?EDV3>}f|@9bLc7kr^2OLr+^YWw8S3+a~d(qKHMpgEg>yQ4z8-S@Je(y!LQs+V?_wQ-4;m0kHo~!`Uk-Bf0A;O2f@HnFm6()b3=Tv?X9X+zrO`Zja!5@mxwq4e& z3g2({-1$jQ;4&#U;npHzxgr2(%LUI43f(3H{p8F+yK0k3Ybl@mlPimvFC0Ty+ZV$U zo^6owRfO89P)XhZ;zeapBf`NqG!&1gJ~;P}Q`#ce8H%3vTtc9TKti+mBf*3SKM#Y6sjmt)0v^*n94 z@bv6?FH6KS?)kx3bG}*5`$%iG!~vZK>DyhNtYv*m#p)taO*GQ70p@s*>1&11@XF5t z{+%{=t8Yagh14f;6Lvq>%CbTQFM5e%c#<>E$m?!Ha-Ix4sq@r^r^5tk`gN*1=7Xj;mq-A=TH##H;m(s)S&=d8(P9^(=_c8Zpsw^} zuP`r9vzadp2py%-$Ht}7_YS@}QV0{%tF}_xF++==vW;<53$4{(;QL`)s8tECFk@8B zkFIzPYPBsb3xA&Q+4QqXe_T{>3!lqQHh9)`1eXA@icsq@XBW(?J%95nx5vgI$m{zr zy!ImtPIR_(y5|*HP*S4kCAOwFA&b6Xj23PNNY^8tom!KCEztghyFjJj}yg zM~rX%0cHJJPaQ9tpfarzB7xDyTs7xKnzhYA=b%TuuP_vEx}?L@VoZJ(wcqm(-zdpZ znP-zGiajaQ@lo~a^PmSZwaoAr!CveHBtjh6eH$f(eEZ@B^@1(kxXgMXDg@pM#zvM4 z%kviydz;ozvp`mccrr=Ao;%B>T6#DK@$(p~%N6hQLD6E(wC-}2>sanv`Uz7P;%t@a zI5faw${X@1qrJM*wRZyPS~Wdjo!Wi(62~yx6bQ1b6>*EoJ+#%N&F{}SaQgyj@aIX7 zgl?~h-LFrExuKNI%xPx*7d;k}aA(YwEA>>#M+`Xk8MSV7u=moYIg|YgWFEcJK+DhE zaWCv$@VYEO5%LR%xPU>Uw>Pde+sx&YPD7Uo`eb71lo{!&#tY!;S!lHu2uu6|rI{>k3N!&#_FlAGtPsNx=m%Oe%sg?~UkbGl&G% zoyTpK$bH@tRiXu2|3xV9C6UQc&Uu)sx}gGa7_gAY%NJ>ykzROg&Gxm*W@%rZe|d?H z@Lma-P|bs16S3vU;Ah`J3y1UTpGUXrun~roaX1^u6zcJ`=0hk>D4QC3e7lbVP@kWy z;{o7T*Tx`3Yu(7j5%Ym0IB)jceur51>RS2oqFgP6*{ItD)aRU=TbR^rDJwLZ*k4Fs z%w63X=#fsQGDq?YpX7Yyk9frvmA-Hj{x}zJX)3y;6L4Rv?;&*tKhRl!C~zg+o@@9N z-F%+Uufq&ux47=0c1>y#h6 
z9XX?XHrvrSb>3$PHM;1(3wpFRE$dNHjXT_Tv@%zTn#^f!qxqUi3!X~ATotKhYIh2B z>7Dx;Q=nGV$#jdU14w^`n_eHZ-CLPE74;YFeoIA;5xiGAlCM-4FsC4Nz5Ra5-ybzu z=#~(`mZN&C3W$NgX#Bz6&B!;~BCj;tSKE0Xc+#ju`4do!mblL~*f3czX7g>kDzd0HuJX#Z{^(ks{l%;Qq|Gof zsw}3TW+oB}*6dW7XolDQv{d~O9$5DEfSR>)KsP++NTW2?=9Js!%c$!f%xcrNi-4s6B)`3QLA=bl<2XXCiNz!?K<+r5TJ>i)d{tH$M_% zJ;Ss?qs`R+a5qw9UD(DszOEQq*+NPesLN6IH9-6GRRIA5@)HVH;Kuk>xoYBm&{6%K zyb^0xgQJm9ch&jf*B2b&OySuLT(*=_bJ;i7m(ecQqdHXbZ2)d3D|B5vs`TW{OJuRh zR3ZfZWT2h^H1v$UIU^8jTd8D*vv_?GZp!C%xFxGbJHc;u#$OcptoBI_+~Kon{5chY zHts>SZatT!xclw3!saE9qnejTlaOs{fW5Hros*qusM+9+$h-%l{SWI|K`E!QI`0h2X(qa0~A4?iO5z!QI{6 z1}Au6kgv15uWt74yZqnq>w#vfy86_qBhPcrg?07F;Ix0`C^> zPV;>>*YQC%{A2Z@X9VsaR)*39ml_>1(a0p_h&6ffxa|^3)7@(|4!+0h=7?biSlR~$ zJkPJolgYvQ`chSquT9+_-}%SmBF(zE7edXS4qFUsH}@v;<2y8EPwNsdk;COCk z6(v?9f5_LB>l9bQfAz=OQnT>iS@D>O<}fg90X*ouw?GG$>Ud`5pzmbU;uJDzIrq0O zC2TVs`aU+8&q-+X7>GsADwk?--2dk5{ITy7yUDuQIc+|9{LhGZCmpO2|*KoLHBFlmt`;Z}G;xSm3X4rOhwE>tnix>+B{QMNBWtsWL7%v>~-~ zjPJAs@Jhkm@dh8XZbLfa(;as0s0(?` zaHF56rTSg1DRpp8?KGNvrF`~NWPg|MUM{iqRF;`QqEe1vp?m$b-YEDkA*|No;sVdn z&Y;2~wNfj(yephHEz?RJZ-1&(@M_7iP8pB>KWg(|nQSIm!l`50GlQ*ORMNQdV7L7Z zhI;k$^YYH_!bjiDW)-BO@WmqGpP`*0J%Q(MW%PRd+fj~HOQP`IjfN)MExBgG&vyAJ8FMJSr$f|g5+lMF7WwQAaR>E8BMMl(j3jjoP^m;b)VhU z(xvtrLBX>Lmgbdg*r7=yqFG-QyKT3>9ZoQ7S={5_ZldcSNuZ8xPgS66;k6qalF=`f zBoe$i89f*3QP6sJVgK)g1n4O>wL?r_et#eKphNj#9$Lj)0SghDOzB;Rdb%93blYAE zG3hrm?y`ao#-4@N8*ewSXd(Mt<=!8i0Gm1dV!T0(;9 zX{4IElWi!QS{v-5(DgE_+XUZ-*yB+!0<)oItvYu|m z&&{ujR4XViOf=}Fy?xF_+q7TxYQQP8S6QfFUN7SU$OkM29fgyXAPlMxPhJ=vCgGk$ z)THsa$aIOIqh?DbD$tTIYOpOOBn;T?O`1f_$}-i}y9B*kH*at_eESpDK{}zQriL9$ ztIjQYYb|`~1^^P>>t5sx&{Vmmx*;c;P{3{8-#w$Grw^0(_Js@xWnd=F5%UqG^18+S zc95=!Y5x=R;d*`g$wBS>kEid|hjT$rQKl)7tNk2TwmIH)wuj=S-@w2J?j4zks+S$! zDj2ySo`79YK-=72F4E(kr0fB$xE&l6gB^PkR z+#Q0R+kYIk-+^m?e8m4b9ElnujA+o)*kURD$v^NXLH-!QV;(;5MA$ zX`Vj$i-7!fqP<7J4MtZKz8C-T8vZfFqvuY(k9--!KXzsOF|B`G%%wi^tvI;%{l@#h z)*}3s=eupx5^RkBF-t-)y63yNrSwzF;QM$M7YGI(oS=wJMX=^EQL`sTXab7$38`6jkzludUE9M`2 z892YXnI8m?hECV6R<_tMZuZqhU5v2jg@OCM`wOt{Q73I@c;PSKyK8Lg1`{^I#{q*muygWq9dyjtx7e?QEHYU$C=5{Hmcw5`n4 z=~^Ek|6)}wdAxr3!UsVjGsuV0qx4g3_v5|ZSbg|8MQ88dUMKnb3ccukQe2VTL{X3Q zi!ljTNgwx#IB!YdE(J`qt3^cfj2)<;_X5-Xn8D|l6JQ{IVhsdiXG`6$-Vyv_b$*a~ z&Caq1HQGrZ(pibUy3X(9xau1MUFX@9ydxhe6>D?dJ0T+{jnvH&?z%XI25$X8`L6Pt=Fje=U=n?f8NM{IGM?V|&3!V|{ ztk7yer;4Z&?rf`J+Fy|=`R{%82)@aa-_;eaORGUZzJ!jH!rGIO{t4n{=IymNKPKigDzeSiuy9wfuYyedjt zkP$m!OMt;T3N3?OR3~2W$mPf@L;#2K-R7rayg0@Cc>Ry9^?{I`-II9M4Xy1G0(J9z z_X(tOx*iuonc-VuX3pxs^WF;$ZJmlSQVc^6m0B`=^H^BYhI{aHC{%;-rZyo+n4#36#$;1E2!~bu2Xx(U6v$0|7qoAVd`_aq# z;uk0QzbzRtQgTJ)1{$=xrJ?F=^6?%H{*Gc~MnpgVg0mr9f{wq`OUxz3v z;7Ww`X5jy?BQzfY;BxKu1P}c$E1#Vha5YfhO#adi1D@mu3orujol^zJuZ@65k3|7T zf`ixx^uPX|P7aKaPr&g0f7|$iLzX>!f6$1}X z`UJ2;6B2~_`ovB1HVrv86`|v;NwV@ErL06kfE{oIxt3TEftgbfXH%LR|-!ErquUTg?O~jPU`Vgv9&YV`6CP zIO`S|ag>(pQAJM!gBm~9BoPb0kT*?-6^l9H;aE147*&CaStja=-{?9QH`1Dzp=j$FKMZOEbs z$WA@<1-hdW4J?KdD8MNxd;~+@A6YDx8$(T%iY4q#XFU1iwDFOrAOC78BdC5_W~*Sp z5vS9xO_51s7r~@?mjwjc9c=U0gas-6HYUc&y`DU{^wBEg`Lg6;rmsx*4>tB2{PtQ6uz`#7) zT;JH(ty}TPm&D4BrBo?W5;t8cB704%Hky}f8=py=Q)4la1kf3Luzo2 z!$a!{>9#(G*(w@@l44Rh+tN6Z>FHO8L^x}m3coZQDpVP^!T=4rh<1}m00Ze$GyD0 zy_t}o^8C`9<0lM$zB!!bT(wjU;Jdz3Vq#Mka%clwuFCc+1yH8zUie#L_!FF_JZ zJ*VFzfGg|wS^&a4AiL~u-`<6{pgnovD<6KOZL)luPU(1Lh}@qJ2>WuO;?Zo4KR}_C zVl7fVVP_LX*J{KN#9_0{;&Hhet0`8cA}00*dTR3nZPTqMpZr>nais8vVRa9u@re4P z5XhAa0AQp-rwaYLgIG$PCiWVWi6DT;H~jdTLz7Irgz-3-jl5N*TwQdFL{c%0+j+5H z*0+OBv$oPHA^9s3-iNj}6K1`mxvKcD$T$L6^jg(PU;!Ucoe(x`J9)0!afO#!q5e6~ z*&tyt>uS3T!XUtK=_kj*d@I4w^gZRu5}xLdn9(26kS}7bT+%VPmOU3Z0FG8Um6&8e 
zVXHq@i-0hi$Ff)_wLgZwl&@MtF_>V9pZzj|gs)E))W_SYaG=5i?CS?bvs;&wb(Veb z)NAN)YQDunZLzGmxrR<1h$k%xVDy@G^BRq$Q9hLDWT)+)zNX<}zuyPWRr6htqbl@GMln3RN=kB8^lh6SFiX{*MxVD+<7RibY@Cxf&O*H zH(#qVFfcG8h%na`sJy#MX#tI}%~4)VWd>|LT{n(s%=;5W0%NL~SU%K zOS+a($I{RgGq$|c2pxhs69Iwu#23DP<4bNX^@bdI!i>8#27@qX`LAjLQ?oZn$;^{% z*{PQ{QjFRlyJHcc2e*8DVnxct#@h2=X}8sdpNo-K+SNHuSsbk6qZVqE6EHRD$ai1j zTl($Wt(|O38mafpP!MLb ze{8V>xIq&mS{z_-=d8QErrl9TYzpT~6gaWAt~R^EH?Y!my=L{pqAM!10+({uSS-xf zLkGq6_0dXo?occintfza*aZM2r*cW<-c+$X%)z2iF4KGXr9oY22*lx#Gs?-3LN-mo zqCYlxll^ln$74as)3Y=6zns+b`~Z9CspRBlOk zzwryh)Jq8Hk}>Lo8zm{_v3oS1BZ%}Gih(S zR2n#!s)E)WR$Cv>SK4#+HR^L6`_tW1ETl?dw#x`)xn1CZ|Av<1Pfd zJkAH8>I$A7k76n1Q&s}=NX5xXvn@pi=9rClM_MqW>*LZn?Ijo)begb%u7E)+;3Vo3 zVUeCV_bA*At1wijl*BHrW~r?K;nOnoJR*nbg{LbGp!9K_Q5>R1-9eZ=H&?qdelNaqVDt=T zsj&pB+iu?_QAskMnWEtF?tPC}vjq>PjHI^^4uRt7H1%H2`TLxLi9-Tc&o38_N7qP1 zLm1Bk;qN`JmQZ#stjiUE=C5`&6Frjg`s|upXC~b`;3HkuZpSK`u~)+&wNee`V#esG zsNG!Q-~MZ?R^4{WFVkj@)L zUlNG?%%in;`xCgCrX}SJ0fg;5XqsDprdq|vwvqF(p7cEDFs4xg#0RD0QP{C`wnDDN z8-oG-i+Q^(nfs*3aE!iFQ<@Nt^%m3Eu2{}WbM>TmdL>ibAagg2HH*~^*?H}Kwx$v5 z@=zjFxx?3@XJ%nxX#gAa^0vP~wG>Ti$Hf*(m!U%YE_B0jLfQJDJCyzAdq=o9Q_DP! zf^O9(q$Exwg|}YgIxY*GZyrMX6xDuc>Dxp{nmgdpBnfh1e%K1vLRXnO`NV{5Y# zs$+SNsG-?ei>xsn)}0C>B^eUQLeY)psATr1h4LHk{oO3AQn;O?zJ@z>Nz_xzH?iCA zj1KQ%*`rdJcTp@dsLi`wa0aeyW9rm?+)%V*Di(b^I_66xo5_? z6SpgssEyS#WbuQz`(tU$s_nN7I|CT^u-p@?OrpCIGYBaI-rM|NsR!~?cTpdG3a?tlUm09M3sEZyUc zG9Fhg^RPAC-w%=qdw83YF0~J?-dSl3R?Q{ZR?N1|u5mS+`oR^0ar+0uq4W)l*NCH1nNIwOf&c9Mlrvm;Z{t-F$#E-bb{6m`Ob*yo-6 z9b}5`CO~8QH(ys-FG_b=oKNAoNCJcRgvmAoVpTS}ktV90T~|qp=&QKRpYb}}vIDy6 z5omMmSqwOzR4swy4Dn`f-%5Fc06MI?53)TK>Zm!zMHD7*KMy1dy1j>e0Gi=uFjg;O z$xFwJ;=4z55>tjm^NFF-w_A*5wZxE(0#HY!eLhs0Es{AgVX2t$a$T#vxhgOx+XKmn z3T<3<0#()sP5I0G(b3zYxlkOo9mo~0(zGP;=-u}X?zbHJ3C_|3tO@l60X7x+moiFvvMxwY$ocVAnca);E zuT!YZX6^@I(lxxe4>xKKR_3j5Onj6at*-$PdVEYqy@;0Tb)rezc;d;!?U|JkE2V|7 zAr5TsvqM{{pqE!%NZrF-U1aHMH}*((BBQp&2J$^X=E@Q8{a_A{3BhuVK5jK9&X4a+Xh(w}B>%P_w!i=JaOY$pu_L6UZ%yxXmCYfeL zsfsYd6i)-$L=Xk_rP#;{k!lOoGsfTE-t+MN;6U5Y1Rpg<<2$YL3SvY%1Pv(qgwUg9 zQ=flM;Kkg*G><_bmbwvFf?9hGy})m8B>4)_GP4~4bE0LpkJZ5yImrX}9s zRck1q7OX$s@@4JA`Hzk8w}OW&9NGH-aZYRAAJwRaz6XF6 z&pgf=$6+_YAu&!~u}Ze6XonC{PRn$bO8YC;5g*$>4d0)pyO*XUVUI=#nQep>)hDbD z&0yX`bB&yV*kC`#tIr0s3fihC9o1V2gGB~G4GRiV$Gr#m`TLdo^1c;ck@kyaL1r-- z_h^LfK#d;^fN}QtG3G%0`Avccs=MoIwo2C5hLyXk714EM3gJa$GHXwZNZm%fu5XnV z`@{Q~bmQGhXJ-S_Q$;HBDnd;0@Edh>&ZnF5mSx02czh1+d$pa0Xpy44VL{jXHg!~O z-r1Lzd%*c@sl<2FE9h`Nps@_vM?${!km%ZB8ZoomMz6rAQr(-9v|ox@z1Ut4gA$TBLjK>VU!zrYh8murI2uQb)3%|22jJL4;Jduliph#6cj4oS-Wp@ zs-)Mia0XpatS2w|X*jioC`JDR@BTe&){76Q zzISm!e5!jUnCXe7@|8|h93YT27WdFeGvK=whppd}st`7IR^gN}x6e96U#P-S{PwMe za(p9jAf28QoYg$tW3xqku}jCJQm8O?$0ATuSg4TQ3`*-f83-mXi;1Vt?fwv5d*pNv zl!{_n_gs2t&9-|64sp=i@1|C!jwUtyVjM|ZvVKHi84Wm>nO%}`Z#xFgtZQ*OYz9A5 z$AQ9v0>MGu)Yj)CPk`Z>wM4g<5=GFO+RuB{senXh`7-9nQXdfGv1`P)Oq zr9ODb{y2XP!vWWI0iG@%RgJ>I4m~Ulwy?;0MIEriP=DwEo`JF5OZDK~df5_01O)Qb zOeXT}ptm~uVdANVS?9h}3CN47^2q9A#{gweqy0l`RPI11JAa~fPlGKqga{wR&e|O( zZvqusSDB+h!=a}MN>7J23kS%3$LcZH?DP6zu|&62p*qxJp2(E=R>rsGoQXGx{ZskQ z^_GrE`h(9wJJm_6kW)1A9PSp0a7hIzV*Ya`{Ys;IbV!bZX6W@gu3vR(H z{I+d)tzdZwcg}D-M}Mp)$WDv#H%CNiqB8zQZ8y&ObzO@{`bLdyMd(J&dkItS`DBnZr_nK$@fC2 z!OeFL4sYe9d?~An)#(+r7*st!-#ecfh1!Rut;Dh}@OlK|ytm!%!Sz4%5W$q@NmFB# zNjWFyxtxu5VD{K^xC~oI=5}u<1?lz5MIA)cn!ToBx3-_`iZyPf8Ad$w6n0BZ(#mh^2h6kSsa8cYNrm{&Vb$U?m$@Pw?-lZbK?BT$+&AlyaU+&&vzg z6*xCO#_=Mg-5$IN-q*IV*><;%(j9zxC0>ES9H?I@irQ=5#b{k@r#gB|?048>&~Ms& zxxE503zw%8XcrBCtH&k=Q;*in8CxT#SSKfMc{PylbfWLj1L=Fk>#SM%WRK}bLw+Gj zFjKAYdX%uR=$s1JPP;_Sw~ddST*S!Q&csvdy~EWRR=hL<90NgL4Cw2|D&%Ba>E^g4 
z)D(!~Sc$P^dO369yjQysTQW{}|7@tlQpX4uO4_$_(5aUr$_^x%`MC7aLt>H6&4Tv^ zmfu()aa#*`I7wE;wxW#Jn|a{t{KQ2WKSQP0#f%jJi(TR*q(mL#7>^Z`96zs2%P=VY zBR+3P<~2z5PQBq&YFN8egO#;iXSg;_yTAD^t|sTJzN4EFanRC4d4A>7su~RD@VqWY z6OuTCaZ$~^gaxC`CN0BleNH38;gCy?PJwQFlZnPy?Gp(&sN^D{b|-6pqW$BP0WTl_kV`x+hj+t66j6n8h&6qAoABH3Z@5|C zrT`m^5x)Nu}ZPN)_uj7$VkX!ffPq% zz59IE=w+G~7LCeafkRBwgNI{*B51U-a~*mRbyyFr#bD? z%=R|972$6gnIG&w1ud3AH|#f4Xx>0PLzR()T^!%`8FZwIZ00rE5LqJArCK#5CT1L8 zC@cqpl+?wzyMb(7LKtiwR*5mMDX207l%!i$Yfof04z+b7e%zlglbXk%%~@-*H2;~; z;)cs%gI~P1MOEjGVDyS>@Y6-uqr3A729flTl!T>OpmJ_}BoX&=zQk64H}1NrvF6XB zWrbGKb)~h9$Vf=6W%ifi*~D8yzB1aM-0sON78-#@W1@vH^KRPtMthu8W3S3DYf5Gm z+44_inP9i7r5b|aNUl+N(jMXDvT6N&AEL%fgepb%^f7bnQnL+07c3R)!m4occ=Y7q z8`RT1)(Tu2FVNUZY@uOAp#jNOO^jqi^Be2yyGvKRVve{KKPI*&Mn2`CK2t5%G1!VK zUf47EbiEJ2+yS(_v=Mcz`>Exwn(*@<<8$f-F@S1GOl~7(k;JY`C7-RO^utW755NQ-?@R10r&liB4c{DB1{A22 z7FGD6@ZKf}WdNSG&&-4C&%RRfmysWG(b9|BR<^QbFomkssgwGN9am1ThV zlhVK!2O~GYbpn~}e6hcqWcE?~F{AP!)KLF+3rq5=sL3e!3eOa&r~>D{@8Ha8oRn23 z){p}iHt%FQRp}S>Tdl`~z>QVMQ<~sn@=zQu$eu{(+tC&v1bnv{^8*n8HpK&xEDnSAS&<1tD z?t9myyi?ka7q+G^W!Z|93elF9bE6}Xth)zCOup89<a5dfeBjJ^6}Nr+hsd2dp2pBxj-H`Ac)yj+ymY z%+>YQT6qEW_57GFUt=gP*}^uKuB%ZQ)9_Ymtnt;sDv`)i3J2d?sLkp8>{$QlU?igX zryrWqo9li7o}cteq?1){?>MIV$GX-=asM7VYW0C<+OC4OJz44p0MhirPY_?rE0$^0 z+^EP{u+CI&(M;BKY0$<`@fb674pHB z-<+Aw!z(zr-PVBaY@f|IVJD#Tgm`G zF`C=m6m2c3WX+|G30!jbh7OM4Z%-F3UNg(|#8jFjtqrt3f;K#>Y&D>diap+n*UbVb z@cK0(OL>a<$mh#06O>J#>&q%#nyPdOKt`KinE2}GH0k2vw~^5b+&2UCB+Qv-1{%~z z1Aj0X_I`ZvMNhVNBgZi{5z5d$3>(ZsFy^XO-8V+`t~ezCgvHI&Nl)sko^%-!BR$ zHo(ZstAjvqLzfQsB$$(}&|gzLq~f^cL1u_p9l@$tXQ_i(1EnQ&DK4&Dt+1qPJ-W-2 z!Js|iqM+`cI^szY?Adufzk}`RHX5FrR~SA=KYeKqoKdjn>M(H}9hAMpNJ|S>Gs4$; zyXiT2J%VdHrW~wzVA}i40f}ypagBo6ES5EmyDyRJI-1Q2KPZHDfcvzb+fYe~MN?cS z7sxzw9WboK52x~o0-@cyjydFp;IHd>#0PU z=9^c<*9{u~LSZi|92XdFLajv&jB4-e{3NyBW@*Mds2ti>)8s}(=vEe~mJ4$^S}Ior zy?9Mq;J#L!wV8?7$5N-iMa>G*uOuLVC}Kn-Ra?w0?h7-{`9Ie?!t5~A((WAZGGEKt z=dq5lQX-Rx`QABdiK948dD6Zaok%QjW`)VF+gu2cI$bM^47aa?Y0@YbPc}WizNS`^ zBapSW-o9Qmw+xJppDPJVT=S^9NIWjrZ4n66SD1mgum$LUV*h*O-pgF1wc<8@2y?G* zD_}r2l6bAZ@^EJ5UJmn3T8eCUT*gkzLxtQ>Vy|^q6jMUr_{6^5r`yIFvr~qttrU}> z_dmT$0M^h+E#1=`9eTOSfBT;z9Dlf-bMgFPb=VW(dB!7Y=ToY>k@fq7*K5!AmP?au zRL8HDoG~OTRvv!%K!XhQXt4s{ZI=RN7^5b0?>ZG5sZ(R{PPvmxE@(McqM z!WHlgVKi|JRyuo=&7r4)4qTKk|CNl>`M_V9Pp{cL4pfQqeLRBVJ}v3sY4kU!1^+PF zqhQ8~s`!sPS1TTTL+O0s`anre;F#Z8Z4;c#a%O&gFgH25rD&ZRu(*`~B1nZHChI6Zr8m|M+FVVG-o>UKDq`5PXG zjW1w93D1;iE7M1;KUL59qDPIR9w!k`ElfnzhJlMK`D3$vUh4y6k* zk9I8fdqBTNI<)>9VwA8)qg*6)8W|GH#XuNG zqyLjJfP>bXiZtU(_78vQ&$r^)^AqyKt}pdptcw5atq45ehJi5ICuYCIj03U7@9aMr z9HJoze=O}kE-FcWGB{2OKT7|xU;lA||C3?85y1HPZ%XmcS@JuO0t^l=-q$66m(4#1 zYTW=3NzS;8JOhK@)`)CoW+n!zPK-nxrAS*_+wwQp64YOB9<84!H3UQX zG5KnnF4LeO&JO)}g_eWM{#=%K18#il24$p{vN16SqT&rU#kOF)DuFGa0wigAK>?yl zF_l8jE8#dQK>#I_$?bfqw$$Sa(RzLqC`pujYwP2`xK4sos==(|1} zBTXODtdkElRW6h{eEX`ma-gG)^Op2!NY@-$K*$ge6-`YpgZ7^v3k5ZQ9y?ZGr`h(@0S(R!jqLpr(?j#-I_BHvpr z2rM`rs=%p3IBO^q`~?07P2jr<@P5tHQaX`f7c;t4;-#83IY6!X`D(i( zrWn8KS_=(DAktxSefpZVHdQ~l0{$o^xr>nx`m|4^w38LI4O78`7-h=^pSYR$(|fU?m>rjaKT?FZg-`Hl?pP0om=8h0pQ z)uQjWF&UkV$Cdc#FBTk+^3LSPCQBusy?F7WpB?-BzY#Y-NAaR1j^GO39&(r@Gzbg<5M8kKXlK|WR$IE?DCRKXOo?8yumby4a zfV$p4Wd^aoR%82>u<;|4KT?%&{{a+zO37(4v2Ot=T?}uOTm}5_Mbq;a9dlJwp>ZZYU>U1`6@>OZ7?X7n3%W#F!GhE zKck9j%N=|2Gv!M!L0nK?uh)Z6yNGeCKd=JiG!cZ&obrl|luUKBc|fqKvg<2_TYzml zE+fNhd)_NA!;Su-#%wAJlP>8~aE%`7U$s*r$xr$fP<3c*t>YO+7f{VX@kGCiNp#Y0 zxhohe|GkAq;mZVfb5?Ts9x*p9Y|>;FZOA9Q*l=2DAOT~&A0))SSZPEP?B8HAJ4%Fs zoAMrg8z3i|?0iGDTWc3CZopwFaFq0|uxd_}cDXvtCiV*+WgdB1so}$>BbCM->kf0v 
z)VRTk?pB=03x!zYs8z+I&NkR$x$!|oYxW*rEeb0aDLKH9%PK$fUJunx)(<7U&jqs7 z^=`<~3Sv)R)+eVq-O3w(H!ObtV0_HmnoT+XD6GWNZP#Os%S4|G}o8uFpPF+xtX9h-vcfgWtkS8(3@j1du6S_l{V*{dNpSF$T z^~f118y6Rt3$_69abado*28BxJE&slcR3EfGMPE`h)2ghL)ClyN52?duOA0l7y`#{ zm_N-zfW`=Qa`#zFB_@g-^)h%c_(3IRA`czK^eUOyGgyiqoFds-BO>_liQ7no?Jzgl zf!j{JKY_;$>%k#%wHI&6$L?KN#vNZnLf91=~tJF7uFmXittu$_Yx)uCpDh zY%)2K=kI-A+6cfOTt)yv@`#G^@HnbyKE`r` zgV9-Ub4glUvzzlon>(bs*>s@j_g0}eG<9LdGXJp%UYLZ0)?=v78=mc&PM$t&?y7&! z+;+Zj&ckqb308vLg!asGyl&;wNakG2N-EPuRp9A$b)U_m^%8OuBkP~dh1Nf0h@s@( zetOWYLNOZ`qK;wE{fs3Ag_^qVWPWllc@m}XZQyd1>$?<(!kb16O0;e1@9^Cvhws=+!Hr%1&f60ETLqr;5X2QQSHxAJ$s|lC56RQcx#jemhS;K|e!^c;zcQX?34L7=n9Gc5t zyv|zq?br`XH}agvWmER-T!;d4d4r=+Bk>wspsNoQkg-Be2nmt825<|TGb`@#0MJ7g zD79sJ9GWMM5^Vr>j1B^_O#Uxm%2#&)5+it6v{t`M0_=8ob1}&M**~elv{6h#Vpzu; z>2A7d+#hQ= zlOJ!ir_sFPZ?Ri0RImE9U@pK;*!XJ#&s;X70xrC z-T2Nlkwe1srD0ljOqa^VDhp?mXUEH>#Y*M!K!=`0^58*hz_O==CW+7gmMiAuxH~Qm zY(VOk+tcD(_6LeN%Vkj{ocHgbGcwf_gI-Z-Jy2b#c)lU$oo^~`Ev)9Va`V+Dug^7h zxHYX>c!rNOT=JFJWS^umL!Y@E)2h{rg|TM|h9L}gK&*h+0@X&3?01vqWm%v=OA4JN zYe%JzbW|Bi<0wZf&?U(7Yo=8d4-2598G7&+wEGkA^PfpH-a$V#^QYFk4osJhXSnn1 zj%Gp%>~o@c<%~`@M>egP{bvEEA=hS0!o~?)YjbAX zpL4r7a4y1i{etJ%a(}fAH<4{WoYHW*bY&~dx8VP>S@frg0DbiG68~()e|RhRDnln5 z0I<#^5jd!!a}9EgLccCxM+bJg^nv+?JkI5J(pi^b>U`mlHnL;r;A>Sa(}f~g%xQcL zocHG6j-ws68=av+04lNK_IAK(T0cI~w$jU)58u(8&1AA5(}US9uH%k-bpO5m9+Ve0 z>fg$KjH+$jM}{?bRlR*zuKsPm%sw?I-D)Mle|o2d6%<-i=Xm!N#H&5TTDGrr=6t~y zwv2Ma!ELq7EiNt{i+qSuchz{tqL=EUn4WiiHqtWhvO~x`^g+<}rQ6M=*F6v7%wT>d z?Z5k-0eaJ#T+Z+}1KKFh_gJs{@4O@jFm^a@IgFm@v9*rL#apkI2w4R;p#xE~ew$SuA zyK=zH-kcT9x}1I8FCRjUnPKP~W}PPV=7o&n9F!>L*G^!-lHy?wK$ZL>#+1}ubnVoQ zJ-00a?190-7=L_sC0>|$QkD4hiS##-=%;#CO>3O{`iQsghszLA{5QNGcpqFFN8Uo> zd2gJuQYJUx0q)21bbDuYY_=me=j}PDt$9%!H;>GNyW^%B6vJu7YCi3wY{~6@xo}zR zFv>0t_CPb+1yBAly45_yrA~YBkY^jx!urmxE4Ep-G1Y32B4IVAc?Q4U;~PSn$`yY5 zkmx#O^Jp*{0b14V!e=Hohl73gHpOAS&t_t%8AaZjm*ClXizeutefeofy30M8tYjxV zF0@zE3PQrRVh2gFpHAf6Pw{JV#ovL=IPuz?4zkP-6H1aXs&G>@Tbe~s1 z$puTxZ3wAy{ygkO9C`A`h%;g7->~L`OXaggEbZ}vl9NU6ZoFIw>$#pYW7*aK)*ZYu5VR?f0p&cp6 z3P;uSYr|=Z(xSCW07Tbbt4uausF*$_b2<(wC_KG5oQ*vA-;B$mxmhN0y!n4;T zP|=aZ5(mn|lG}Z^JvV;2?5ygs01vr3zu9q5$=MeT1&sp}4h3|kv45K1XV>YM*zabS z1#g_Qu41ns?+-FXH~T2kQ7I_%j!BMP#FH<5)mzNx_|jB98J*oYXS9hx?^X{cVfbSs z+{Z0I&$i#RuXwf#3$7NqCV~HfF4pXi)#jI0rCV<-M>{S%kEjox6wNg2KDd}Soas#7 zv5Oz$xbd0FNex^;w(sf`_#_EHJM!`b4>79D4|m@}ezzVwqxmBb8Ee_x${{kjl6X!( zWCgSV=e%k@$`BN?#dgTjA;q#f)g#q;e8>@Jo@EIc;hcIp$nqu6zS@#>VptnY(g5~^ zy|p`jq{%xxA-EZ*X&G~Ku|Ibz=V2n7kSR!D=fgO^u*-QC53+is-|ic36tgRxC*HCM z^Poa>phbEff_(w0Xq;y058A6F$J24@!Mj^}zwESOZ?i{55j$qYMmMteOO-_fY_}e18eCKuP%?{@nZr*#hkSTan>f871-=eAh5-X~l)d z={N%MIdt1FM?We?rm}^jXgs82Tlsi2WU%D$`I|y{@N)8#$P^c)1?!vL{D$MmFX5$E zp1cSZ{=044=dHJW=az@YaMz%h_EUOsGT+s%2m-GFKvPJ!ai~r_B0ak-eanHqS?=SF zv25Mw3acig<$7Of0aF#-3F+x-EU>l%KJ#6=zH))-7&{|f}MDLvss_3 zm|XIuPIiU%qfxFJ(@F?=G4f&}LKC`YSy_c=y3 zopItw#f4x;j`~y5E^UlJofl6z@T7Kv;2yrKT!Mv5n$u)GdRma4;sAQe`c^ zXL#{GenfYbC+4!k^{oZ??FP^hb74*6Ew_CXvu;Wi_p+;(qc_7$BEv2;WoZD=j(CSl zjDuRD#!h=xBM?eX6uFEX`|~6h|2>Y2Eaw%@p3xb+PwwEg@UTkZ8)5g!f;CfqGUKHnsi+*I6Hu` zoRKCj^V8iI3l#{8BzMlM2Uq;% z4cb>5;rsN^)R~!k7bg8q!SVW&%wvH>m3Wo|~!<1X{sna*mxxPNaJD~Am?_dr4ul8v>_c$J87#QNkd zvb@@mjkle+cqBr|uJgfc!^AK_9;Bl`Q3uy(FhasnrsDY(hV5cXOA^;W#ch!;`$C-+ z#T{&gEL#NmYWHr#qK>(+<-UfGhi3^){dqZ2OOr;+ZR+(=JnsV%ed7*X9p&g3LhZsf zVmw~;*+Rp>k~Pj7GCRQg=19ucJ$Q;-zwmTO!jIAXU|>$S0oJ%bys)?|WSQCb`Qx7V z*~kUv^(M(~sTUs-KfFXLC*Bt3e$LG@%h2FOp16+lVuduzL9X0j{I?1(_imO9N4 zPG&hxRi>d?Hp`}=bw^aElF~{1e`XHRA5;v`I6m$qr$nH!mX6GXGGhJpIsL;GU;0$!E%*1ilgF4g;G>&on5ivR 
zgF9G5g4m|d78PUrV;4`!wjZOljmzV4ZL4h^W){=GeRV0jyqtF5afLWA*b+)h@_T^e z37wpwzrrU$3L|4gj(pu)aX*HP4ySXI0JG_migHfWD#xtN^K(d49H!H_z162Jzt>c@=&;Xfm?j!U%`3*6U-mR5{B{<)I2yuEFOC2hDAcgAoyULZd_ z*DtFK!@~~u<&lk65ZK)Symu-<1MAAT`Zi6aTh@fP3_WbCHdnvATg(!+HCE-&VI7^B zgU3zP`@})LsqoEyU&V2l3kl!sxc#Fz#g7?(3L!3&SM+Eec7+EBQBDh5Mh_E?HRi>- zX)*rD}|ITGGFf^~`xRH;cLB!*g0@R5oc{)tJr9AzaUts8a+F zDzZ^XCmkGVKuHl}SRQFXL@uEbbzx-_d@@hKj&_jfriaVV>Us$r7HhA6k0ww`c{O=N z(+#SKuFm%LQGH5Fd #km4)_q9w(vjfo}3Dw${qO`%Y-oQt|3(}ChnFu9=_I!_eM*N7ffz9FB2`dzm4;&k&Xj|&ZQ+S(44X*Z(Ru6|6}hfqpJM2egz2; z=~gMFQ%bs|B?T0al+#ZQN>Ky`gDUR9A#Rp9%%&pU$C4<8uL)%!=* z4ehO}dFO)=uah@ixOdR}xx?43Sa7y$^yYATQ{^*rl=G`J2?AU9G3bc26euF>-Weu> z7{zvhQ^qb#1+*;=fgOcIv#;>`r~ytjkg<{3({D-Y{Oc7Kqor;HdQ5rSZv#nAUoARp zC%K<&Qd`OsTYTEf>VLOpxrKdML>$uNH>;X88r-F7)Ui`(1X|8Dl?m!p zLwvgB7C7z418!rF4G&VowcMv-Lr9-Z%Y4@E(AxsO_PC5Ea|zUK!?mTQ{z~|5*@NS} zwQ9*0)%@S#b1~A)NB35y3P1FLPK%+=%y{o$=kUm&4%O2m#7~U`4`cX)uT? zg`uYnd}!mzixY5h+6XzmZb7nJ0CFRbr0lN!G!yG?;11WRf+7&@558GAlF*{PZFuy& zkjHFcyvC#T&@|}Gca&HllfRJFJz0+#CMzI>(_$fw=ZPD`R4 z!F)W5_JkkOL4^OX)PV^1JFxwS>tfJD`;al6sr~jWA)^llzwI$;e|R<8x4X<4(^_m+ z4h`)P1!q=CvF+^22jANfxAqgNyo+_}Unl-xWECE$O0}IU9Fv+jEKXouH?y?ZKo4!t zorgiZ&N?fhF;HO2n=h@K#F10aPR2QY! zo>M26aQz?fyq;qW>1Qnf&_6zuphX*jD(DcMmth&j3l*QqKekY{j3%3LSSjCklKog^ zq0nt)LdWqSRa*fz{1NAFFVBeGibi3e<(+rRwG&@Mhh)81^xhf0+WFjkm-vBm8qWN3 z!UMT2NOU9ZzSnNY9S(~T`}6*_0wf>Md5QuWJwKt|Hcr+6liL>Uk)%QkIN2S9Km6{4 zl7iE8oMGCZ-TH<=e?RO9B;uTABWMn|v?j3c1(qh52My-y`Q*`?Tdh*|Z~CE^wA;;{ z>0Ec`mecY2x<2>y)(D$^Z+iRJteNK|i@^G@LVUbBT59)Ra6<LH&E{g4+y8YYsg> zU1x4>X*okMvsKkA9hjkoAST?AqhG)*q*Y3!FlFFg;VG#sQ-LsvCQzWn@6jJ8_f~w^Ut$wM#p;{cgudwyYULx%P`7#>wao zB!e@jqmht?CVdnZyX|wHj#zTn%n;FW`tFT?pE~|kLZN5@9L5UTVPRng%LBOuHJIm* zmdCuoB^!Tv$vc4@OUTS>0he$-0MR8ag<0lfCcej5;=KE=iio2RwdgUPAIjev`Sj#^ zDQ&ijk?gf~{FngGR#NUQ8>=TRhhRZ|40xQpqrQ&xcn7-Azke$VD8*sh9koM91Y?0) zA)Dx$$IE$dT!ut8WNVI=)XTTBmS;AplcJ{TB}PyNPR-mP4GU=g2ZP(%Yh&f%cOqhV zcJ0j8JnqHsl@RWASs$()bDzamK;KNQlDK=#MJUPHzTb(n+;r1T{@izMVby8O!dH5i z@05Y}ZvQwp|JCh0XQ%6ds+}WaBcTunAY9(qo)~)$ytX~L-v91R=XI+)W`m!9 zFd;3MbGd983CIgMa?&Kr5O#f`K+m)`MkJ*LF1ql1px-zCWu_($ZEZhAxPu2f5({LVIo($U5Qw|)Y1epa!~1V zG`q9NHY?u?1?b_!Z8y@AUD(vl&=2{zYhS6@P+V=ej}|&!VEEX**fZjBT+tc*BRMpjJ-Jn1!Y3TCj+E%%Rb-vXe zfwUgJ@h-E}QH0xj>Vy+=$B8PdxprR>=VOsEt0;N6&)yJdn$GD}ir(fhHKtBE2hD2r zds1mFk4H;P7+r|kh%=9$8luJDo!A)=3ZVSaNt9UYw^Avwgl&X!057ltmHZhc`@dYrdEc%iH z6N7jZYtH7>-Gb3>vn%sBt(gnlxjjTu1`Ke*J-(=kQ;B<|W`?0$)O+4PVWijh6!hl_ zl@jjr+>IaNR(Yth(qt zs$G&b(c?Rb-&NPOG9(Xe4boIPNw%7JN#7P^$|v^q;hKyW#LBDXFsG)cw`n>}7`mC8 zs5^XmnmqN)H#Uh)&YG$(LviWM-uQ8?r&CKkMsSxH(e~2v9!FQ6FR80gTLdk!@zWNA zr|ngSiM*t<=%@BU8Qb2wi*}AJ$0*8AYKPEB?1RyE7F}2QadVKnU!FPn@@>}G<$#Fi zHUN2q#aq_2qwyIh0}M2Tkb4@dciA|V*Sol|!-dhNGH|03W*Je^+8n&`6Sc{r=mO-j8uUq_pfWKNQf>eT z=S88_;5nmq&?sW*NcDhkqh$9RYA2iUcgV{8C`_$T_KQP_Q*rz9akE?J>?h3GSE_99 zw-yWBe#(Kmn==+LtykasGnLfolwoliN7;EbOhtup+{@B#D>)OoQZA+vhKA|pLuK46 z$uV{h(1NOuzh)GGzB`a9k&Yt;Enx4{QA&#&CKV_IWjOS}s^ku3Gvb&lZij&OS#Bvd zMvx;hXJ9IV1b{iYCqsGEkUo*O=;ecQiloMR93@X`5g1qM80=4Ss&??^ z<*gq6=(|vhspIoN7~>ACH@MQyllh=G9AWPWtO-UETVP_-glimpeAN@o$L~}-D({c(NyZS^>kb)?$t_))^yG9 zJeJtc`1bsRSb(sikE!acoM3- zs*_g>*l9K}S2YENm)ZA|P#=YW43W%hAF4G=4kbs3J9l$1yLdnD9}hHe_hU0aAaIjxL%sZPm`3r6E~+eICqGE2OZV;D+< z%7eiw(}p}TiZ%5_5wrFj5l+ zBQK2hWl~>`yga!H?$WU{uM`J8N)H|bS5;Z=Gzllj(bT!xgwghaHjPqxz~Qom^x=t2+{aWUI!6^nk2)ep|;vM?2?e&2)r7`5jodga{{6(FZLB(2~ z`HbigCvD`mW#p$|5-^k*JC~ucFG`PaGE=p2Ctc<>xwm1xK1k2c4(GLBoK zToz)v9(yHcv+?*usH*`x zm&}&PvQJ2t6ElxMILe866CBI*#&JWQ(8{Sx0Vab( zjQnidQwxmvcM!P}ew~!@TZyxFY05g=rzg}fs{T%a3~vEbY#ZpK0gmB 
zvXBSxVl~Y%c9d>=E3|seLB;ik6CUgQ+`ADk5jv2shB;*9h=QmPXJ*Q-#u??G;WgejZa!#M#ZN!KtB25ZV#%7A zd<~sqM*&9D;KTRwX_v<}0)$Z@@;NIPU6@ zCbv3bxb_iSF+~)G*5EBJz&QpxV=O-uKoW}$kukzgVCbdLt(%yH z(Akte{<6<~{CS@lG|Gf(&OL)xMv6rSa<#3z=Gu>0q8U9@OoSLfmc(>^bc|=LsiCWH zvFR`j>6%Ap0Oil&{RC1lg~&V+Zv?!tUPb4gQRpU3>^cY*Jc|JER4aflS13+nV`B|5 zj#sqyhNE-H!{R2RQ@=jY#5=C1Qc_f0p*U)MkH=R1-CwGGsUN7y6^gm0cy5*_kAA=Y z=M}c>%Q{s;l_KZO>7I01vtyhlL*-v^&QDGQW}Tm~_Y9#qj_eh$0JJW_BQ&MR!QlRQ zKSDg2K>C67FT`Lui~0_ z{wJK@Aj!|~0UEqjamgOwzgKzl@YnhH`>i)bH330=@3^lZ=cNz>SjzW}*^lC{8cGTi zAZV%K?jN7}iL(lk;e+*RoPPR2_K#2f8q^a4K#+qj`tOndzQTMDW>0rBtBdL~7hLoH zgbE1aVL|>ipMMbMCkAFO-}$LA&M#j4Jt(0*K#*8s%=KSH{6Sbl3Yb0BD!L++Ujz6( zsQ)e}VCnx4ms9bh=W8-#)sxO8GBXr}*k{;wum0Eozcz(8IJXcLKis-%7*k*eQJzrFA^be${=5ku1(Ogp^%2ok z!}ta^t@MF34CSiXMZgBKK=RG6AfP|S>irtXiKva}7#z_*{`cosZ%_d76LQnws$rlo zgWsl@S*ZPfjr-Sfl3_}M#rp3;{&yk&Z|_L&zSn~F8gkYHIm26R3n~_YpaFE7U8ewT zYwJ5K`jhYVD6B8OQSXi2<`fV`LSKk}kC1&?n0Tj9E2Li6qgTldV@~h1&O2g@4W^dS zF$X2v#_iNW(=@jr{mW0mArv5Be{{oD!TGs8ev1mA0X(?So>0Ucu96iEm^3{iwD)aL z2pCjpw!Mzjp2fb11*JlxKt6`_sq3!0V`6A1IYc&*iijw-qzd%-vQ-VI5^%78u{K7k z-@n9BQl*^flM2f4JzPHl)kqWFx7$m%CjIdgvy^E~21^3jO?p%nmC6Hxu<|bNRiRJt zqRNjVx3Ww1n$gSWM(&KeR8?iut7K8+RUZ`;S&p~iFTXy!@Dec`&udqXRgbc;?(MQn z$WpMeDSa;6dahRLCGaI&erXDa(9OPq#sw5b^eC~xe=Eo;(;=9bSJ*;-l}Ne)e=F@4 zTx;!PS6A1r@U7G zZ*APaZ?5IFyJ{EY&gGdPbMRer(W67n@(_f6=?^eWdQ{N2{u1MV%<~K;kfA;l`>!-B z9b4nAs((DY%`>A_K6PZ4_B1={Va)DY8E4wsSBJ4(h)jb1HV6j~8AuNbPnSFW);cmM zFA%%H1ew@YrUhlHH9fa;OUJix$Bg#%@k*7N!#%gslZ3)f+u$E}K^oCYlCgHj*+{N5((gI{bw%a` zM>W}b@ks~_ho(AHq?u`=@$MnOMJf$=KAiFIqvu0;`nEuy$TaDBIo5g@_Z&rHS?v!j z;*}Im)pSG^4ToGM#_!o{c7vbC(@WJhUoO*LbquxOCpW8>h)I@v?Fp)}rnaT`GzH!n z&jQ#_C$B+Frwa(UUOZoa>LByjXxsUlTZ^~pz)Gj>+ z7<4jKO3&&2?f&d|mzuYgP;XYs$5A`9YyHc&V#~e4+!IyLo9=wxC1T3le$TxaB65dY z?f9tYedpuYo-~iP6N1O2I@MktQJ0Tv?-V+4KEmiHvkXw3-bZFXlNv3t5QW@Zl;uf% z;^x7Pv*0$EXnkU{HVwjRXMv~4G5Stl9y5rgwu=V+`pouok9(Cfb;Y=K&mTx>@he=?RJJ!IN&3{lp@hUMlXh*WqbX7~9z$GUncFlOsR}e|<#YoflCUqW5|kVqPuFU#0K#ni zDL2_W_i)=QOfPuVavmq1?~tuKNk6VABU4{(cQ;UVK6wA!72O@#7U7 zl^FvD(OL`_86Ke@NlKSFxMtB5xbKWR*E?{(6HZ{qe#l<`ZU_%;r`K-3}St2r6Qdqmi1~o#!BQ zKp^`dqeZZM)Q$-A#5p}4G$8u5)qX2yWa?xl5UJt!<@2`LS=*Z2Qrpi}yzI{yJ!IwO z2^a?rPBSf&7GiElb(*xt6&tSQF@}Z4l5?2#MF7XA_-da$df`*UPlk_e{gpo{2m>&R zrnR+!^CMD6(BC{PdCePSyp1k?*m>c7TcQhHh^MrCZnwXcHzQ6xM|ipE@sWMo&X+jb z$H8j?yB?LohdH|BxNy}^KmiIvuSc1XNglkmXo6Gnt90@i`In!?54ZKquHq#wJfN;- z5Cj?i3gG%<&cTy)s%Ixdx38KSxeJSU@y*BKKOXoc9}jVWlok{$xzdhN0;;}^>y+`s zt4^{raK_Ab2Iu}gLl=xUtqkZ%(Bg^-J?folO@fjJdl%ir71tW*p<0wfs!OSrIHpxv&Asv`qH7nbG>wrb2!Sdet zx7-zP^7of!rv^U#x@?oyX={XLx?Vd{VTnxUv|_rwjg%Z!*7x^#mC^`Jd)o1um2CQ_ zO{*nFF%vb7{%Ml=Maojr(xH8Q`j$&W)xIW|j3Gp(LoHZRBi89k0S^Eo_)+hc%H+WCVcj$WGOxLO2_rU;k}**q4$Uhd-H2=H}+43yZ#8f ztipPq+>D^rgf-n}pZhWn;}-YV^uBk8uvco!F)l;gxQDw@^76(JqCfz7ARR2}QhFG- zeaITqFCr}#12aQM+1aF$1YE+dK-oke3sKL>b8H8htbARJ!O8R}gtVZ5h@zpOL^VyK zUPTmtu{B3s`AZ-Begjupx>yZfFGzBw8Xw$GvQ79@wsCEv_1yOcvguQI?EF)3ce4Rjy-GCPyU+kq5>~FX^tFIYX@o|mBwm^X zU`1nHNWdp@j#q&Tn9P8Ar)M8`%UxnrV1@WypouipyjS5Fegr@oZQ7<8Q(qLP_Vo77f<%mn^HVGD9JNY0 zx@S3L5>AU0fpUGYW3d#sz2~IZ!#_QMB zk#B(n!K0N*P*N75$z4?S5B=lkJ}&?YL97K<>_{s)Z;8=r>m5_{9XBV0Fe`CVIwNq>PbqxNVYq$%oC4|!RCL1g3SkpsG_fPTE zzdc?50&|4;P~G>(?;dV8L+*Lrw3|O8+n#F=KJXlV;eNAAF9$Pkl)3hVip{8_gt2TX zOPZ36t+xa3;iSyK`8=tYRDV`v+KYN5+oeqYRP*5i0mj6GMchNTO>8x@B%#IBR;Bc| zt=aS8MxdIsHWEPQb8NLi+%dgUx=eWOxfWgaU{0t%9-ZVWPw~vbj|wS}+&u28|2Lb_ zFnF8vrbEAzV~o^8JO+qtmvcEK6%`e0;tIh*MkH{;a=7qAe$|tN`0SSfH8% zFc5`l;>GS|ojAe`uSe>>kq>f@UVrWgZ3^;G@8J5uUcMa^zkT{5TyGxt?+M3zEA;%i zW7+iSCXPOsLssXFYf^F@qjj9CeypIR`>_sR1`#bStz6XLJ%*4{-mDuwV#|H&HOC|% 
zbhG?x(2DcxB8%P;{yxBisoLkl1rEy(OLL^cn?M;9Y9~1s%_p^5n(FDZ&GxI)1N{MV zUF&H69HxCj6;8b3&r8hL2I3z~;Slj3IXK1M`g{AoA@2gYU_BZ8YHJ@dJOo76YTQCr zfbWVCmE+^+LP-S%wWe(=Vo$yvvQg*-b;Pe2zum~UUXq4U937?C9xXvH_D6{n{j;1R zxO`A|F$z?x@ZUSIeJi15EljFD4?IcKDjF`;?GQUT=HBJyzPlOQqgGxmVKeXT0+E^)3x9X#ixd*TASFmnz22+85HRZXP2 zWwp9^5Fyn?G<7BiuhUu>@A~ZpHkILaPEBf$BYouYMw$%fO7)B#iW1P+ z;;`PoT=de;jlYe4b zn_EQv+i~Lk2ydZ3JLA@ZSH>t3F}DK<4U2VYE-5oOiv%_z9`VkC15ETNZXj9;k29pxOW7bPCSLO72aGg*vyITG<1u>fSNp z5aFNl?B9Ei>=ArM(ufEWMjOc@aNuEIL^&GlE^%-NB^#({IruQB@8xM`$QQbQKXZ#^ zrSZi@yf^~I*RC6B;ua=ufXj8~jV}X~T70egj zAlY*OsZ3F^;1Ln%Y4*j=^89$+Yb}&LZ`(Rgqr5H3WZakZHr;roQ!(`9QJwb7x|mwY zSB&dp<-PnLZ4ICmheELhNxPMNqlj73msLf^u4*?GqO+FkAZSz*n+YpnX>}r;5El79vx6>g3{x%imQK>i&EotF&!b-pSpuPL+bi|b;i581 zr*3Olnx%k(;bM>1nC`lxCHW<5TURK#I#%Skm6{|HRGJtbFdr?^+>r>Ed%Clu^l=6V ztPulG@{o9NjE&i2>x{qfDn(*kJ10op; zt$vlsxDS!E7gDA>Lm>?F##1okpA>C0*5k}RhrB^0fCA(YI##|tGcbB2H7Q{94n4Z> zb2?~q7b#CAS7M~vAHi>Zp3Ehn9vAf2S0WJDF$dD1PsA1@xCd`sXh>v+IctyI0>M5J z{aijrrB+}cixy<^5+=A90&ONGWdij{wzL~r{}z-t$U>~)Ykn+ZukB!;zh+;+drYU4 zUWDeF=y{Y0m+iD>pUStrqj&4WtxqDqvN-;Ajsl7(QiOU0!XNSHk7XGe8dg+xR}J(- z0s?LwXN6aql`}2NgP^|gqc@?4Rb2L!c;z0Vq4Qw^_nrsvHdt zOMKIKF0Q|cdq?xdwf&hJ2s@0R@9R|uVNZc@si~=c9lS|#ai;Tny;bNE{iN&eGZR%_ z^?D{GrScgv`&L??p&2_1YlTT4A_VS==?VMFJT&b~HcntLxYWJfT|*d2~lIGyI zcOD+xyjLkhWME&ts#Extru@fw9CBg*VILpaI{sFMeqRY+_~*Po5MDKm{0slw-tO$m z-%7~ESYH8S02N+@<*H%mUijx$mZ8H}sn-ZtK;1`pt9#Wj8t?&XF*F!mc$G$f<3dNi zajWq+@xQO$F@oRn^O%i%|L1P_haWLTlUtCQ_EwQwVfMDdudkZkEwb-Tcf-QAzHr3j zzP_;e(o*#kk0z>#5T{@)mxiD0hr-3;J9F=j$GcNhEq!L${32X8hn`+Gy>q(mN0$cX z#|z9c1qMcrwGl1l+Zv#jM&o;f@rcD8LVCJ=p83C7PssZwcsq6@3MhH+PKZoj;LS3h zK343Or?3B@Q2hV!VbRApnEKW|?HHKw(SPO6m3}-7(i`OA!6tb!3?3hglmb85zYMnvcA&`W`sP z!A&XUPaqwxo37!bFY2s!ES~Q0o1gx5g_|lQE}ov6Vbn%~gE?AFt;w2*d`w9xm3A`? z_WIbqjJ9PtbWjhAaFki4Kr~A^PjN)t$tgZVd;u+&zr}G4M$b5UX$J^B!S3=sSxnHC zDIpb9Nvw;Re-6Npa$Q=S#eU;;tzkXSo_#|}!}QFCF+CF6f=eXX!1b~*48z8fqD;l( z($f5q8vWTAmBz+#fi?zy=Ay8WFk)?2Q1wzxKxsUfQO+OG{3@D%w6VJ>XLYA}+gT4UWRqpwQusRST z_+~6Y$N?qb4NOApDoRwbKWXLqD`#qKzLd>+R$O4Lc`m90(|&Dm)y-vC#v?PBQgY}d z(~m)FvZ3r9WIjmW%QW1pK2L^;44)<)9TqOPKnFcnrv+A5cwqd)(Pt_&+H#;Y+3ItS zS4oK8`N`}#e@OISnjt_H&|c2sBcR%od_9gu%#yvNXZ^EaC>Nqin293eSHS-t0vR(2 z9040upRUAEF9bRE%p=>|e=q#S3#Kk&B0ArXT=m<3KS}oAi4FPR#Xcc%Wb>gJy)hhS zB~`3PPn|}A8QkJ0LFyRoV=$1X&0IR+7~Q!fLE@rds+_l`)C7N(&{?^8fh4y*&7OI` z$magJ<8U5AZ}Gfzi*{SvdHoc_*1XhiUhdrVcY^#MyQkp`@Fboxb;MiMtg1{`#(=nl zyxOz3R^-xu-1KYf+E4?ue?6=kRDCKi={2nFS!v9xITDXL-L`Us|2oaI2cbXJ_j9k3fM_%GHsQP%7Em(x#@?N1!Qb8wm9< z`1xm)x$bF@Ir&SdPMD|vLm9{?0B`;(oK+McIRK@UH&3kM+Zc7gAo$^pw*r_`b= zxrW`$RW;}o1l39#dhHei<2E=oV?7c?5&+sFzuXYPgNdZq1*cMe8LZ4ZH=F);f4ggFAUvrv~!Qg%0(nDIm5+ zQ$5m51*0xLHn-jHoGs8?RJ%$v70A2D!b)=2jX-mBQ30*a;33G~v)#^hYS~*IwKLC~-|hNxY=?GxZpiP_AVIidFc8ao z^(I}dC<7lVvr?oaUy{wC;PAXtE5!mm>Y{F+zM)~;7vy!j z>@gc7!EWCh2-4c|T9yPF&Y2*+T9a0@qDXzUH6say5WM~(oZyr6m(=Q3fNSgwjQ%js zD{dyeBW%q;h&}rk_mZQCRv4)ixuv|ZS5)>dyJi=#O3i5Ymfp0JjyjNoA5ujNjAYPN zGe_P2a5lQkrJaJ(XrBpSXnHOOp;&82H69(I7|p@qD5O>N)t%f>M$!k?A+R`(QOzWt zRXfhSSP5T23hnkJCOvIZM$kdcZZPonZdv>}$m#QVrK_T46Dt$XBPDQVT$SJ=E?+uf zHs=ios!TvDhh}mkqRjpIBQGAll2oxrDTjnmv zDdRgI>$SCaZc_#QE}PUMsu#CDVbT%vH^F{uk=3t$wC1)BmF3j3+Rm$q8&{Wcivq1{VTA^c&wM3Z-P988_S6bM zu+7R%s=>l}T1y?+9rK>n<-}BrGN&#D?&BP`w9MGL8sXEU6gY-+s z6|az*JWp(Dl-l>0l&JHdC{!uf-;e4+d| zd(II^tbkrREQUpz-FP>gdSO;uKAOXNj$>h9HRf(VfTm+V!iM*FajfmaN8P-=xd?thn1d#e$lo^t3VzMHJEFSjRWtv z1M$T(jujx^wqir*5q$bd;cJG#$4>epX`R1={bgFFVxzxp0~WFS!{I`sU{LF*#GD9X ztJzgv`p|NWV#IBwB}igu7o>6*zp+i6h0c{!r#(g9_3_ z(eskg^#bUT*+6umg^u~w46UV_6B~tXjtfsJXRd1gx5|Y%pn6zyg5$1DxtrR-mAd78 
[base85-encoded GIT binary patch data omitted: not human-readable; it appears to accompany the screenshot update in the .asciidoc diff below]
 [role="screenshot"]
-image::images/transform-alert-actions.png["Selecting connector type",500]
+image::images/transform-alert-actions.png["Selecting action variables",500]
 // NOTE: This is screenshot is automatically generated. Do not edit it directly.

 After you save the configurations, the rule appears in the *{rules-ui}* list
@@ -92,3 +99,44 @@ The name of an alert is always the same as the {transform} ID of the associated
 {transform} that triggered it. You can mute the notifications for a particular
 {transform} on the page of the rule that lists the individual alerts. You can
 open it via *{rules-ui}* by selecting the rule name.
+
+[[transform-action-variables]]
+== Action variables
+
+The following variables are specific to the {transform} health rule type.
+You can also specify {kibana-ref}/rule-action-variables.html[variables common to all rules].
+
+`context.message`::
+A preconstructed message for the rule. For example: `Transform test-1 is not started.`
+
+`context.results`::
+The most recent results, which you can iterate over by using the
+https://mustache.github.io/[Mustache] template array syntax. For example, the
+message in an email connector action might contain:
++
+--
+[source,sh]
+--------------------------------------------------
+[{{rule.name}}] Transform health check result:
+{{context.message}}
+{{#context.results}}
+  Transform ID: {{transform_id}}
+  {{#description}}Transform description: {{description}}
+  {{/description}}{{#transform_state}}Transform state: {{transform_state}}
+  {{/transform_state}}{{#health_status}}Transform health status: {{health_status}}
+  {{/health_status}}{{#issues}}Issue: {{issue}}
+  Issue count: {{count}}
+  {{#details}}Issue details: {{details}}
+  {{/details}}{{#first_occurrence}}First occurrence: {{first_occurrence}}
+  {{/first_occurrence}}
+  {{/issues}}{{#failure_reason}}Failure reason: {{failure_reason}}
+  {{/failure_reason}}{{#notification_message}}Notification message: {{notification_message}}
+  {{/notification_message}}{{#node_name}}Node name: {{node_name}}
+  {{/node_name}}{{#timestamp}}Timestamp: {{timestamp}}
+  {{/timestamp}}
+{{/context.results}}
+--------------------------------------------------
+--
+
+For more examples, refer to
+{kibana-ref}/rule-action-variables.html[Rule action variables].
\ No newline at end of file

From 10dcb8e8bd4d845fbaa232c60fb8326471ee5b8b Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 12 Mar 2024 07:35:02 -0700
Subject: [PATCH 137/248] Add systemd native access (#106151)

This commit moves systemd access to the NativeAccess lib.
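As a rough, illustrative sketch (not part of this patch): with the new abstraction, a caller obtains the systemd wrapper through NativeAccess instead of binding to libsystemd directly. Only NativeAccess.instance(), systemd(), and notify_ready() come from the diff that follows; the class and method names below are hypothetical.

    // Hypothetical caller; only the NativeAccess/Systemd API is taken from the patch below.
    import org.elasticsearch.nativeaccess.NativeAccess;
    import org.elasticsearch.nativeaccess.Systemd;

    public class ReadinessNotifier {
        public static void notifyReadyIfManaged() {
            // systemd() returns null when native access is unavailable or the platform has no systemd
            Systemd systemd = NativeAccess.instance().systemd();
            if (systemd != null) {
                systemd.notify_ready(); // sends READY=1 via sd_notify, throws on failure
            }
        }
    }
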
relates #104876 --- .../jna/JnaNativeLibraryProvider.java | 3 +- .../nativeaccess/jna/JnaSystemdLibrary.java | 31 +++++ libs/native/src/main/java/module-info.java | 2 +- .../nativeaccess/AbstractNativeAccess.java | 5 + .../nativeaccess/LinuxNativeAccess.java | 10 ++ .../nativeaccess/NativeAccess.java | 2 + .../nativeaccess/NoopNativeAccess.java | 6 + .../elasticsearch/nativeaccess/Systemd.java | 55 +++++++++ .../nativeaccess/lib/NativeLibrary.java | 2 +- .../nativeaccess/lib/SystemdLibrary.java | 13 ++ .../jdk/JdkNativeLibraryProvider.java | 3 +- .../nativeaccess/jdk/JdkSystemdLibrary.java | 65 ++++++++++ modules/systemd/build.gradle | 4 + .../systemd/src/main/java/module-info.java | 2 +- .../org/elasticsearch/systemd/Libsystemd.java | 38 ------ .../elasticsearch/systemd/SystemdPlugin.java | 50 ++++---- .../systemd/SystemdPluginTests.java | 113 +++++++++--------- 17 files changed, 281 insertions(+), 123 deletions(-) create mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java create mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java delete mode 100644 modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index a513e89b6a3b3..7d43cb2e3d4bb 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -10,11 +10,12 @@ import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import java.util.Map; public class JnaNativeLibraryProvider extends NativeLibraryProvider { public JnaNativeLibraryProvider() { - super("jna", Map.of(PosixCLibrary.class, JnaPosixCLibrary::new)); + super("jna", Map.of(PosixCLibrary.class, JnaPosixCLibrary::new, SystemdLibrary.class, JnaSystemdLibrary::new)); } } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java new file mode 100644 index 0000000000000..f06361e8807c5 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Native; + +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; + +class JnaSystemdLibrary implements SystemdLibrary { + private interface NativeFunctions extends Library { + int sd_notify(int unset_environment, String state); + } + + private final NativeFunctions functions; + + JnaSystemdLibrary() { + this.functions = Native.load("libsystemd.so.0", NativeFunctions.class); + } + + @Override + public int sd_notify(int unset_environment, String state) { + return functions.sd_notify(unset_environment, state); + } +} diff --git a/libs/native/src/main/java/module-info.java b/libs/native/src/main/java/module-info.java index dbbbebf5fd393..ea049ff888cb3 100644 --- a/libs/native/src/main/java/module-info.java +++ b/libs/native/src/main/java/module-info.java @@ -14,7 +14,7 @@ requires org.elasticsearch.base; requires org.elasticsearch.logging; - exports org.elasticsearch.nativeaccess to org.elasticsearch.server; + exports org.elasticsearch.nativeaccess to org.elasticsearch.server, org.elasticsearch.systemd; // allows jna to implement a library provider, and ProviderLocator to load it exports org.elasticsearch.nativeaccess.lib to org.elasticsearch.nativeaccess.jna, org.elasticsearch.base; diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java index 5f69101696884..fa23966dbeb79 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java @@ -24,4 +24,9 @@ protected AbstractNativeAccess(String name) { String getName() { return name; } + + @Override + public Systemd systemd() { + return null; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index f990dbdf2d9de..64f13c12f7735 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -9,9 +9,19 @@ package org.elasticsearch.nativeaccess; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; class LinuxNativeAccess extends PosixNativeAccess { + + Systemd systemd; + LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider); + this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); + } + + @Override + public Systemd systemd() { + return systemd; } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 5091c75041786..77b638690d1b9 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -26,4 +26,6 @@ static NativeAccess instance() { * @return true if running as root, or false if unsure */ boolean definitelyRunningAsRoot(); + + Systemd systemd(); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java index 2bc06f21c9775..6eb6145699fe7 100644 --- 
a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -19,4 +19,10 @@ public boolean definitelyRunningAsRoot() { logger.warn("Cannot check if running as root because native access is not available"); return false; } + + @Override + public Systemd systemd() { + logger.warn("Cannot get systemd access because native access is not available"); + return null; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java new file mode 100644 index 0000000000000..4deade118b788 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; + +import java.util.Locale; + +public class Systemd { + private static final Logger logger = LogManager.getLogger(Systemd.class); + + private final SystemdLibrary lib; + + Systemd(SystemdLibrary lib) { + this.lib = lib; + } + + /** + * Notify systemd that the process is ready. + * + * @throws RuntimeException on failure to notify systemd + */ + public void notify_ready() { + notify("READY=1", false); + } + + public void notify_extend_timeout(long seconds) { + notify("EXTEND_TIMEOUT_USEC=" + (seconds * 1000000), true); + } + + public void notify_stopping() { + notify("STOPPING=1", true); + } + + private void notify(String state, boolean warnOnError) { + int rc = lib.sd_notify(0, state); + logger.trace("sd_notify({}, {}) returned [{}]", 0, state, rc); + if (rc < 0) { + String message = String.format(Locale.ROOT, "sd_notify(%d, %s) returned error [%d]", 0, state, rc); + if (warnOnError) { + logger.warn(message); + } else { + throw new RuntimeException(message); + } + } + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index 39a4137aeb0f2..cf2116440a8bc 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,4 +9,4 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits PosixCLibrary {} +public sealed interface NativeLibrary permits PosixCLibrary, SystemdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java new file mode 100644 index 0000000000000..3c4ffefb6e41f --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +public non-sealed interface SystemdLibrary extends NativeLibrary { + int sd_notify(int unset_environment, String state); +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index 48364bce57fdb..b808dc3151058 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -10,12 +10,13 @@ import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import java.util.Map; public class JdkNativeLibraryProvider extends NativeLibraryProvider { public JdkNativeLibraryProvider() { - super("jdk", Map.of(PosixCLibrary.class, JdkPosixCLibrary::new)); + super("jdk", Map.of(PosixCLibrary.class, JdkPosixCLibrary::new, SystemdLibrary.class, JdkSystemdLibrary::new)); } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java new file mode 100644 index 0000000000000..682b94b6f4f74 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.SystemdLibrary; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.MemorySegment; +import java.lang.invoke.MethodHandle; +import java.nio.file.Files; +import java.nio.file.Paths; + +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkSystemdLibrary implements SystemdLibrary { + static { + System.load(findLibSystemd()); + } + + // On some systems libsystemd does not have a non-versioned symlink. System.loadLibrary only knows how to find + // non-versioned library files. So we must manually check the library path to find what we need. 
+ static String findLibSystemd() { + final String libsystemd = "libsystemd.so.0"; + String libpath = System.getProperty("java.library.path"); + for (String basepathStr : libpath.split(":")) { + var basepath = Paths.get(basepathStr); + if (Files.exists(basepath) == false) { + continue; + } + try (var stream = Files.walk(basepath)) { + var foundpath = stream.filter(Files::isDirectory).map(p -> p.resolve(libsystemd)).filter(Files::exists).findAny(); + if (foundpath.isPresent()) { + return foundpath.get().toAbsolutePath().toString(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + } + throw new UnsatisfiedLinkError("Could not find " + libsystemd + " in java.library.path: " + libpath); + } + + private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); + + @Override + public int sd_notify(int unset_environment, String state) { + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativeState = arena.allocateUtf8String(state); + return (int) sd_notify$mh.invokeExact(unset_environment, nativeState); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 0f5c2a4c2fb19..351211ffd3c0e 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -11,3 +11,7 @@ esplugin { classname 'org.elasticsearch.systemd.SystemdPlugin' } +dependencies { + implementation project(':libs:elasticsearch-native') +} + diff --git a/modules/systemd/src/main/java/module-info.java b/modules/systemd/src/main/java/module-info.java index bd92851fde3a6..b3f5b64ff312f 100644 --- a/modules/systemd/src/main/java/module-info.java +++ b/modules/systemd/src/main/java/module-info.java @@ -12,5 +12,5 @@ requires org.elasticsearch.xcontent; requires org.apache.logging.log4j; requires org.apache.lucene.core; - requires com.sun.jna; + requires org.elasticsearch.nativeaccess; } diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java deleted file mode 100644 index ba34a18c83e37..0000000000000 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/Libsystemd.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.systemd; - -import com.sun.jna.Native; - -import java.security.AccessController; -import java.security.PrivilegedAction; - -/** - * Provides access to the native method sd_notify from libsystemd. - */ -class Libsystemd { - - static { - AccessController.doPrivileged((PrivilegedAction) () -> { - Native.register(Libsystemd.class, "libsystemd.so.0"); - return null; - }); - } - - /** - * Notify systemd of state changes. 
- * - * @param unset_environment if non-zero, the NOTIFY_SOCKET environment variable will be unset before returning and further calls to - * sd_notify will fail - * @param state a new-line separated list of variable assignments; some assignments are understood directly by systemd - * @return a negative error code on failure, and positive if status was successfully sent - */ - static native int sd_notify(int unset_environment, String state); - -} diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index e3dca57472ade..947d1fa58e963 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -14,6 +14,8 @@ import org.elasticsearch.Build; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.nativeaccess.NativeAccess; +import org.elasticsearch.nativeaccess.Systemd; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.Scheduler; @@ -26,6 +28,7 @@ public class SystemdPlugin extends Plugin implements ClusterPlugin { private static final Logger logger = LogManager.getLogger(SystemdPlugin.class); private final boolean enabled; + private final Systemd systemd; final boolean isEnabled() { return enabled; @@ -44,18 +47,21 @@ public SystemdPlugin() { } if (isPackageDistribution == false) { logger.debug("disabling sd_notify as the build type [{}] is not a package distribution", buildType); - enabled = false; + this.enabled = false; + this.systemd = null; return; } logger.trace("ES_SD_NOTIFY is set to [{}]", esSDNotify); if (esSDNotify == null) { - enabled = false; + this.enabled = false; + this.systemd = null; return; } if (Boolean.TRUE.toString().equals(esSDNotify) == false && Boolean.FALSE.toString().equals(esSDNotify) == false) { throw new RuntimeException("ES_SD_NOTIFY set to unexpected value [" + esSDNotify + "]"); } - enabled = Boolean.TRUE.toString().equals(esSDNotify); + this.enabled = Boolean.TRUE.toString().equals(esSDNotify); + this.systemd = enabled ? NativeAccess.instance().systemd() : null; } private final SetOnce extender = new SetOnce<>(); @@ -77,19 +83,25 @@ public Collection createComponents(PluginServices services) { * Therefore, every fifteen seconds we send systemd a message via sd_notify to extend the timeout by thirty seconds. We will cancel * this scheduled task after we successfully notify systemd that we are ready. 
*/ - extender.set(services.threadPool().scheduleWithFixedDelay(() -> { - final int rc = sd_notify(0, "EXTEND_TIMEOUT_USEC=30000000"); - if (rc < 0) { - logger.warn("extending startup timeout via sd_notify failed with [{}]", rc); - } - }, TimeValue.timeValueSeconds(15), EsExecutors.DIRECT_EXECUTOR_SERVICE)); + extender.set( + services.threadPool() + .scheduleWithFixedDelay( + () -> { systemd.notify_extend_timeout(30); }, + TimeValue.timeValueSeconds(15), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) + ); return List.of(); } - int sd_notify(@SuppressWarnings("SameParameterValue") final int unset_environment, final String state) { - final int rc = Libsystemd.sd_notify(unset_environment, state); - logger.trace("sd_notify({}, {}) returned [{}]", unset_environment, state, rc); - return rc; + void notifyReady() { + assert systemd != null; + systemd.notify_ready(); + } + + void notifyStopping() { + assert systemd != null; + systemd.notify_stopping(); } @Override @@ -98,11 +110,7 @@ public void onNodeStarted() { assert extender.get() == null; return; } - final int rc = sd_notify(0, "READY=1"); - if (rc < 0) { - // treat failure to notify systemd of readiness as a startup failure - throw new RuntimeException("sd_notify returned error [" + rc + "]"); - } + notifyReady(); assert extender.get() != null; final boolean cancelled = extender.get().cancel(); assert cancelled; @@ -113,11 +121,7 @@ public void close() { if (enabled == false) { return; } - final int rc = sd_notify(0, "STOPPING=1"); - if (rc < 0) { - // do not treat failure to notify systemd of stopping as a failure - logger.warn("sd_notify returned error [{}]", rc); - } + notifyStopping(); } } diff --git a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java index c2d0983e4f825..712483e9c603c 100644 --- a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java @@ -21,16 +21,14 @@ import java.io.IOException; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; @@ -104,83 +102,68 @@ public void testInvalid() { } public void testOnNodeStartedSuccess() { - runTestOnNodeStarted(Boolean.TRUE.toString(), randomIntBetween(0, Integer.MAX_VALUE), (maybe, plugin) -> { + runTestOnNodeStarted(Boolean.TRUE.toString(), false, (maybe, plugin) -> { assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedReady.get(), is(true)); verify(plugin.extender()).cancel(); }); } public void testOnNodeStartedFailure() { - final int rc = randomIntBetween(Integer.MIN_VALUE, -1); - runTestOnNodeStarted( - Boolean.TRUE.toString(), - rc, - (maybe, plugin) -> assertThat( - maybe, - isPresentWith( - allOf(instanceOf(RuntimeException.class), hasToString(containsString("sd_notify returned error [" 
+ rc + "]"))) - ) - ) - ); + runTestOnNodeStarted(Boolean.TRUE.toString(), true, (maybe, plugin) -> { + assertThat(maybe, isPresentWith(allOf(instanceOf(RuntimeException.class), hasToString(containsString("notify ready failed"))))); + assertThat(plugin.invokedReady.get(), is(true)); + }); } public void testOnNodeStartedNotEnabled() { - runTestOnNodeStarted(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); + runTestOnNodeStarted(Boolean.FALSE.toString(), randomBoolean(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); } private void runTestOnNodeStarted( final String esSDNotify, - final int rc, - final BiConsumer, SystemdPlugin> assertions + final boolean invokeFailure, + final BiConsumer, TestSystemdPlugin> assertions ) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::onNodeStarted, "READY=1"); + runTest(esSDNotify, invokeFailure, assertions, SystemdPlugin::onNodeStarted); } public void testCloseSuccess() { - runTestClose( - Boolean.TRUE.toString(), - randomIntBetween(1, Integer.MAX_VALUE), - (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty()) - ); + runTestClose(Boolean.TRUE.toString(), false, (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(true)); + }); } public void testCloseFailure() { - runTestClose( - Boolean.TRUE.toString(), - randomIntBetween(Integer.MIN_VALUE, -1), - (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty()) - ); + runTestClose(Boolean.TRUE.toString(), true, (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(true)); + }); } public void testCloseNotEnabled() { - runTestClose(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); + runTestClose(Boolean.FALSE.toString(), randomBoolean(), (maybe, plugin) -> { + assertThat(maybe, OptionalMatchers.isEmpty()); + assertThat(plugin.invokedStopping.get(), is(false)); + }); } - private void runTestClose(final String esSDNotify, final int rc, final BiConsumer, SystemdPlugin> assertions) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::close, "STOPPING=1"); + private void runTestClose( + final String esSDNotify, + boolean invokeFailure, + final BiConsumer, TestSystemdPlugin> assertions + ) { + runTest(esSDNotify, invokeFailure, assertions, SystemdPlugin::close); } private void runTest( final String esSDNotify, - final int rc, - final BiConsumer, SystemdPlugin> assertions, - final CheckedConsumer invocation, - final String expectedState + final boolean invokeReadyFailure, + final BiConsumer, TestSystemdPlugin> assertions, + final CheckedConsumer invocation ) { - final AtomicBoolean invoked = new AtomicBoolean(); - final AtomicInteger invokedUnsetEnvironment = new AtomicInteger(); - final AtomicReference invokedState = new AtomicReference<>(); - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, esSDNotify) { - - @Override - int sd_notify(final int unset_environment, final String state) { - invoked.set(true); - invokedUnsetEnvironment.set(unset_environment); - invokedState.set(state); - return rc; - } - - }; + final TestSystemdPlugin plugin = new TestSystemdPlugin(esSDNotify, invokeReadyFailure); startPlugin(plugin); if (Boolean.TRUE.toString().equals(esSDNotify)) { assertNotNull(plugin.extender()); @@ -198,13 +181,29 @@ int sd_notify(final int unset_environment, final String state) { if (success) { 
assertions.accept(Optional.empty(), plugin); } - if (Boolean.TRUE.toString().equals(esSDNotify)) { - assertTrue(invoked.get()); - assertThat(invokedUnsetEnvironment.get(), equalTo(0)); - assertThat(invokedState.get(), equalTo(expectedState)); - } else { - assertFalse(invoked.get()); - } } + class TestSystemdPlugin extends SystemdPlugin { + final AtomicBoolean invokedReady = new AtomicBoolean(); + final AtomicBoolean invokedStopping = new AtomicBoolean(); + final boolean invokeReadyFailure; + + TestSystemdPlugin(String esSDNotify, boolean invokeFailure) { + super(false, randomPackageBuildType, esSDNotify); + this.invokeReadyFailure = invokeFailure; + } + + @Override + void notifyReady() { + invokedReady.set(true); + if (invokeReadyFailure) { + throw new RuntimeException("notify ready failed"); + } + } + + @Override + void notifyStopping() { + invokedStopping.set(true); + } + } } From e42e47e803001bf6881d9586774698e456f5ac6f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 12 Mar 2024 09:07:20 -0700 Subject: [PATCH 138/248] Rename LocalExchangeSink and LocalExchangeSource (#106245) These should not have the term Local. --- .../compute/operator/exchange/ExchangeSinkHandler.java | 6 +++--- .../compute/operator/exchange/ExchangeSourceHandler.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index d1a2b8710cd23..945fdff50d31c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -49,10 +49,10 @@ public ExchangeSinkHandler(BlockFactory blockFactory, int maxBufferSize, LongSup this.lastUpdatedInMillis = new AtomicLong(nowInMillis.getAsLong()); } - private class LocalExchangeSink implements ExchangeSink { + private class ExchangeSinkImpl implements ExchangeSink { boolean finished; - LocalExchangeSink() { + ExchangeSinkImpl() { onChanged(); outstandingSinks.incrementAndGet(); } @@ -155,7 +155,7 @@ private void notifyListeners() { * @see ExchangeSinkOperator */ public ExchangeSink createExchangeSink() { - return new LocalExchangeSink(); + return new ExchangeSinkImpl(); } /** diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 859b1fc73c3e1..7492fa8c19385 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -44,10 +44,10 @@ public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { this.outstandingSources = new PendingInstances(() -> buffer.finish(true)); } - private class LocalExchangeSource implements ExchangeSource { + private class ExchangeSourceImpl implements ExchangeSource { private boolean finished; - LocalExchangeSource() { + ExchangeSourceImpl() { outstandingSources.trackNewInstance(); } @@ -95,7 +95,7 @@ public int bufferSize() { * @see ExchangeSinkOperator */ public ExchangeSource createExchangeSource() { - return new LocalExchangeSource(); + return new 
ExchangeSourceImpl(); } /** From 39779b73dd4fbd4383dd9c00c70f21298d8821ea Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 12 Mar 2024 17:11:17 +0100 Subject: [PATCH 139/248] Fix testUpgradesLegitimateVersions (#106248) DefaultBuildVersion has an assertion to only accept versions that are >= 0. This change updates a test generator to create versions accordingly. Closes: #106213 --- .../src/test/java/org/elasticsearch/env/NodeMetadataTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index 46d6beb56138b..f60812977d578 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -33,7 +33,7 @@ public class NodeMetadataTests extends ESTestCase { // (Index)VersionUtils.randomVersion() only returns known versions, which are necessarily no later than (Index)Version.CURRENT; // however we want to also consider our behaviour with all versions, so occasionally pick up a truly random version. private Version randomVersion() { - return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); + return rarely() ? Version.fromId(randomNonNegativeInt()) : VersionUtils.randomVersion(random()); } private BuildVersion randomBuildVersion() { From bfbb155985e6ccf50af87c70231460ae44361e08 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 12 Mar 2024 18:38:39 +0200 Subject: [PATCH 140/248] [TEST] Set logging level in SnapshotResiliencyTests (#106238) * Set logging level in SnapshotResiliencyTests Some tests check that the expected INFO message gets logged, so they require setting the level to INFO for SnapshotsService. 
* spotless fixes --- .../org/elasticsearch/snapshots/SnapshotResiliencyTests.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index edde9f0164a6e..0a53db94b9aaf 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -188,6 +188,7 @@ import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.transport.DisruptableMockTransport; @@ -1390,6 +1391,7 @@ public TransportRequestHandler interceptHandler( safeAwait(testListener); // shouldn't throw } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testFullSnapshotUnassignedShards() { setupTestCluster(1, 0); // no data nodes, we want unassigned shards @@ -1469,6 +1471,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testSnapshotNameAlreadyInUseExceptionLogging() { setupTestCluster(1, 1); @@ -1519,6 +1522,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testIndexNotFoundExceptionLogging() { setupTestCluster(1, 0); // no need for data nodes here @@ -1571,6 +1575,7 @@ public void onFailure(Exception e) { ); } + @TestLogging(reason = "testing logging at INFO level", value = "org.elasticsearch.snapshots.SnapshotsService:INFO") public void testIllegalArgumentExceptionLogging() { setupTestCluster(1, 0); // no need for data nodes here From 68b0acac8f9520037db0150f4863ea91ab4c45ee Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 12 Mar 2024 10:11:55 -0700 Subject: [PATCH 141/248] Add retrievers using the parser-only approach (#105470) This enhancement adds a new abstraction to the _search API called "retriever." A retriever is something that returns top hits. This adds three initial retrievers called "standard", "knn", and "rrf". The retrievers use a parser-only approach where they are parsed and then translated into a SearchSourceBuilder to execute the actual search. 
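To make the translation step concrete, here is a minimal sketch of what a custom retriever could look like. It is not code from this patch: the class name, the `match_all` behaviour, and the stubbed XContent/equality hooks are invented for illustration, and it assumes the abstract methods visible in this change are the complete set. A plugin would then expose such a builder through the new `SearchPlugin#getRetrievers` extension point added further down in this patch.

[source,java]
----
import java.io.IOException;

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.retriever.RetrieverBuilder;
import org.elasticsearch.xcontent.XContentBuilder;

// Illustrative only: a hypothetical retriever that rewrites itself into a match_all query.
public class MatchAllRetrieverBuilder extends RetrieverBuilder {

    @Override
    public String getName() {
        return "match_all_retriever"; // hypothetical retriever name
    }

    @Override
    public void extractToSearchSourceBuilder(SearchSourceBuilder source, boolean compoundUsed) {
        // A retriever executes nothing itself; it only contributes elements
        // (here a query) to the SearchSourceBuilder that runs the actual search.
        source.query(QueryBuilders.matchAllQuery());
    }

    // XContent rendering and equality hooks required by the abstract base class,
    // stubbed out to keep the sketch short.
    @Override
    public void doToXContent(XContentBuilder builder, Params params) throws IOException {}

    @Override
    public boolean doEquals(Object o) {
        return o instanceof MatchAllRetrieverBuilder;
    }

    @Override
    public int doHashCode() {
        return MatchAllRetrieverBuilder.class.hashCode();
    }
}
----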
--------- Co-authored-by: Mayya Sharipova --- docs/changelog/105470.yaml | 5 + .../query-dsl/text-expansion-query.asciidoc | 74 +-- docs/reference/rest-api/common-parms.asciidoc | 31 ++ docs/reference/search.asciidoc | 2 + docs/reference/search/retriever.asciidoc | 225 ++++++++ docs/reference/search/rrf.asciidoc | 228 ++++---- .../search-application-api.asciidoc | 50 +- docs/reference/search/search.asciidoc | 11 + .../semantic-search/hybrid-search.asciidoc | 103 ++-- .../10_standard_retriever.yml | 519 ++++++++++++++++++ .../search.retrievers/20_knn_retriever.yml | 73 +++ server/src/main/java/module-info.java | 4 +- .../elasticsearch/plugins/SearchPlugin.java | 50 ++ .../elasticsearch/search/SearchModule.java | 24 + .../search/builder/SearchSourceBuilder.java | 47 +- .../search/collapse/CollapseBuilder.java | 5 + .../search/retriever/KnnRetrieverBuilder.java | 171 ++++++ .../search/retriever/RetrieverBuilder.java | 234 ++++++++ .../search/retriever/RetrieverParser.java | 30 + .../retriever/RetrieverParserContext.java | 42 ++ .../search/retriever/RetrieversFeatures.java | 30 + .../retriever/StandardRetrieverBuilder.java | 229 ++++++++ .../searchafter/SearchAfterBuilder.java | 5 +- ...lasticsearch.features.FeatureSpecification | 1 + .../KnnRetrieverBuilderParsingTests.java | 82 +++ .../retriever/RetrieverBuilderErrorTests.java | 102 ++++ .../RetrieverBuilderVersionTests.java | 80 +++ .../StandardRetrieverBuilderParsingTests.java | 112 ++++ .../searchafter/SearchAfterBuilderTests.java | 31 +- .../search/vectors/KnnSearchBuilderTests.java | 2 +- .../retriever/TestRetrieverBuilder.java | 93 ++++ .../rank-rrf/src/main/java/module-info.java | 4 + .../xpack/rank/rrf/RRFFeatures.java | 24 + .../xpack/rank/rrf/RRFRankPlugin.java | 5 + .../xpack/rank/rrf/RRFRetrieverBuilder.java | 134 +++++ ...lasticsearch.features.FeatureSpecification | 8 + .../rrf/RRFRetrieverBuilderParsingTests.java | 88 +++ .../rank/rrf/RRFRetrieverBuilderTests.java | 151 +++++ .../test/license/100_license.yml | 38 +- .../test/rrf/300_rrf_retriever.yml | 331 +++++++++++ .../test/rrf/400_rrf_retriever_script.yml | 342 ++++++++++++ 41 files changed, 3609 insertions(+), 211 deletions(-) create mode 100644 docs/changelog/105470.yaml create mode 100644 docs/reference/search/retriever.asciidoc create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/10_standard_retriever.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RetrieverParser.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RetrieverParserContext.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderErrorTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java create mode 100644 
server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java create mode 100644 x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java create mode 100644 x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java create mode 100644 x-pack/plugin/rank-rrf/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification create mode 100644 x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderParsingTests.java create mode 100644 x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java create mode 100644 x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml create mode 100644 x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml diff --git a/docs/changelog/105470.yaml b/docs/changelog/105470.yaml new file mode 100644 index 0000000000000..56425de6c88e4 --- /dev/null +++ b/docs/changelog/105470.yaml @@ -0,0 +1,5 @@ +pr: 105470 +summary: Add retrievers using the parser-only approach +area: Ranking +type: enhancement +issues: [] diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index cb0a7c6ea9c01..927b5d0a85886 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -155,47 +155,55 @@ GET my-index/_search ---- // TEST[skip: TBD] -This can also be achieved by using sub searches combined with <>. +This can also be achieved using <>, +through an <> with multiple +<>. [source,console] ---- GET my-index/_search { - "sub_searches": [ - { - "query": { - "multi_match": { - "query": "How is the weather in Jamaica?", - "fields": [ - "title", - "description" - ] - } - } - }, - { - "query": { - "text_expansion": { - "ml.inference.title_expanded.predicted_value": { - "model_id": ".elser_model_2", - "model_text": "How is the weather in Jamaica?" + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "multi_match": { + "query": "How is the weather in Jamaica?", + "fields": [ + "title", + "description" + ] + } + } } - } - } - }, - { - "query": { - "text_expansion": { - "ml.inference.description_expanded.predicted_value": { - "model_id": ".elser_model_2", - "model_text": "How is the weather in Jamaica?" + }, + { + "standard": { + "query": { + "text_expansion": { + "ml.inference.title_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } + } + }, + { + "standard": { + "query": { + "text_expansion": { + "ml.inference.description_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } } } - } - } - ], - "rank": { - "rrf": { + ], "window_size": 10, "rank_constant": 20 } diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index a5ab70ae85181..6757b6be24207 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1281,3 +1281,34 @@ Default: 1, the primary shard. See <>. 
-- end::wait_for_active_shards[] + +tag::rrf-retrievers[] +`retrievers`:: +(Required, array of retriever objects) ++ +A list of child retrievers to specify which sets of returned top documents +will have the RRF formula applied to them. Each child retriever carries an +equal weight as part of the RRF formula. Two or more child retrievers are +required. +end::rrf-retrievers[] + +tag::rrf-rank-constant[] +`rank_constant`:: +(Optional, integer) ++ +This value determines how much influence documents in individual +result sets per query have over the final ranked result set. A higher value indicates +that lower ranked documents have more influence. This value must be greater than or +equal to `1`. Defaults to `60`. +end::rrf-rank-constant[] + +tag::rrf-window-size[] +`window_size`:: +(Optional, integer) ++ +This value determines the size of the individual result sets per +query. A higher value will improve result relevance at the cost of performance. The final +ranked result set is pruned down to the search request's <>. +`window_size` must be greater than or equal to `size` and greater than or equal to `1`. +Defaults to the `size` parameter. +end::rrf-window-size[] diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index aa5b4c9aac9b7..b39afff876eed 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -52,6 +52,8 @@ include::search/point-in-time-api.asciidoc[] include::search/knn-search.asciidoc[] +include::search/retriever.asciidoc[] + include::search/rrf.asciidoc[] include::search/scroll-api.asciidoc[] diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc new file mode 100644 index 0000000000000..a872f1866f42c --- /dev/null +++ b/docs/reference/search/retriever.asciidoc @@ -0,0 +1,225 @@ +[[retriever]] +=== Retriever API + +preview::["This functionality is in technical preview and may be changed or removed in a future release. The syntax will likely change before GA. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +A retriever is a specification to describe top documents returned from a +search. A retriever replaces other elements of the <> +that also return top documents such as <> and +<>. A retriever may have child retrievers where a +retriever with two or more children is considered a compound retriever. This +allows for complex behavior to be depicted in a tree-like structure, called +the retriever tree, to better clarify the order of operations that occur +during a search. + +The following retrievers are available: + +`standard`:: +A <> that replaces the functionality of a traditional <>. + +`knn`:: +A <> that replaces the functionality of a <>. + +`rrf`:: +A <> that produces top documents from <>. + +[[standard-retriever]] +==== Standard Retriever + +A standard retriever returns top documents from a traditional <>. + +===== Parameters: + +`query`:: +(Optional, <>) ++ +Defines a query to retrieve a set of top documents. + +`filter`:: +(Optional, <>) ++ +Applies a <> to this retriever +where all documents must match this query but do not contribute to the score. + +`search_after`:: +(Optional, <>) ++ +Defines a search after object parameter used for pagination. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=terminate_after] + +`sort`:: ++ +(Optional, <>) +A sort object that that specifies the order of matching documents. 
+ +`min_score`:: +(Optional, `float`) ++ +Minimum <> for matching documents. Documents with a +lower `_score` are not included in the top documents. + +`collapse`:: +(Optional, <>) ++ +Collapses the top documents by a specified key into a single top document per key. + +===== Restrictions + +When a retriever tree contains a compound retriever (a retriever with two or more child +retrievers) *only* the query element is allowed. + +===== Example + +[source,js] +---- +GET /index/_search +{ + "retriever": { + "standard": { + "query" { ... }, + "filter" { ... }, + "min_score": ... + } + }, + "size": ... +} +---- +// NOTCONSOLE + +[[knn-retriever]] +==== kNN Retriever + +A kNN retriever returns top documents from a <>. + +===== Parameters + +`field`:: +(Required, string) ++ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-field] + +`query_vector`:: +(Required if `query_vector_builder` is not defined, array of `float`) ++ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] + +`query_vector_builder`:: +(Required if `query_vector` is not defined, query vector builder object) ++ +Defines a <> to build a query vector. + +`k`:: +(Required, integer) ++ +Number of nearest neighbors to return as top hits. This value must be fewer than +or equal to `num_candidates`. + +`num_candidates`:: +(Required, integer) ++ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] + +`filter`:: +(Optional, <>) ++ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-filter] + +`similarity`:: +(Optional, float) ++ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-similarity] + +===== Restrictions + +The parameters `query_vector` and `query_vector_builder` cannot be used together. + +===== Example: + +[source,js] +---- +GET /index/_search +{ + "retriever": { + "knn": { + "field": ..., + "query_vector": ..., + "k": ..., + "num_candidates": ... + } + } +} +---- +// NOTCONSOLE + +[[rrf-retriever]] +==== RRF Retriever + +An <> retriever returns top documents based on the RRF formula +equally weighting two or more child retrievers. + +===== Parameters + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] + +===== Restrictions + +An RRF retriever is a compound retriever. Child retrievers may not use +elements that are restricted by having a compound retriever as part of +the retriever tree. + +===== Example + +[source,js] +---- +GET /index/_search +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "standard" { ... } + }, + { + "knn": { ... } + } + ], + "rank_constant": ... + "window_size": ... + } + } +} +---- +// NOTCONSOLE + +==== Using `from` and `size` with a retriever tree + +The <> and <> +parameters are provided globally as part of the general +<>. They are applied to all retrievers in a +retriever tree unless a specific retriever overrides the `size` parameter +using a different parameter such as `window_size`. Though, the final +search hits are always limited to `size`. + +==== Using aggregations with a retriever tree + +<> are globally specified as part of a search request. +The query used for an aggregation is the combination of all leaf retrievers as `should` +clauses in a <>. 
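As a rough sketch of that combination (not the literal server implementation), two hypothetical leaf retriever queries would be joined into a single boolean query over which the aggregations run; both leaf queries below are illustrative stand-ins.

[source,java]
----
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class RetrieverAggregationQuerySketch {
    public static void main(String[] args) {
        // Stand-ins for the queries of two leaf retrievers (purely illustrative).
        var firstLeaf = QueryBuilders.termQuery("text", "shoes");
        var secondLeaf = QueryBuilders.matchQuery("description", "blue shoes on sale");

        // Conceptually, aggregations are computed over the disjunction of all leaves.
        BoolQueryBuilder aggregationQuery = QueryBuilders.boolQuery()
            .should(firstLeaf)
            .should(secondLeaf);

        System.out.println(aggregationQuery); // prints the combined bool query as JSON
    }
}
----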
+ +==== Restrictions on search parameters when specifying a retriever + +When a retriever is specified as part of a search the following elements are not allowed +at the top-level and instead are only allowed as elements of specific retrievers: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index b162083ebb926..813d1f6557bed 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -32,28 +32,20 @@ return score ==== Reciprocal rank fusion API You can use RRF as part of a <> to combine and rank -documents using result sets from a combination of -<>, -<>, and/or -<>. A minimum of 2 results sets -is required for ranking from the specified sources. - -The `rrf` parameter is an optional object defined as part of a search request's -<>. The `rrf` object contains the following -parameters: - -`rank_constant`:: -(Optional, integer) This value determines how much influence documents in individual -result sets per query have over the final ranked result set. A higher value indicates -that lower ranked documents have more influence. This value must be greater than or -equal to `1`. Defaults to `60`. - -`window_size`:: -(Optional, integer) This value determines the size of the individual result sets per -query. A higher value will improve result relevance at the cost of performance. The final -ranked result set is pruned down to the search request's <>. -`window_size` must be greater than or equal to `size` and greater than or equal to `1`. -Defaults to `100`. +documents using separate sets of top documents (result sets) from a +combination of <> using an +<>. A minimum of *two* child retrievers is +required for ranking. + +An RRF retriever is an optional object defined as part of a search request's +<>. The RRF retriever object contains +the following parameters: + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] An example request using RRF: @@ -61,19 +53,27 @@ An example request using RRF: ---- GET example-index/_search { - "query": { - "term": { - "text": "shoes" - } - }, - "knn": { - "field": "vector", - "query_vector": [1.25, 2, 3.5], - "k": 50, - "num_candidates": 100 - }, - "rank": { - "rrf": { + "retriever": { + "rrf": { <3> + "retrievers": [ + { + "standard": { <2> + "query": { + "term": { + "text": "shoes" + } + } + } + }, + { + "knn": { <1> + "field": "vector", + "query_vector": [1.25, 2, 3.5], + "k": 50, + "num_candidates": 100 + } + } + ], "window_size": 50, "rank_constant": 20 } @@ -82,10 +82,17 @@ GET example-index/_search ---- // TEST[skip:example fragment] -In the above example, we first execute the kNN search to get its global top 50 results. -Then we execute the query to get its global top 50 results. Afterwards, on a coordinating -node, we combine the knn search results with the query results and rank them based on the -RRF method to get the final top 10 results. +In the above example, we execute the `knn` and `standard` retrievers +independently of each other. Then we use the `rrf` retriever to combine +the results. + +<1> First, we execute the kNN search specified by the `knn` retriever to +get its global top 50 results. +<2> Second, we execute the query specified by the `standard` retriever to get +its global top 50 results. 
+<3> Then, on a coordinating node, we combine the kNN search top documents with +the query top documents and rank them based on the RRF formula using parameters from +the `rrf` retriever to get the combined top documents using the default `size` of `10`. Note that if `k` from a knn search is larger than `window_size`, the results are truncated to `window_size`. If `k` is smaller than `window_size`, the results are @@ -94,13 +101,12 @@ truncated to `window_size`. If `k` is smaller than `window_size`, the results ar [[rrf-supported-features]] ==== Reciprocal rank fusion supported features -RRF does support: +The `rrf` retriever supports: -* <> * <> * <> -RRF does not currently support: +The `rrf` retriever does not currently support: * <> * <> @@ -112,42 +118,48 @@ RRF does not currently support: * <> * <> -Using unsupported features as part of a search with RRF results +Using unsupported features as part of a search with an `rrf` retriever results in an exception. -[[rrf-using-sub-searches]] -==== Reciprocal rank fusion using sub searches +[[rrf-using-multiple-standard-retrievers]] +==== Reciprocal rank fusion using multiple standard retrievers -<> provides a way to -combine and rank multiple searches using RRF. +The `rrf` retriever provides a way to combine and rank multiple +`standard` retrievers. A primary use case is combining top documents +from a traditional BM25 query and an <> +query to achieve improved relevance. -An example request using RRF with sub searches: +An example request using RRF with multiple standard retrievers: [source,console] ---- GET example-index/_search { - "sub_searches": [ - { - "query": { - "term": { - "text": "blue shoes sale" - } - } - }, - { - "query": { - "text_expansion":{ - "ml.tokens":{ - "model_id":"my_elser_model", - "model_text":"What blue shoes are on sale?" - } + "retriever": { + "rrf": { <3> + "retrievers": [ + { + "standard": { <1> + "query": { + "term": { + "text": "blue shoes sale" + } + } + } + }, + { + "standard": { <2> + "query": { + "text_expansion":{ + "ml.tokens":{ + "model_id":"my_elser_model", + "model_text":"What blue shoes are on sale?" + } + } + } + } } - } - } - ], - "rank": { - "rrf": { + ], "window_size": 50, "rank_constant": 20 } @@ -156,17 +168,31 @@ GET example-index/_search ---- // TEST[skip:example fragment] -In the above example, we execute each of the two sub searches -independently of each other. First we run the term query for -`blue shoes sales` using the standard BM25 scoring algorithm. Then -we run the text expansion query for `What blue shoes are on sale?` +In the above example, we execute each of the two `standard` retrievers +independently of each other. Then we use the `rrf` retriever to combine +the results. + +<1> First we run the `standard` retriever +specifying a term query for `blue shoes sales` using the standard BM25 +scoring algorithm. +<2> Next we run the `standard` retriever specifying a +text expansion query for `What blue shoes are on sale?` using our <> scoring algorithm. -RRF allows us to combine the two results sets generated by completely -independent scoring algorithms with equal weighting. Not only does this -remove the need to figure out what the appropriate weighting would be -using linear combination, but RRF is also shown to give improved +<3> The `rrf` retriever allows us to combine the two top documents sets +generated by completely independent scoring algorithms with equal weighting. 
+ +Not only does this remove the need to figure out what the appropriate +weighting is using linear combination, but RRF is also shown to give improved relevance over either query individually. +[[rrf-using-sub-searches]] +==== Reciprocal rank fusion using sub searches + +RRF using sub searches is no longer supported. Use the +<> instead. See +<> +for an example. + [[rrf-full-example]] ==== Reciprocal rank fusion full example @@ -179,7 +205,7 @@ to explain. ---- PUT example-index { - "mappings": { + "mappings": { "properties": { "text" : { "type" : "text" @@ -234,26 +260,35 @@ POST example-index/_refresh ---- // TEST -We now execute a search using RRF with a query, a kNN search, and +We now execute a search using an `rrf` retriever with a `standard` retriever +specifying a BM25 query, a `knn` retriever specifying a kNN search, and a terms aggregation. [source,console] ---- GET example-index/_search { - "query": { - "term": { - "text": "rrf" - } - }, - "knn": { - "field": "vector", - "query_vector": [3], - "k": 5, - "num_candidates": 5 - }, - "rank": { + "retriever": { "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "term": { + "text": "rrf" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [3], + "k": 5, + "num_candidates": 5 + } + } + ], "window_size": 5, "rank_constant": 1 } @@ -351,10 +386,11 @@ use `_rank` to show our top-ranked documents. // TESTRESPONSE[s/: \.\.\./: $body.$_path/] Let's break down how these hits were ranked. We -start by running the query and the kNN search -separately to collect what their individual hits are. +start by running the `standard` retriever specifying a query +and the `knn` retriever specifying a kNN search separately to +collect what their individual hits are. -First, we look at the hits for the query. +First, we look at the hits for the query from the `standard` retriever. [source,console-result] ---- @@ -407,7 +443,7 @@ First, we look at the hits for the query. <4> rank 4, `_id` 1 Note that our first hit doesn't have a value for the `vector` field. Now, -we look at the results for the kNN search. +we look at the results for the kNN search from the `knn` retriever. [source,console-result] ---- @@ -460,7 +496,8 @@ we look at the results for the kNN search. <4> rank 4, `_id` 5 We can now take the two individually ranked result sets and apply the -RRF formula to them to get our final ranking. +RRF formula to them using parameters from the `rrf` retriever to get +our final ranking. [source,python] ---- @@ -478,4 +515,3 @@ truncating the bottom `2` docs in our RRF result set with a `size` of `3`. We end with `_id: 3` as `_rank: 1`, `_id: 2` as `_rank: 2`, and `_id: 4` as `_rank: 3`. This ranking matches the result set from the original RRF search as expected. 
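For readers who want to check the arithmetic programmatically, the following standalone sketch recomputes the RRF scores for the example above. The two ranked id lists are taken from the callouts on the individual result sets, and `rank_constant` is the value `1` used in the request; the code itself is illustrative and not part of this patch.

[source,java]
----
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RrfScoreSketch {
    public static void main(String[] args) {
        int rankConstant = 1;
        List<List<String>> rankedResults = List.of(
            List.of("4", "3", "2", "1"),   // standard retriever hits, best rank first
            List.of("3", "2", "1", "5")    // knn retriever hits, best rank first
        );

        // score(d) = sum over result sets of 1 / (rank_constant + rank(d))
        Map<String, Double> rrfScores = new LinkedHashMap<>();
        for (List<String> result : rankedResults) {
            for (int rank = 1; rank <= result.size(); rank++) {
                String id = result.get(rank - 1);
                rrfScores.merge(id, 1.0 / (rankConstant + rank), Double::sum);
            }
        }

        // Prints _id 3, then 2, then 4, then 1, then 5, matching the final ranking above.
        rrfScores.entrySet().stream()
            .sorted(Map.Entry.<String, Double>comparingByValue().reversed())
            .forEach(e -> System.out.println("_id: " + e.getKey() + " -> " + e.getValue()));
    }
}
----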
- diff --git a/docs/reference/search/search-your-data/search-application-api.asciidoc b/docs/reference/search/search-your-data/search-application-api.asciidoc index 29624a5bcd83a..0b7510d20658d 100644 --- a/docs/reference/search/search-your-data/search-application-api.asciidoc +++ b/docs/reference/search/search-your-data/search-application-api.asciidoc @@ -215,31 +215,35 @@ PUT _application/search_application/my-search-app "lang": "mustache", "source": """ { - "sub_searches": [ - {{#text_fields}} - { - "query": { - "match": { - "{{.}}": "{{query_string}}" - } - } - }, - {{/text_fields}} - {{#elser_fields}} - { - "query": { - "text_expansion": { - "ml.inference.{{.}}_expanded.predicted_value": { - "model_text": "{{query_string}}", - "model_id": "" + "retriever": { + "rrf": { + "retrievers": [ + {{#text_fields}} + { + "standard": { + "query": { + "match": { + "{{.}}": "{{query_string}}" + } + } + } + }, + {{/text_fields}} + {{#elser_fields}} + { + "standard": { + "query": { + "text_expansion": { + "ml.inference.{{.}}_expanded.predicted_value": { + "model_text": "{{query_string}}", + "model_id": "" + } + } + } } } - } - }, - {{/elser_fields}} - ], - "rank": { - "rrf": { + {{/elser_fields}} + ], "window_size": {{rrf.window_size}}, "rank_constant": {{rrf.rank_constant}} } diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index cd7d496d89d7b..5dae64916dd6b 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -558,12 +558,20 @@ Period of time used to extend the life of the PIT. (Optional, <>) Defines the search definition using the <>. +[[request-body-retriever]] +`retriever`:: +preview:[] +(Optional, <>) Defines a top-level retriever to specify +a desired set of top documents instead of a standard query or knn search. + [[request-body-rank]] `rank`:: preview:[] This param is in technical preview and may change in the future. The syntax will likely change before GA. + +This parameter is deprecated and will be removed. Use <> instead. ++ (Optional, object) Defines a method for combining and ranking result sets from a combination of <>, @@ -731,6 +739,8 @@ preview:[] This param is in technical preview and may change in the future. The syntax will likely change before GA. + +This parameter is deprecated and will be removed. Use <> instead. ++ (Optional, array of objects) An array of `sub_search` objects where each `sub_search` is evaluated independently, and their result sets are later combined as part of @@ -752,6 +762,7 @@ with a top-level <> element. ---- // NOTCONSOLE +[[request-body-search-terminate-after]] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=terminate_after] + Defaults to `0`, which does not terminate query execution early. diff --git a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc index f7d9ee1ad6443..47403df450bd2 100644 --- a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc @@ -1,36 +1,41 @@ // tag::elser[] -Hybrid search between a semantic and lexical query can be achieved by using a -`sub_searches` clause in your search request. In the `sub_searches` clause, -provide a `text_expansion` query and a full-text query. Next to the -`sub_searches` clause, also provide a <> clause with -the `rrf` parameter to rank documents using reciprocal rank fusion. 
+Hybrid search between a semantic and lexical query can be achieved by using an +< as part of your search request. Provide a +`text_expansion` query and a full-text query as +<> for the `rrf` retriever. The `rrf` +retriever uses <> to rank the top documents. [source,console] ---- GET my-index/_search { - "sub_searches": [ - { - "query": { - "match": { - "my_text_field": "the query string" - } - } - }, - { - "query": { - "text_expansion": { - "my_tokens": { - "model_id": ".elser_model_2", - "model_text": "the query string" + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "match": { + "my_text_field": "the query string" + } + } + } + }, + { + "standard": { + "query": { + "text_expansion": { + "my_tokens": { + "model_id": ".elser_model_2", + "model_text": "the query string" + } + } + } } } - } + ] } - ], - "rank": { - "rrf": {} } } ---- @@ -43,36 +48,44 @@ GET my-index/_search Hybrid search between a semantic and lexical query can be achieved by providing: -* a `query` clause for the full-text query; -* a `knn` clause with the kNN search that queries the dense vector field; -* and a `rank` clause with the `rrf` parameter to rank documents using -reciprocal rank fusion. +* an `rrf` retriever to rank top documents using <> +* a `standard` retriever as a child retriever with `query` clause for the full-text query +* a `knn` retriever as a child retriever with the kNN search that queries the dense vector field [source,console] ---- GET my-index/_search { - "query": { - "match": { - "my_text_field": "the query string" - } - }, - "knn": { - "field": "text_embedding.predicted_value", - "k": 10, - "num_candidates": 100, - "query_vector_builder": { - "text_embedding": { - "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", - "model_text": "the query string" - } + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "match": { + "my_text_field": "the query string" + } + } + } + }, + { + "knn": { + "field": "text_embedding.predicted_value", + "k": 10, + "num_candidates": 100, + "query_vector_builder": { + "text_embedding": { + "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", + "model_text": "the query string" + } + } + } + } + ] } - }, - "rank": { - "rrf": {} } } ---- // TEST[skip:TBD] -// end::dense-vector[] \ No newline at end of file +// end::dense-vector[] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/10_standard_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/10_standard_retriever.yml new file mode 100644 index 0000000000000..23682a19ea6f7 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/10_standard_retriever.yml @@ -0,0 +1,519 @@ +setup: + - skip: + version: ' - 8.13.99' + reason: 'standard retriever added in 8.14' + - do: + indices.create: + index: animals + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + type: + type: keyword + name: + type: text + fields: + raw: + type: keyword + color: + type: keyword + count: + type: integer + + - do: + bulk: + refresh: true + index: animals + body: + - '{"index": {"_id": 1 }}' + - '{"type": "domestic", "name": "cow", "color": "brown", "count": 1}' + - '{"index": {"_id": 2 }}' + - '{"type": "domestic", "name": "cow cow", "color": "spotted", "count": 2}' + - '{"index": {"_id": 3 }}' + - '{"type": "domestic", "name": "cow cow cow", "color": "spotted", "count": 3}' + - '{"index": {"_id": 4 }}' + - 
'{"type": "domestic", "name": "pig", "color": "pink", "count": 4}' + - '{"index": {"_id": 5 }}' + - '{"type": "domestic", "name": "pig pig", "color": "pink", "count": 5}' + - '{"index": {"_id": 6 }}' + - '{"type": "domestic", "name": "pig pig pig", "color": "spotted", "count": 6}' + - '{"index": {"_id": 7 }}' + - '{"type": "domestic", "name": "chicken", "color": "white", "count": 7}' + - '{"index": {"_id": 8 }}' + - '{"type": "domestic", "name": "chicken chicken", "color": "brown", "count": 8}' + - '{"index": {"_id": 9 }}' + - '{"type": "domestic", "name": "chicken chicken chicken", "color": "spotted", "count": 9}' + - '{"index": {"_id": 10 }}' + - '{"type": "wild", "name": "coyote", "color": "gray", "count": 10}' + - '{"index": {"_id": 11 }}' + - '{"type": "wild", "name": "coyote coyote", "color": "gray", "count": 11}' + - '{"index": {"_id": 12 }}' + - '{"type": "wild", "name": "coyote coyote coyote", "color": "white", "count": 12}' + - '{"index": {"_id": 13 }}' + - '{"type": "wild", "name": "rabbit", "color": "brown", "count": 13}' + - '{"index": {"_id": 14 }}' + - '{"type": "wild", "name": "rabbit rabbit", "color": "spotted", "count": 14}' + - '{"index": {"_id": 15 }}' + - '{"type": "wild", "name": "rabbit rabbit rabbit", "color": "white", "count": 15}' + +--- +"standard retriever basic": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + query: + match: + name: "cow" + + - match: {hits.total.value: 3} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "cow cow cow"} + - match: {hits.hits.0.fields.count.0: 3} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "cow cow"} + - match: {hits.hits.1.fields.count.0: 2} + + - match: {hits.hits.2._id: "1"} + - match: {hits.hits.2.fields.name.0: "cow"} + - match: {hits.hits.2.fields.count.0: 1} + +--- +"standard retriever single sort": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + query: + term: + color: "spotted" + sort: [ + { + name.raw: "asc" + } + ] + + - match: {hits.total.value: 5} + + - match: {hits.hits.0._id: "9"} + - match: {hits.hits.0.fields.name.0: "chicken chicken chicken"} + - match: {hits.hits.0.fields.count.0: 9} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "cow cow"} + - match: {hits.hits.1.fields.count.0: 2} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2.fields.name.0: "cow cow cow"} + - match: {hits.hits.2.fields.count.0: 3} + + - match: {hits.hits.3._id: "6"} + - match: {hits.hits.3.fields.name.0: "pig pig pig"} + - match: {hits.hits.3.fields.count.0: 6} + + - match: {hits.hits.4._id: "14"} + - match: {hits.hits.4.fields.name.0: "rabbit rabbit"} + - match: {hits.hits.4.fields.count.0: 14} + +--- +"standard retriever multi sort": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + query: + bool: + should: [ + { + term: { + color: "spotted" + } + }, + { + term: { + color: "pink" + } + } + ] + sort: [ + { + color: "asc" + }, + { + count: "desc" + } + ] + + - match: {hits.total.value: 7} + + - match: {hits.hits.0._id: "5"} + - match: {hits.hits.0.fields.name.0: "pig pig"} + - match: {hits.hits.0.fields.count.0: 5} + + - match: {hits.hits.1._id: "4"} + - match: {hits.hits.1.fields.name.0: "pig"} + - match: {hits.hits.1.fields.count.0: 4} + + - match: {hits.hits.2._id: "14"} + - match: {hits.hits.2.fields.name.0: "rabbit rabbit"} + - match: {hits.hits.2.fields.count.0: 14} + + - match: 
{hits.hits.3._id: "9"} + - match: {hits.hits.3.fields.name.0: "chicken chicken chicken"} + - match: {hits.hits.3.fields.count.0: 9} + + - match: {hits.hits.4._id: "6"} + - match: {hits.hits.4.fields.name.0: "pig pig pig"} + - match: {hits.hits.4.fields.count.0: 6} + + - match: {hits.hits.5._id: "3"} + - match: {hits.hits.5.fields.name.0: "cow cow cow"} + - match: {hits.hits.5.fields.count.0: 3} + + - match: {hits.hits.6._id: "2"} + - match: {hits.hits.6.fields.name.0: "cow cow"} + - match: {hits.hits.6.fields.count.0: 2} + +--- +"standard retriever filter": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + filter: + bool: + must_not: + term: + color: "spotted" + query: + match: + name: "cow" + + - match: {hits.total.value: 1} + + - match: {hits.hits.0._id: "1"} + - match: {hits.hits.0.fields.name.0: "cow"} + - match: {hits.hits.0.fields.count.0: 1} + +--- +"standard retriever multi filter": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + filter: [ + { + match: { + name: "cow" + } + }, + { + range: { + count: { + gt: 1, + lt: 3 + } + } + } + ] + query: + term: + color: "spotted" + + - match: {hits.total.value: 1} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "cow cow"} + - match: {hits.hits.0.fields.count.0: 2} + +--- +"standard retriever filter no query": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + filter: [ + { + match: { + name: "cow" + } + }, + { + range: { + count: { + gt: 1, + lt: 4 + } + } + } + ] + sort: [ + { + count: "desc" + } + ] + + - match: {hits.total.value: 2} + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0.fields.name.0: "cow cow cow" } + - match: { hits.hits.0.fields.count.0: 3 } + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "cow cow"} + - match: {hits.hits.1.fields.count.0: 2} + +--- +"standard retriever search after": + - do: + search: + index: animals + body: + size: 3 + fields: [ "name", "count" ] + retriever: + standard: + query: + bool: + should: [ + { + term: { + color: "spotted" + } + }, + { + term: { + color: "pink" + } + } + ] + sort: [ + { + count: "desc" + } + ] + + - match: {hits.total.value: 7} + + - match: {hits.hits.0._id: "14"} + - match: {hits.hits.0.fields.name.0: "rabbit rabbit"} + - match: {hits.hits.0.fields.count.0: 14} + + - match: {hits.hits.1._id: "9"} + - match: {hits.hits.1.fields.name.0: "chicken chicken chicken"} + - match: {hits.hits.1.fields.count.0: 9} + + - match: {hits.hits.2._id: "6"} + - match: {hits.hits.2.fields.name.0: "pig pig pig"} + - match: {hits.hits.2.fields.count.0: 6} + + - do: + search: + index: animals + body: + size: 3 + fields: [ "name", "count" ] + retriever: + standard: + search_after: [ 6 ] + query: + bool: + should: [ + { + term: { + color: "spotted" + } + }, + { + term: { + color: "pink" + } + } + ] + sort: [ + { + count: "desc" + } + ] + + - match: {hits.total.value: 7} + + - match: {hits.hits.0._id: "5"} + - match: {hits.hits.0.fields.name.0: "pig pig"} + - match: {hits.hits.0.fields.count.0: 5} + + - match: {hits.hits.1._id: "4"} + - match: {hits.hits.1.fields.name.0: "pig"} + - match: {hits.hits.1.fields.count.0: 4} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2.fields.name.0: "cow cow cow"} + - match: {hits.hits.2.fields.count.0: 3} + + - do: + search: + index: animals + body: + size: 3 + fields: [ "name", "count" ] + retriever: + standard: + search_after: [ 3 ] + query: + 
bool: + should: [ + { + term: { + color: "spotted" + } + }, + { + term: { + color: "pink" + } + } + ] + sort: [ + { + count: "desc" + } + ] + + - match: {hits.total.value: 7} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "cow cow"} + - match: {hits.hits.0.fields.count.0: 2} + +--- +"standard retriever terminate after": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + filter: + bool: + must_not: + match: + name: "cow" + sort: [ + { + count: "asc" + } + ] + terminate_after: 3 + + - match: {hits.total.value: 3} + + - match: {hits.hits.0._id: "4"} + - match: {hits.hits.0.fields.name.0: "pig"} + - match: {hits.hits.0.fields.count.0: 4} + + - match: {hits.hits.1._id: "5"} + - match: {hits.hits.1.fields.name.0: "pig pig"} + - match: {hits.hits.1.fields.count.0: 5} + + - match: {hits.hits.2._id: "6"} + - match: {hits.hits.2.fields.name.0: "pig pig pig"} + - match: {hits.hits.2.fields.count.0: 6} + +--- +"standard retriever min score": + - do: + search: + index: animals + body: + fields: [ "name", "count" ] + retriever: + standard: + query: + script_score: + query: + match: + name: "cow" + script: + source: " $('count', -1)" + min_score: 1.5 + + - match: {hits.total.value: 2} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "cow cow cow"} + - match: {hits.hits.0.fields.count.0: 3} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "cow cow"} + - match: {hits.hits.1.fields.count.0: 2} + +--- +"standard retriever collapse": + - do: + search: + index: animals + body: + size: 15 + fields: [ "name", "count", "color" ] + retriever: + standard: + query: + match_all: {} + collapse: + field: "color" + sort: [ + { + count: "asc" + } + ] + + - match: {hits.total.value: 15} + + - match: {hits.hits.0._id: "1"} + - match: {hits.hits.0.fields.name.0: "cow"} + - match: {hits.hits.0.fields.count.0: 1} + - match: {hits.hits.0.fields.color.0: "brown"} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "cow cow"} + - match: {hits.hits.1.fields.count.0: 2} + - match: {hits.hits.1.fields.color.0: "spotted"} + + - match: {hits.hits.2._id: "4"} + - match: {hits.hits.2.fields.name.0: "pig"} + - match: {hits.hits.2.fields.count.0: 4} + - match: {hits.hits.2.fields.color.0: "pink"} + + - match: {hits.hits.3._id: "7" } + - match: {hits.hits.3.fields.name.0: "chicken" } + - match: {hits.hits.3.fields.count.0: 7 } + - match: {hits.hits.3.fields.color.0: "white"} + + - match: {hits.hits.4._id: "10"} + - match: {hits.hits.4.fields.name.0: "coyote"} + - match: {hits.hits.4.fields.count.0: 10} + - match: {hits.hits.4.fields.color.0: "gray"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml new file mode 100644 index 0000000000000..66f88315032c3 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml @@ -0,0 +1,73 @@ +setup: + - skip: + version: ' - 8.13.99' + reason: 'kNN retriever added in 8.14' + - do: + indices.create: + index: index1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + dims: 5 + index: true + similarity: l2_norm + + - do: + bulk: + refresh: true + index: index1 + body: + - '{"index": {"_id": 1 }}' + - '{"name": "cow.jpg", "vector": [1, 1, 1, 1, 
1]}' + - '{"index": {"_id": 2}}' + - '{"name": "moose.jpg", "vector": [2, 2, 2, 2, 2]}' + - '{"index": {"_id": 3 }}' + - '{"name": "rabbit.jpg", "vector": [3, 3, 3, 3, 3]}' + +--- +"kNN retriever": + - do: + search: + index: index1 + body: + fields: [ "name" ] + retriever: + knn: + field: vector + query_vector: [2, 2, 2, 2, 3] + k: 2 + num_candidates: 3 + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + + - match: {hits.hits.1._id: "3"} + - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} + +--- +"kNN retriever with filter": + - do: + search: + index: index1 + body: + fields: [ "name" ] + retriever: + knn: + field: vector + query_vector: [2, 2, 2, 2, 3] + k: 2 + num_candidates: 3 + filter: + term: + name: "rabbit.jpg" + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 9c142d18034c0..83b8606da2997 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -358,6 +358,7 @@ exports org.elasticsearch.search.query; exports org.elasticsearch.search.rank; exports org.elasticsearch.search.rescore; + exports org.elasticsearch.search.retriever; exports org.elasticsearch.search.runtime; exports org.elasticsearch.search.searchafter; exports org.elasticsearch.search.slice; @@ -415,7 +416,8 @@ org.elasticsearch.cluster.service.TransportFeatures, org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, - org.elasticsearch.indices.IndicesFeatures; + org.elasticsearch.indices.IndicesFeatures, + org.elasticsearch.search.retriever.RetrieversFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; uses RestExtension; diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index e985e279770e9..73927037b1f81 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -37,6 +37,8 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.rescore.Rescorer; import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParser; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.SuggestionBuilder; @@ -111,6 +113,13 @@ default List> getSuggesters() { return emptyList(); } + /** + * The new {@link RetrieverBuilder}s defined by this plugin. + */ + default List> getRetrievers() { + return emptyList(); + } + /** * The new {@link Query}s defined by this plugin. */ @@ -256,6 +265,47 @@ public Writeable.Reader getSuggestionReader() { } } + /** + * Specification of custom {@link RetrieverBuilder}. + */ + class RetrieverSpec { + + private final ParseField name; + private final RetrieverParser parser; + + /** + * Specification of custom {@link RetrieverBuilder}. + * + * @param name holds the names by which this retriever might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name by under which the reader is registered. So it is the name that the retriever should use as its + * {@link NamedWriteable#getWriteableName()} too. 
+ * @param parser the parser the reads the retriever builder from xcontent + */ + public RetrieverSpec(ParseField name, RetrieverParser parser) { + this.name = name; + this.parser = parser; + } + + /** + * Specification of custom {@link RetrieverBuilder}. + * + * @param name the name by which this retriever might be parsed or deserialized. Make sure that the retriever builder returns + * this name for {@link NamedWriteable#getWriteableName()}. + * @param parser the parser the reads the retriever builder from xcontent + */ + public RetrieverSpec(String name, RetrieverParser parser) { + this(new ParseField(name), parser); + } + + public ParseField getName() { + return name; + } + + public RetrieverParser getParser() { + return parser; + } + } + /** * Specification of custom {@link Query}. */ diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 5b17203ded132..97b747c650c1b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -86,6 +86,7 @@ import org.elasticsearch.plugins.SearchPlugin.QuerySpec; import org.elasticsearch.plugins.SearchPlugin.QueryVectorBuilderSpec; import org.elasticsearch.plugins.SearchPlugin.RescorerSpec; +import org.elasticsearch.plugins.SearchPlugin.RetrieverSpec; import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec; import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec; import org.elasticsearch.plugins.SearchPlugin.SignificanceHeuristicSpec; @@ -227,6 +228,10 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.search.retriever.KnnRetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.StandardRetrieverBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.GeoDistanceSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -305,6 +310,7 @@ public SearchModule(Settings settings, List plugins) { registerSuggesters(plugins); highlighters = setupHighlighters(settings, plugins); registerScoreFunctions(plugins); + registerRetrieverParsers(plugins); registerQueryParsers(plugins); registerRescorers(plugins); registerSorts(); @@ -1039,6 +1045,13 @@ private void registerFetchSubPhase(FetchSubPhase subPhase) { fetchSubPhases.add(requireNonNull(subPhase, "FetchSubPhase must not be null")); } + private void registerRetrieverParsers(List plugins) { + registerRetriever(new RetrieverSpec<>(StandardRetrieverBuilder.NAME, StandardRetrieverBuilder::fromXContent)); + registerRetriever(new RetrieverSpec<>(KnnRetrieverBuilder.NAME, KnnRetrieverBuilder::fromXContent)); + + registerFromPlugin(plugins, SearchPlugin::getRetrievers, this::registerRetriever); + } + private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(MatchQueryBuilder.NAME, MatchQueryBuilder::new, MatchQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(MatchPhraseQueryBuilder.NAME, MatchPhraseQueryBuilder::new, MatchPhraseQueryBuilder::fromXContent)); @@ -1198,6 +1211,17 @@ public static List getIntervalsSourceProviderNamed ); } + private void registerRetriever(RetrieverSpec spec) { + namedXContents.add( + new NamedXContentRegistry.Entry( + 
RetrieverBuilder.class, + spec.getName(), + (p, c) -> spec.getParser().fromXContent(p, (RetrieverParserContext) c), + spec.getName().getForRestApiVersion() + ) + ); + } + private void registerQuery(QuerySpec spec) { namedWriteables.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader())); namedXContents.add( diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 72fd84cda760b..741a0e680b522 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -39,6 +39,8 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -70,6 +72,7 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; +import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER; import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; @@ -120,6 +123,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField SLICE = new ParseField("slice"); public static final ParseField POINT_IN_TIME = new ParseField("pit"); public static final ParseField RUNTIME_MAPPINGS_FIELD = new ParseField("runtime_mappings"); + public static final ParseField RETRIEVER = new ParseField("retriever"); private static final boolean RANK_SUPPORTED = Booleans.parseBoolean(System.getProperty("es.search.rank_supported"), true); @@ -1285,6 +1289,7 @@ private SearchSourceBuilder parseXContent( } List knnBuilders = new ArrayList<>(); + RetrieverBuilder retrieverBuilder = null; SearchUsage searchUsage = new SearchUsage(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -1353,7 +1358,15 @@ private SearchSourceBuilder parseXContent( ); } } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (RETRIEVER.match(currentFieldName, parser.getDeprecationHandler())) { + if (clusterSupportsFeature.test(RetrieverBuilder.RETRIEVERS_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "Unknown key for a START_OBJECT in [retriever]."); + } + retrieverBuilder = RetrieverBuilder.parseTopLevelRetrieverBuilder( + parser, + new RetrieverParserContext(searchUsage, clusterSupportsFeature) + ); + } else if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (subSearchSourceBuilders.isEmpty() == false) { throw new IllegalArgumentException( "cannot specify field [" + currentFieldName + "] and field [" + SUB_SEARCHES_FIELD.getPreferredName() + "]" @@ -1611,6 +1624,38 @@ private SearchSourceBuilder parseXContent( knnSearch = knnBuilders.stream().map(knnBuilder -> 
knnBuilder.build(size())).collect(Collectors.toList()); + if (retrieverBuilder != null) { + List specified = new ArrayList<>(); + if (subSearchSourceBuilders.isEmpty() == false) { + specified.add(QUERY_FIELD.getPreferredName()); + } + if (knnSearch.isEmpty() == false) { + specified.add(KNN_FIELD.getPreferredName()); + } + if (searchAfterBuilder != null) { + specified.add(SEARCH_AFTER.getPreferredName()); + } + if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + specified.add(TERMINATE_AFTER_FIELD.getPreferredName()); + } + if (sorts != null) { + specified.add(SORT_FIELD.getPreferredName()); + } + if (rescoreBuilders != null) { + specified.add(RESCORE_FIELD.getPreferredName()); + } + if (minScore != null) { + specified.add(MIN_SCORE_FIELD.getPreferredName()); + } + if (rankBuilder != null) { + specified.add(RANK_FIELD.getPreferredName()); + } + if (specified.isEmpty() == false) { + throw new IllegalArgumentException("cannot specify [" + RETRIEVER.getPreferredName() + "] and " + specified); + } + retrieverBuilder.extractToSearchSourceBuilder(this, false); + } + searchUsageConsumer.accept(searchUsage); return this; } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index c77a5c3c09f81..05dc86f4d80d8 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -195,6 +195,11 @@ public int hashCode() { return result; } + @Override + public String toString() { + return Strings.toString(this, true, true); + } + public CollapseContext build(SearchExecutionContext searchExecutionContext) { MappedFieldType fieldType = searchExecutionContext.getFieldType(field); if (fieldType == null) { diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java new file mode 100644 index 0000000000000..fc2d4218ea1ec --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.vectors.KnnSearchBuilder; +import org.elasticsearch.search.vectors.QueryVectorBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A knn retriever is used to represent a knn search + * with some elements to specify parameters for that knn search. 
+ */ +public final class KnnRetrieverBuilder extends RetrieverBuilder { + + public static final String NAME = "knn"; + public static final NodeFeature KNN_RETRIEVER_SUPPORTED = new NodeFeature("knn_retriever_supported"); + + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField K_FIELD = new ParseField("k"); + public static final ParseField NUM_CANDS_FIELD = new ParseField("num_candidates"); + public static final ParseField QUERY_VECTOR_FIELD = new ParseField("query_vector"); + public static final ParseField QUERY_VECTOR_BUILDER_FIELD = new ParseField("query_vector_builder"); + public static final ParseField VECTOR_SIMILARITY = new ParseField("similarity"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "knn", + args -> { + List vector = (List) args[1]; + final float[] vectorArray; + if (vector != null) { + vectorArray = new float[vector.size()]; + for (int i = 0; i < vector.size(); i++) { + vectorArray[i] = vector.get(i); + } + } else { + vectorArray = null; + } + return new KnnRetrieverBuilder( + (String) args[0], + vectorArray, + (QueryVectorBuilder) args[2], + (int) args[3], + (int) args[4], + (Float) args[5] + ); + } + ); + + static { + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareFloatArray(optionalConstructorArg(), QUERY_VECTOR_FIELD); + PARSER.declareNamedObject( + optionalConstructorArg(), + (p, c, n) -> p.namedObject(QueryVectorBuilder.class, n, c), + QUERY_VECTOR_BUILDER_FIELD + ); + PARSER.declareInt(constructorArg(), K_FIELD); + PARSER.declareInt(constructorArg(), NUM_CANDS_FIELD); + PARSER.declareFloat(optionalConstructorArg(), VECTOR_SIMILARITY); + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static KnnRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(KNN_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); + } + return PARSER.apply(parser, context); + } + + private final String field; + private final float[] queryVector; + private final QueryVectorBuilder queryVectorBuilder; + private final int k; + private final int numCands; + private final Float similarity; + + public KnnRetrieverBuilder( + String field, + float[] queryVector, + QueryVectorBuilder queryVectorBuilder, + int k, + int numCands, + Float similarity + ) { + this.field = field; + this.queryVector = queryVector; + this.queryVectorBuilder = queryVectorBuilder; + this.k = k; + this.numCands = numCands; + this.similarity = similarity; + } + + // ---- FOR TESTING XCONTENT PARSING ---- + + @Override + public String getName() { + return NAME; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + KnnSearchBuilder knnSearchBuilder = new KnnSearchBuilder(field, queryVector, queryVectorBuilder, k, numCands, similarity); + if (preFilterQueryBuilders != null) { + knnSearchBuilder.addFilterQueries(preFilterQueryBuilders); + } + List knnSearchBuilders = new ArrayList<>(searchSourceBuilder.knnSearch()); + knnSearchBuilders.add(knnSearchBuilder); + searchSourceBuilder.knnSearch(knnSearchBuilders); + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(K_FIELD.getPreferredName(), k); + 
builder.field(NUM_CANDS_FIELD.getPreferredName(), numCands); + + if (queryVector != null) { + builder.field(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + } + + if (queryVectorBuilder != null) { + builder.field(QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), queryVectorBuilder); + } + + if (similarity != null) { + builder.field(VECTOR_SIMILARITY.getPreferredName(), similarity); + } + } + + @Override + public boolean doEquals(Object o) { + KnnRetrieverBuilder that = (KnnRetrieverBuilder) o; + return k == that.k + && numCands == that.numCands + && Objects.equals(field, that.field) + && Arrays.equals(queryVector, that.queryVector) + && Objects.equals(queryVectorBuilder, that.queryVectorBuilder) + && Objects.equals(similarity, that.similarity); + } + + @Override + public int doHashCode() { + int result = Objects.hash(field, queryVectorBuilder, k, numCands, similarity); + result = 31 * result + Arrays.hashCode(queryVector); + return result; + } + + // ---- END TESTING ---- +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java new file mode 100644 index 0000000000000..c9b12f03beb53 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -0,0 +1,234 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xcontent.AbstractObjectParser; +import org.elasticsearch.xcontent.FilterXContentParserWrapper; +import org.elasticsearch.xcontent.NamedObjectNotFoundException; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * A retriever represents an API element that returns an ordered list of top + * documents. These can be obtained from a query, from another retriever, etc. + * Internally, a {@link RetrieverBuilder} is just a wrapper for other search + * elements that are extracted into a {@link SearchSourceBuilder}. The advantage + * retrievers have is in the API they appear as a tree-like structure enabling + * easier reasoning about what a search does. + * + * This is the base class for all other retrievers. This class does not support + * serialization and is expected to be fully extracted to a {@link SearchSourceBuilder} + * prior to any transport calls. 
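+ *
+ * <p>For orientation only, a minimal sketch of the top-level request shape (the clause shown
+ * is illustrative):
+ * <pre>
+ * "retriever": {
+ *   "standard": {
+ *     "query": { "match_all": {} }
+ *   }
+ * }
+ * </pre>
+ * The retriever name ({@code standard}, {@code knn}, {@code rrf}, ...) is resolved through the
+ * named xcontent registry, and a compound retriever such as {@code rrf} nests further
+ * retrievers under a {@code retrievers} array. {@code SearchSourceBuilder} rejects a request
+ * that combines {@code retriever} with the top-level {@code query}, {@code knn},
+ * {@code search_after}, {@code terminate_after}, {@code sort}, {@code rescore},
+ * {@code min_score} or {@code rank} sections.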
+ */ +public abstract class RetrieverBuilder implements ToXContent { + + public static final NodeFeature RETRIEVERS_SUPPORTED = new NodeFeature("retrievers_supported"); + + public static final ParseField PRE_FILTER_FIELD = new ParseField("filter"); + + protected static void declareBaseParserFields( + String name, + AbstractObjectParser parser + ) { + parser.declareObjectArray((r, v) -> r.preFilterQueryBuilders = v, (p, c) -> { + QueryBuilder preFilterQueryBuilder = AbstractQueryBuilder.parseTopLevelQuery(p, c::trackQueryUsage); + c.trackSectionUsage(name + ":" + PRE_FILTER_FIELD.getPreferredName()); + return preFilterQueryBuilder; + }, PRE_FILTER_FIELD); + } + + /** + * This method parsers a top-level retriever within a search and tracks its own depth. Currently, the + * maximum depth allowed is limited to 2 as a compound retriever cannot currently contain another + * compound retriever. + */ + public static RetrieverBuilder parseTopLevelRetrieverBuilder(XContentParser parser, RetrieverParserContext context) throws IOException { + parser = new FilterXContentParserWrapper(parser) { + + int nestedDepth = 0; + + @Override + public T namedObject(Class categoryClass, String name, Object context) throws IOException { + if (categoryClass.equals(RetrieverBuilder.class)) { + nestedDepth++; + + if (nestedDepth > 2) { + throw new IllegalArgumentException( + "the nested depth of the [" + name + "] retriever exceeds the maximum nested depth [2] for retrievers" + ); + } + } + + T namedObject = getXContentRegistry().parseNamedObject(categoryClass, name, this, context); + + if (categoryClass.equals(RetrieverBuilder.class)) { + nestedDepth--; + } + + return namedObject; + } + }; + + return parseInnerRetrieverBuilder(parser, context); + } + + protected static RetrieverBuilder parseInnerRetrieverBuilder(XContentParser parser, RetrieverParserContext context) throws IOException { + Objects.requireNonNull(context); + + if (parser.currentToken() != XContentParser.Token.START_OBJECT && parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ParsingException( + parser.getTokenLocation(), + "retriever malformed, must start with [" + XContentParser.Token.START_OBJECT + "]" + ); + } + + if (parser.nextToken() == XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "retriever malformed, empty clause found"); + } + + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + throw new ParsingException( + parser.getTokenLocation(), + "retriever malformed, no field after [" + XContentParser.Token.START_OBJECT + "]" + ); + } + + String retrieverName = parser.currentName(); + + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ParsingException( + parser.getTokenLocation(), + "[" + retrieverName + "] retriever malformed, no [" + XContentParser.Token.START_OBJECT + "] after retriever name" + ); + } + + RetrieverBuilder retrieverBuilder; + + try { + retrieverBuilder = parser.namedObject(RetrieverBuilder.class, retrieverName, context); + } catch (NamedObjectNotFoundException nonfe) { + String message = String.format( + Locale.ROOT, + "unknown retriever [%s]%s", + retrieverName, + SuggestingErrorOnUnknown.suggest(retrieverName, nonfe.getCandidates()) + ); + + throw new ParsingException(new XContentLocation(nonfe.getLineNumber(), nonfe.getColumnNumber()), message, nonfe); + } + + context.trackSectionUsage(retrieverName); + + if (parser.currentToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException( + parser.getTokenLocation(), 
+ "[" + + retrieverName + + "] malformed retriever, expected [" + + XContentParser.Token.END_OBJECT + + "] but found [" + + parser.currentToken() + + "]" + ); + } + + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException( + parser.getTokenLocation(), + "[" + + retrieverName + + "] malformed retriever, expected [" + + XContentParser.Token.END_OBJECT + + "] but found [" + + parser.currentToken() + + "]" + ); + } + + return retrieverBuilder; + } + + protected List preFilterQueryBuilders = new ArrayList<>(); + + /** + * Gets the filters for this retriever. + */ + public List getPreFilterQueryBuilders() { + return preFilterQueryBuilders; + } + + /** + * This method is called at the end of parsing on behalf of a {@link SearchSourceBuilder}. + * Elements from retrievers are expected to be "extracted" into the {@link SearchSourceBuilder}. + */ + public abstract void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed); + + // ---- FOR TESTING XCONTENT PARSING ---- + + public abstract String getName(); + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (preFilterQueryBuilders.isEmpty() == false) { + builder.field(PRE_FILTER_FIELD.getPreferredName(), preFilterQueryBuilders); + } + doToXContent(builder, params); + builder.endObject(); + + return builder; + } + + protected abstract void doToXContent(XContentBuilder builder, ToXContent.Params params) throws IOException; + + @Override + public boolean isFragment() { + return false; + } + + @Override + public final boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RetrieverBuilder that = (RetrieverBuilder) o; + return Objects.equals(preFilterQueryBuilders, that.preFilterQueryBuilders) && doEquals(o); + } + + protected abstract boolean doEquals(Object o); + + @Override + public final int hashCode() { + return Objects.hash(getClass(), preFilterQueryBuilders, doHashCode()); + } + + protected abstract int doHashCode(); + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + // ---- END FOR TESTING ---- +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParser.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParser.java new file mode 100644 index 0000000000000..d2703d8a81260 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParser.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Defines a retriever parser that is able to parse {@link RetrieverBuilder}s + * from {@link org.elasticsearch.xcontent.XContent}. + */ +@FunctionalInterface +public interface RetrieverParser { + + /** + * Creates a new {@link RetrieverBuilder} from the retriever held by the + * {@link XContentParser}. The state on the parser contained in this context + * will be changed as a side effect of this method call. 
The + * {@link RetrieverParserContext} tracks usage of retriever features and + * queries when available. + */ + RB fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException; +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParserContext.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParserContext.java new file mode 100644 index 0000000000000..21c24647753cb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverParserContext.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.usage.SearchUsage; + +import java.util.Objects; +import java.util.function.Predicate; + +public class RetrieverParserContext { + + protected final SearchUsage searchUsage; + protected final Predicate clusterSupportsFeature; + + public RetrieverParserContext(SearchUsage searchUsage, Predicate clusterSupportsFeature) { + this.searchUsage = Objects.requireNonNull(searchUsage); + this.clusterSupportsFeature = clusterSupportsFeature; + } + + public void trackSectionUsage(String section) { + searchUsage.trackSectionUsage(section); + } + + public void trackQueryUsage(String query) { + searchUsage.trackQueryUsage(query); + } + + public void trackRescorerUsage(String name) { + searchUsage.trackRescorerUsage(name); + } + + public boolean clusterSupportsFeature(NodeFeature nodeFeature) { + return clusterSupportsFeature != null && clusterSupportsFeature.test(nodeFeature); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java new file mode 100644 index 0000000000000..ad664616f4564 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +/** + * Each retriever is given its own {@link NodeFeature} so new + * retrievers can be added individually with additional functionality. 
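+ *
+ * <p>For example, {@code KnnRetrieverBuilder#fromXContent} fails with
+ * "unknown retriever [knn]" when the cluster does not support
+ * {@code KNN_RETRIEVER_SUPPORTED}, which is the behaviour exercised by
+ * {@code RetrieverBuilderVersionTests}.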
+ */ +public class RetrieversFeatures implements FeatureSpecification { + + @Override + public Set getFeatures() { + return Set.of( + RetrieverBuilder.RETRIEVERS_SUPPORTED, + StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED, + KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java new file mode 100644 index 0000000000000..4694780770617 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.builder.SubSearchSourceBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A standard retriever is used to represent anything that is a query along + * with some elements to specify parameters for that query. 
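+ *
+ * <p>A minimal sketch assembled from the parser fields declared below (the values shown are
+ * illustrative):
+ * <pre>
+ * "standard": {
+ *   "query": { "match_all": {} },
+ *   "sort": [ "field" ],
+ *   "min_score": 2
+ * }
+ * </pre>
+ * {@code filter}, {@code search_after}, {@code terminate_after} and {@code collapse} are also
+ * accepted; {@code search_after}, {@code terminate_after}, {@code sort}, {@code min_score} and
+ * {@code collapse} are rejected when this retriever is used as a child of a compound retriever
+ * (see {@code extractToSearchSourceBuilder}).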
+ */ +public final class StandardRetrieverBuilder extends RetrieverBuilder implements ToXContent { + + public static final String NAME = "standard"; + public static final NodeFeature STANDARD_RETRIEVER_SUPPORTED = new NodeFeature("standard_retriever_supported"); + + public static final ParseField QUERY_FIELD = new ParseField("query"); + public static final ParseField SEARCH_AFTER_FIELD = new ParseField("search_after"); + public static final ParseField TERMINATE_AFTER_FIELD = new ParseField("terminate_after"); + public static final ParseField SORT_FIELD = new ParseField("sort"); + public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score"); + public static final ParseField COLLAPSE_FIELD = new ParseField("collapse"); + + public static final ObjectParser PARSER = new ObjectParser<>( + NAME, + StandardRetrieverBuilder::new + ); + + static { + PARSER.declareObject((r, v) -> r.queryBuilder = v, (p, c) -> { + QueryBuilder queryBuilder = AbstractQueryBuilder.parseTopLevelQuery(p, c::trackQueryUsage); + c.trackSectionUsage(NAME + ":" + QUERY_FIELD.getPreferredName()); + return queryBuilder; + }, QUERY_FIELD); + + PARSER.declareField((r, v) -> r.searchAfterBuilder = v, (p, c) -> { + SearchAfterBuilder searchAfterBuilder = SearchAfterBuilder.fromXContent(p); + c.trackSectionUsage(NAME + ":" + SEARCH_AFTER_FIELD.getPreferredName()); + return searchAfterBuilder; + }, SEARCH_AFTER_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); + + PARSER.declareField((r, v) -> r.terminateAfter = v, (p, c) -> { + int terminateAfter = p.intValue(); + c.trackSectionUsage(NAME + ":" + TERMINATE_AFTER_FIELD.getPreferredName()); + return terminateAfter; + }, TERMINATE_AFTER_FIELD, ObjectParser.ValueType.INT); + + PARSER.declareField((r, v) -> r.sortBuilders = v, (p, c) -> { + List> sortBuilders = SortBuilder.fromXContent(p); + c.trackSectionUsage(NAME + ":" + SORT_FIELD.getPreferredName()); + return sortBuilders; + }, SORT_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); + + PARSER.declareField((r, v) -> r.minScore = v, (p, c) -> { + float minScore = p.floatValue(); + c.trackSectionUsage(NAME + ":" + MIN_SCORE_FIELD.getPreferredName()); + return minScore; + }, MIN_SCORE_FIELD, ObjectParser.ValueType.FLOAT); + + PARSER.declareField((r, v) -> r.collapseBuilder = v, (p, c) -> { + CollapseBuilder collapseBuilder = CollapseBuilder.fromXContent(p); + if (collapseBuilder.getField() != null) { + c.trackSectionUsage(COLLAPSE_FIELD.getPreferredName()); + } + return collapseBuilder; + }, COLLAPSE_FIELD, ObjectParser.ValueType.OBJECT); + + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static StandardRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(STANDARD_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); + } + return PARSER.apply(parser, context); + } + + QueryBuilder queryBuilder; + SearchAfterBuilder searchAfterBuilder; + int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; + List> sortBuilders; + Float minScore; + CollapseBuilder collapseBuilder; + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (preFilterQueryBuilders.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + for (QueryBuilder preFilterQueryBuilder : preFilterQueryBuilders) { + boolQueryBuilder.filter(preFilterQueryBuilder); + } + + if (queryBuilder != 
null) { + boolQueryBuilder.must(queryBuilder); + } + + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(boolQueryBuilder)); + } else if (queryBuilder != null) { + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(queryBuilder)); + } + + if (searchAfterBuilder != null) { + if (compoundUsed) { + throw new IllegalArgumentException( + "[" + SEARCH_AFTER_FIELD.getPreferredName() + "] cannot be used in children of compound retrievers" + ); + } + + searchSourceBuilder.searchAfter(searchAfterBuilder.getSortValues()); + } + + if (terminateAfter != SearchContext.DEFAULT_TERMINATE_AFTER) { + if (compoundUsed) { + throw new IllegalArgumentException( + "[" + TERMINATE_AFTER_FIELD.getPreferredName() + "] cannot be used in children of compound retrievers" + ); + } + + searchSourceBuilder.terminateAfter(terminateAfter); + } + + if (sortBuilders != null) { + if (compoundUsed) { + throw new IllegalArgumentException( + "[" + SORT_FIELD.getPreferredName() + "] cannot be used in children of compound retrievers" + ); + } + + searchSourceBuilder.sort(sortBuilders); + } + + if (minScore != null) { + if (compoundUsed) { + throw new IllegalArgumentException( + "[" + MIN_SCORE_FIELD.getPreferredName() + "] cannot be used in children of compound retrievers" + ); + } + + searchSourceBuilder.minScore(minScore); + } + + if (collapseBuilder != null) { + if (compoundUsed) { + throw new IllegalArgumentException( + "[" + COLLAPSE_FIELD.getPreferredName() + "] cannot be used in children of compound retrievers" + ); + } + + searchSourceBuilder.collapse(collapseBuilder); + } + } + + // ---- FOR TESTING XCONTENT PARSING ---- + + @Override + public String getName() { + return NAME; + } + + @Override + public void doToXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + if (queryBuilder != null) { + builder.field(QUERY_FIELD.getPreferredName(), queryBuilder); + } + + if (searchAfterBuilder != null) { + searchAfterBuilder.innerToXContent(builder); + } + + if (terminateAfter != SearchContext.DEFAULT_TERMINATE_AFTER) { + builder.field(TERMINATE_AFTER_FIELD.getPreferredName(), terminateAfter); + } + + if (sortBuilders != null) { + builder.field(SORT_FIELD.getPreferredName(), sortBuilders); + } + + if (minScore != null) { + builder.field(MIN_SCORE_FIELD.getPreferredName(), minScore); + } + + if (collapseBuilder != null) { + builder.field(COLLAPSE_FIELD.getPreferredName(), collapseBuilder); + } + } + + @Override + public boolean doEquals(Object o) { + StandardRetrieverBuilder that = (StandardRetrieverBuilder) o; + return terminateAfter == that.terminateAfter + && Objects.equals(queryBuilder, that.queryBuilder) + && Objects.equals(searchAfterBuilder, that.searchAfterBuilder) + && Objects.equals(sortBuilders, that.sortBuilders) + && Objects.equals(minScore, that.minScore) + && Objects.equals(collapseBuilder, that.collapseBuilder); + } + + @Override + public int doHashCode() { + return Objects.hash(queryBuilder, searchAfterBuilder, terminateAfter, sortBuilders, minScore, collapseBuilder); + } + + // ---- END FOR TESTING ---- +} diff --git a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index 249f2c95ddc7f..f5c3727f56676 100644 --- a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -211,7 +211,7 @@ public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws return builder; } - void innerToXContent(XContentBuilder builder) throws IOException { + public void innerToXContent(XContentBuilder builder) throws IOException { builder.array(SEARCH_AFTER.getPreferredName(), sortValues); } @@ -277,7 +277,8 @@ public boolean equals(Object other) { if ((other instanceof SearchAfterBuilder) == false) { return false; } - return Arrays.equals(sortValues, ((SearchAfterBuilder) other).sortValues); + boolean value = Arrays.equals(sortValues, ((SearchAfterBuilder) other).sortValues); + return value; } @Override diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index a42c1f7192d49..71b6aacd56ea7 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -12,3 +12,4 @@ org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures +org.elasticsearch.search.retriever.RetrieversFeatures diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java new file mode 100644 index 0000000000000..cbbbe7d86f4e2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; + +public class KnnRetrieverBuilderParsingTests extends AbstractXContentTestCase { + + /** + * Creates a random {@link KnnRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static KnnRetrieverBuilder createRandomKnnRetrieverBuilder() { + String field = randomAlphaOfLength(6); + int dim = randomIntBetween(2, 30); + float[] vector = randomBoolean() ? null : randomVector(dim); + int k = randomIntBetween(1, 100); + int numCands = randomIntBetween(k + 20, 1000); + Float similarity = randomBoolean() ? 
null : randomFloat(); + + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder(field, vector, null, k, numCands, similarity); + + List preFilterQueryBuilders = new ArrayList<>(); + + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 3); ++i) { + preFilterQueryBuilders.add(RandomQueryBuilder.createQuery(random())); + } + } + + knnRetrieverBuilder.preFilterQueryBuilders.addAll(preFilterQueryBuilders); + + return knnRetrieverBuilder; + } + + @Override + protected KnnRetrieverBuilder createTestInstance() { + return createRandomKnnRetrieverBuilder(); + } + + @Override + protected KnnRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { + return KnnRetrieverBuilder.fromXContent( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderErrorTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderErrorTests.java new file mode 100644 index 0000000000000..0e5490e989a2d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderErrorTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; + +/** + * Tests exceptions related to usage of restricted global values with a retriever. 
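+ *
+ * For example, a body combining both, such as
+ * <pre>{"query": {"match_all": {}}, "retriever": {"standard": {}}}</pre>
+ * is expected to fail with "cannot specify [retriever] and [query]".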
+ */ +public class RetrieverBuilderErrorTests extends ESTestCase { + + public void testRetrieverExtractionErrors() throws IOException { + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"query\": {\"match_all\": {}}, \"retriever\":{\"standard\":{}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [query]", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"knn\":{\"field\": \"test\", \"k\": 2, \"num_candidates\": 5," + + " \"query_vector\": [1, 2, 3]}, \"retriever\":{\"standard\":{}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [knn]", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"search_after\": [1], \"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [search_after]", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"terminate_after\": 1, \"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [terminate_after]", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"sort\": [\"field\"], \"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [sort]", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"rescore\": {\"query\": {\"rescore_query\": {\"match_all\": {}}}}, \"retriever\":{\"standard\":{}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [rescore]", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"min_score\": 2, \"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify [retriever] and [min_score]", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "" + "{\"min_score\": 2, \"query\": {\"match_all\": {}}, \"retriever\":{\"standard\":{}}, \"terminate_after\": 1}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("cannot specify 
[retriever] and [query, terminate_after, min_score]", iae.getMessage()); + } + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java new file mode 100644 index 0000000000000..593b43bc26597 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; + +/** Tests retrievers validate on their own {@link NodeFeature} */ +public class RetrieverBuilderVersionTests extends ESTestCase { + + public void testRetrieverVersions() throws IOException { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ParsingException iae = expectThrows(ParsingException.class, () -> ssb.parseXContent(parser, true, nf -> false)); + assertEquals("Unknown key for a START_OBJECT in [retriever].", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ParsingException iae = expectThrows( + ParsingException.class, + () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) + ); + assertEquals("unknown retriever [standard]", iae.getMessage()); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ssb.parseXContent( + parser, + true, + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED + ); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"knn\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ParsingException iae = expectThrows( + ParsingException.class, + () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) + ); + assertEquals("unknown retriever [knn]", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"knn\":{\"field\": \"test\", \"k\": 2, \"num_candidates\": 5, \"query_vector\": [1, 2, 3]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ssb.parseXContent( + parser, + true, + nf -> 
nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED + ); + } + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java new file mode 100644 index 0000000000000..bec534d89cc03 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.collapse.CollapseBuilderTests; +import org.elasticsearch.search.searchafter.SearchAfterBuilderTests; +import org.elasticsearch.search.sort.SortBuilderTests; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContent; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.function.BiFunction; + +public class StandardRetrieverBuilderParsingTests extends AbstractXContentTestCase { + + /** + * Creates a random {@link StandardRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. 
+ */ + public static StandardRetrieverBuilder createRandomStandardRetrieverBuilder( + BiFunction createParser + ) { + try { + StandardRetrieverBuilder standardRetrieverBuilder = new StandardRetrieverBuilder(); + + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 3); ++i) { + standardRetrieverBuilder.getPreFilterQueryBuilders().add(RandomQueryBuilder.createQuery(random())); + } + } + + if (randomBoolean()) { + standardRetrieverBuilder.queryBuilder = RandomQueryBuilder.createQuery(random()); + } + + if (randomBoolean()) { + standardRetrieverBuilder.searchAfterBuilder = SearchAfterBuilderTests.randomJsonSearchFromBuilder(createParser); + } + + if (randomBoolean()) { + standardRetrieverBuilder.terminateAfter = randomNonNegativeInt(); + } + + if (randomBoolean()) { + standardRetrieverBuilder.sortBuilders = SortBuilderTests.randomSortBuilderList(); + } + + if (randomBoolean()) { + standardRetrieverBuilder.collapseBuilder = CollapseBuilderTests.randomCollapseBuilder(randomBoolean()); + } + + return standardRetrieverBuilder; + } catch (IOException ioe) { + throw new UncheckedIOException(ioe); + } + } + + @Override + protected StandardRetrieverBuilder createTestInstance() { + return createRandomStandardRetrieverBuilder((xContent, data) -> { + try { + return createParser(xContent, data); + } catch (IOException ioe) { + throw new UncheckedIOException(ioe); + } + }); + } + + @Override + protected StandardRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { + return StandardRetrieverBuilder.fromXContent( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + // disable xcontent shuffling on the highlight builder + return new String[] { "fields" }; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java b/server/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java index ff963835f55f6..96ebc5642fde7 100644 --- a/server/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -36,8 +37,10 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; +import java.io.UncheckedIOException; import java.math.BigDecimal; import java.util.Collections; +import java.util.function.BiFunction; import static org.elasticsearch.search.searchafter.SearchAfterBuilder.extractSortType; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; @@ -47,7 +50,10 @@ public class SearchAfterBuilderTests extends ESTestCase { private static final int NUMBER_OF_TESTBUILDERS = 20; - private static SearchAfterBuilder randomSearchAfterBuilder() throws 
IOException { + /** + * Generates a random {@link SearchAfterBuilder}. + */ + public static SearchAfterBuilder randomSearchAfterBuilder() throws IOException { int numSearchFrom = randomIntBetween(1, 10); SearchAfterBuilder searchAfterBuilder = new SearchAfterBuilder(); Object[] values = new Object[numSearchFrom]; @@ -71,11 +77,14 @@ private static SearchAfterBuilder randomSearchAfterBuilder() throws IOException return searchAfterBuilder; } - // We build a json version of the search_after first in order to - // ensure that every number type remain the same before/after xcontent (de)serialization. - // This is not a problem because the final type of each field value is extracted from associated sort field. - // This little trick ensure that equals and hashcode are the same when using the xcontent serialization. - private SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { + /** + * We build a json version of the search_after first in order to + * ensure that every number type remain the same before/after xcontent (de)serialization. + * This is not a problem because the final type of each field value is extracted from associated sort field. + * This little trick ensure that equals and hashcode are the same when using the xcontent serialization. + */ + public static SearchAfterBuilder randomJsonSearchFromBuilder(BiFunction createParser) + throws IOException { int numSearchAfter = randomIntBetween(1, 10); XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); jsonBuilder.startObject(); @@ -97,7 +106,7 @@ private SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { } jsonBuilder.endArray(); jsonBuilder.endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(jsonBuilder))) { + try (XContentParser parser = createParser.apply(JsonXContent.jsonXContent, BytesReference.bytes(jsonBuilder))) { parser.nextToken(); parser.nextToken(); parser.nextToken(); @@ -128,7 +137,13 @@ public void testEqualsAndHashcode() throws Exception { public void testFromXContent() throws Exception { for (int runs = 0; runs < 20; runs++) { - SearchAfterBuilder searchAfterBuilder = randomJsonSearchFromBuilder(); + SearchAfterBuilder searchAfterBuilder = randomJsonSearchFromBuilder((xContent, data) -> { + try { + return createParser(xContent, data); + } catch (IOException ioe) { + throw new UncheckedIOException(ioe); + } + }); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java index a077367604e5e..c650f54321060 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java @@ -220,7 +220,7 @@ public void testRewrite() throws Exception { assertThat(((RewriteableQuery) rewritten.filterQueries.get(0)).rewrites, equalTo(1)); } - static float[] randomVector(int dim) { + public static float[] randomVector(int dim) { float[] vector = new float[dim]; for (int i = 0; i < vector.length; i++) { vector[i] = randomFloat(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java new file mode 100644 index 0000000000000..40cc1890f69ed --- 
/dev/null +++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Test retriever is used to test parsing of retrievers in plugins where + * generation of other random retrievers are not easily accessible through test code. + */ +public class TestRetrieverBuilder extends RetrieverBuilder { + + /** + * Creates a random {@link TestRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static TestRetrieverBuilder createRandomTestRetrieverBuilder() { + return new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)); + } + + public static final String NAME = "test"; + public static final ParseField TEST_FIELD = new ParseField(NAME); + public static final SearchPlugin.RetrieverSpec TEST_SPEC = new SearchPlugin.RetrieverSpec<>( + TEST_FIELD, + TestRetrieverBuilder::fromXContent + ); + + public static final ParseField VALUE_FIELD = new ParseField("value"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + args -> new TestRetrieverBuilder((String) args[0]) + ); + + static { + PARSER.declareString(constructorArg(), VALUE_FIELD); + } + + public static TestRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) { + return PARSER.apply(parser, context); + } + + private final String value; + + public TestRetrieverBuilder(String value) { + this.value = value; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + throw new UnsupportedOperationException("only used for parsing tests"); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(VALUE_FIELD.getPreferredName(), value); + } + + @Override + public boolean doEquals(Object o) { + TestRetrieverBuilder that = (TestRetrieverBuilder) o; + return Objects.equals(value, that.value); + } + + @Override + public int doHashCode() { + return Objects.hash(value); + } +} diff --git a/x-pack/plugin/rank-rrf/src/main/java/module-info.java b/x-pack/plugin/rank-rrf/src/main/java/module-info.java index a28e907610c26..4fd2a7e4d54f3 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/module-info.java +++ b/x-pack/plugin/rank-rrf/src/main/java/module-info.java @@ -5,6 +5,8 @@ * 2.0. 
*/ +import org.elasticsearch.xpack.rank.rrf.RRFFeatures; + module org.elasticsearch.rank.rrf { requires org.apache.lucene.core; requires org.elasticsearch.base; @@ -13,4 +15,6 @@ requires org.elasticsearch.xcore; exports org.elasticsearch.xpack.rank.rrf; + + provides org.elasticsearch.features.FeatureSpecification with RRFFeatures; } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java new file mode 100644 index 0000000000000..816b25d53d375 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.rank.rrf; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +/** + * A set of features specifically for the rrf plugin. + */ +public class RRFFeatures implements FeatureSpecification { + + @Override + public Set getFeatures() { + return Set.of(RRFRetrieverBuilder.RRF_RETRIEVER_SUPPORTED); + } +} diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankPlugin.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankPlugin.java index 135f8907faa9b..4d7c60f00ec1c 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankPlugin.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankPlugin.java @@ -41,4 +41,9 @@ public List getNamedWriteables() { public List getNamedXContent() { return List.of(new NamedXContentRegistry.Entry(RankBuilder.class, new ParseField(NAME), RRFRankBuilder::fromXContent)); } + + @Override + public List> getRetrievers() { + return List.of(new RetrieverSpec<>(new ParseField(NAME), RRFRetrieverBuilder::fromXContent)); + } } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java new file mode 100644 index 0000000000000..ea8255f73af88 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.rank.rrf; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.rank.rrf.RRFRankPlugin.NAME; + +/** + * An rrf retriever is used to represent an rrf rank element, but + * as a tree-like structure. This retriever is a compound retriever + * meaning it has a set of child retrievers that each return a set of + * top docs that will then be combined and ranked according to the rrf + * formula. + */ +public final class RRFRetrieverBuilder extends RetrieverBuilder { + + public static final NodeFeature RRF_RETRIEVER_SUPPORTED = new NodeFeature("rrf_retriever_supported"); + + public static final ParseField RETRIEVERS_FIELD = new ParseField("retrievers"); + public static final ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); + public static final ParseField RANK_CONSTANT_FIELD = new ParseField("rank_constant"); + + public static final ObjectParser PARSER = new ObjectParser<>( + NAME, + RRFRetrieverBuilder::new + ); + + static { + PARSER.declareObjectArray((r, v) -> r.retrieverBuilders = v, (p, c) -> { + p.nextToken(); + String name = p.currentName(); + RetrieverBuilder retrieverBuilder = p.namedObject(RetrieverBuilder.class, name, c); + p.nextToken(); + return retrieverBuilder; + }, RETRIEVERS_FIELD); + PARSER.declareInt((r, v) -> r.windowSize = v, WINDOW_SIZE_FIELD); + PARSER.declareInt((r, v) -> r.rankConstant = v, RANK_CONSTANT_FIELD); + + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(RRF_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); + } + if (RRFRankPlugin.RANK_RRF_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { + throw LicenseUtils.newComplianceException("Reciprocal Rank Fusion (RRF)"); + } + return PARSER.apply(parser, context); + } + + List retrieverBuilders = Collections.emptyList(); + int windowSize = RRFRankBuilder.DEFAULT_WINDOW_SIZE; + int rankConstant = RRFRankBuilder.DEFAULT_RANK_CONSTANT; + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (compoundUsed) { + throw new IllegalArgumentException("[rank] cannot be used in children of compound retrievers"); + } + + for (RetrieverBuilder retrieverBuilder : retrieverBuilders) { + if (preFilterQueryBuilders.isEmpty() == false) { + retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); + } + + retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, true); + } + + searchSourceBuilder.rankBuilder(new RRFRankBuilder(windowSize, rankConstant)); + } + + // ---- FOR TESTING XCONTENT PARSING ---- + + @Override + public String getName() { + return 
NAME; + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + if (retrieverBuilders.isEmpty() == false) { + builder.startArray(RETRIEVERS_FIELD.getPreferredName()); + + for (RetrieverBuilder retrieverBuilder : retrieverBuilders) { + builder.startObject(); + builder.field(retrieverBuilder.getName()); + retrieverBuilder.toXContent(builder, params); + builder.endObject(); + } + + builder.endArray(); + } + + builder.field(WINDOW_SIZE_FIELD.getPreferredName(), windowSize); + builder.field(RANK_CONSTANT_FIELD.getPreferredName(), rankConstant); + } + + @Override + public boolean doEquals(Object o) { + RRFRetrieverBuilder that = (RRFRetrieverBuilder) o; + return windowSize == that.windowSize + && rankConstant == that.rankConstant + && Objects.equals(retrieverBuilders, that.retrieverBuilders); + } + + @Override + public int doHashCode() { + return Objects.hash(retrieverBuilders, windowSize, rankConstant); + } + + // ---- END FOR TESTING ---- +} diff --git a/x-pack/plugin/rank-rrf/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/rank-rrf/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..605e999b66c66 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.rank.rrf.RRFFeatures diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderParsingTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderParsingTests.java new file mode 100644 index 0000000000000..d63e8a14b59d5 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderParsingTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.rank.rrf; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RRFRetrieverBuilderParsingTests extends AbstractXContentTestCase { + + /** + * Creates a random {@link RRFRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. 
+ */ + public static RRFRetrieverBuilder createRandomRRFRetrieverBuilder() { + RRFRetrieverBuilder rrfRetrieverBuilder = new RRFRetrieverBuilder(); + + if (randomBoolean()) { + rrfRetrieverBuilder.windowSize = randomIntBetween(1, 10000); + } + + if (randomBoolean()) { + rrfRetrieverBuilder.rankConstant = randomIntBetween(1, 1000000); + } + + int retrieverCount = randomIntBetween(2, 50); + rrfRetrieverBuilder.retrieverBuilders = new ArrayList<>(retrieverCount); + + while (retrieverCount > 0) { + rrfRetrieverBuilder.retrieverBuilders.add(TestRetrieverBuilder.createRandomTestRetrieverBuilder()); + --retrieverCount; + } + + return rrfRetrieverBuilder; + } + + @Override + protected RRFRetrieverBuilder createTestInstance() { + return createRandomRRFRetrieverBuilder(); + } + + @Override + protected RRFRetrieverBuilder doParseInstance(XContentParser parser) throws IOException { + return RRFRetrieverBuilder.PARSER.apply(parser, new RetrieverParserContext(new SearchUsage(), nf -> true)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(RRFRankPlugin.NAME), + (p, c) -> RRFRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } +} diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java new file mode 100644 index 0000000000000..229f900ef3d15 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.rank.rrf; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; + +/** Tests for the rrf retriever. 
*/ +public class RRFRetrieverBuilderTests extends ESTestCase { + + /** Tests the rrf retriever validates on its own {@link NodeFeature} */ + public void testRetrieverVersions() throws IOException { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"rrf\":{}}}")) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + ParsingException iae = expectThrows( + ParsingException.class, + () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED) + ); + assertEquals("unknown retriever [rrf]", iae.getMessage()); + } + } + + /** Tests extraction errors related to compound retrievers. These tests require a compound retriever which is why they are here. */ + public void testRetrieverExtractionErrors() throws IOException { + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":" + + "[{\"standard\":{\"search_after\":[1]}},{\"standard\":{\"search_after\":[2]}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[search_after] cannot be used in children of compound retrievers", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":" + + "[{\"standard\":{\"terminate_after\":1}},{\"standard\":{\"terminate_after\":2}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[terminate_after] cannot be used in children of compound retrievers", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":" + "[{\"standard\":{\"sort\":[\"f1\"]}},{\"standard\":{\"sort\":[\"f2\"]}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[sort] cannot be used in children of compound retrievers", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":" + "[{\"standard\":{\"min_score\":1}},{\"standard\":{\"min_score\":2}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[min_score] cannot be used in children of compound retrievers", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":" + + "[{\"standard\":{\"collapse\":{\"field\":\"f0\"}}},{\"standard\":{\"collapse\":{\"field\":\"f1\"}}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[collapse] cannot be used in children of compound retrievers", iae.getMessage()); + } + + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":[{\"rrf_nl\":{}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new 
SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[rank] cannot be used in children of compound retrievers", iae.getMessage()); + } + } + + /** Tests max depth errors related to compound retrievers. These tests require a compound retriever which is why they are here. */ + public void testRetrieverBuilderParsingMaxDepth() throws IOException { + try ( + XContentParser parser = createParser( + JsonXContent.jsonXContent, + "{\"retriever\":{\"rrf_nl\":{\"retrievers\":[{\"rrf_nl\":{\"retrievers\":[{\"standard\":{}}]}}]}}}" + ) + ) { + SearchSourceBuilder ssb = new SearchSourceBuilder(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + assertEquals("[1:65] [rrf] failed to parse field [retrievers]", iae.getMessage()); + assertEquals( + "the nested depth of the [standard] retriever exceeds the maximum nested depth [2] for retrievers", + iae.getCause().getCause().getMessage() + ); + } + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new SearchModule(Settings.EMPTY, List.of()).getNamedXContents(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(RRFRankPlugin.NAME), + (p, c) -> RRFRetrieverBuilder.fromXContent(p, (RetrieverParserContext) c) + ) + ); + // Add an entry with no license requirement for unit testing + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(RRFRankPlugin.NAME + "_nl"), + (p, c) -> RRFRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } +} diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/license/100_license.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/license/100_license.yml index fb74c935c774c..c84c66f8aa31d 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/license/100_license.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/license/100_license.yml @@ -49,7 +49,7 @@ setup: indices.refresh: {} --- -"RRF Invalid License": +"rrf invalid license": - do: catch: forbidden @@ -75,3 +75,39 @@ setup: - match: { status: 403 } - match: { error.type: security_exception } - match: { error.reason: "current license is non-compliant for [Reciprocal Rank Fusion (RRF)]" } + +--- +"rrf retriever invalid license": + + - do: + catch: forbidden + search: + index: test + body: + track_total_hits: false + fields: [ "text" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector, + query_vector: [ 0.0 ], + k: 3, + num_candidates: 3 + } + }, + { + standard: { + query: { + term: { + text: term + } + } + } + } + ] + + - match: { status: 403 } + - match: { error.type: security_exception } + - match: { error.reason: "current license is non-compliant for [Reciprocal Rank Fusion (RRF)]" } diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml new file mode 100644 index 0000000000000..ec7a31ffd9ceb --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml @@ -0,0 +1,331 @@ +setup: + - skip: + version: ' - 8.12.99' + reason: 'rrf retriever added in 8.13' + + - do: + indices.create: + index: 
test + body: + settings: + number_of_shards: 5 + number_of_replicas: 0 + mappings: + properties: + text: + type: text + keyword: + type: keyword + vector: + type: dense_vector + dims: 1 + index: true + similarity: l2_norm + + - do: + index: + index: test + id: "1" + body: + text: "term term" + keyword: "other" + vector: [0.0] + + - do: + index: + index: test + id: "2" + body: + text: "other" + keyword: "other" + vector: [1.0] + + - do: + index: + index: test + id: "3" + body: + text: "term" + keyword: "keyword" + vector: [2.0] + + - do: + indices.refresh: {} + +--- +"rrf retriever with a standard retriever and a knn retriever": + + - do: + search: + index: test + body: + track_total_hits: false + fields: [ "text", "keyword" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector, + query_vector: [ 0.0 ], + k: 3, + num_candidates: 3 + } + }, + { + standard: { + query: { + term: { + text: term + } + } + } + } + ] + window_size: 100 + rank_constant: 1 + size: 10 + + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._rank: 1 } + - match: { hits.hits.0.fields.text.0: "term term" } + - match: { hits.hits.0.fields.keyword.0: "other" } + + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1._rank: 2 } + - match: { hits.hits.1.fields.text.0: "term" } + - match: { hits.hits.1.fields.keyword.0: "keyword" } + + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._rank: 3 } + - match: { hits.hits.2.fields.text.0: "other" } + - match: { hits.hits.2.fields.keyword.0: "other" } + +--- +"rrf retriever with multiple standard retrievers": + + - do: + search: + index: test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + retriever: + rrf: + retrievers: [ + { + "standard": { + "query": { + "term": { + "text": "term" + } + } + } + }, + { + "standard": { + "query": { + "match": { + "keyword": "keyword" + } + } + } + } + ] + window_size: 100 + rank_constant: 1 + size: 10 + + - match: { hits.total.value : 2 } + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._rank: 1 } + - match: { hits.hits.0.fields.text.0: "term" } + - match: { hits.hits.0.fields.keyword.0: "keyword" } + + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._rank: 2 } + - match: { hits.hits.1.fields.text.0: "term term" } + - match: { hits.hits.1.fields.keyword.0: "other" } + +--- +"rrf retriever with multiple standard retrievers and a knn retriever": + + - do: + search: + index: test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector, + query_vector: [ 0.0 ], + k: 3, + num_candidates: 3 + } + }, + { + "standard": { + "query": { + "term": { + "text": "term" + } + } + } + }, + { + "standard": { + "query": { + "match": { + "keyword": "keyword" + } + } + } + } + ] + window_size: 100 + rank_constant: 1 + size: 10 + + - match: { hits.total.value : 3 } + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._rank: 1 } + - match: { hits.hits.0.fields.text.0: "term" } + - match: { hits.hits.0.fields.keyword.0: "keyword" } + + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._rank: 2 } + - match: { hits.hits.1.fields.text.0: "term term" } + - match: { hits.hits.1.fields.keyword.0: "other" } + + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._rank: 3 } + - match: { hits.hits.2.fields.text.0: "other" } + - match: { hits.hits.2.fields.keyword.0: "other" } + +--- +"rrf retriever with multiple standard retrievers and multiple knn retriever": + + - do: + search: + size: 1 + index: 
test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector, + query_vector: [ 0.0 ], + k: 3, + num_candidates: 3 + } + }, + { + knn: { + field: vector, + query_vector: [ 1.0 ], + k: 3, + num_candidates: 3 + } + }, + { + "standard": { + "query": { + "term": { + "text": "term" + } + } + } + }, + { + "standard": { + "query": { + "match": { + "keyword": "keyword" + } + } + } + } + ] + window_size: 2 + rank_constant: 1 + + - match: { hits.total.value : 3 } + - length: { hits.hits: 1 } + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._rank: 1 } + - match: { hits.hits.0.fields.text.0: "term" } + - match: { hits.hits.0.fields.keyword.0: "keyword" } + +--- +"rrf retriever with multiple standard retrievers and multiple knn retriever and a filter": + + - do: + search: + index: test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + retriever: + rrf: + filter: [ + { + term: { + keyword: "keyword" + } + } + ] + retrievers: [ + { + knn: { + field: vector, + query_vector: [ 0.0 ], + k: 3, + num_candidates: 3 + } + }, + { + knn: { + field: vector, + query_vector: [ 1.0 ], + k: 3, + num_candidates: 3 + } + }, + { + "standard": { + "query": { + "term": { + "text": "term" + } + } + } + }, + { + "standard": { + "query": { + "match": { + "keyword": "keyword" + } + } + } + } + ] + + - match: { hits.total.value : 1 } + - length: { hits.hits: 1 } + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._rank: 1 } + - match: { hits.hits.0.fields.text.0: "term" } + - match: { hits.hits.0.fields.keyword.0: "keyword" } diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml new file mode 100644 index 0000000000000..90edcfbffd2b6 --- /dev/null +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml @@ -0,0 +1,342 @@ +setup: + - skip: + features: close_to + version: ' - 8.13.99' + reason: 'rrf retriever added in 8.14' + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 5 + number_of_replicas: 0 + mappings: + properties: + vector_asc: + type: dense_vector + dims: 1 + index: true + similarity: l2_norm + vector_desc: + type: dense_vector + dims: 1 + index: true + similarity: l2_norm + int: + type: integer + text: + type: text + + - do: + bulk: + index: test + refresh: true + body: | + { "index": {"_id" : "1"} } + { "vector_asc": [1.0], "vector_desc": [11.0], "int": 1, "text": "term 1" } + { "index": {"_id" : "2"} } + { "vector_asc": [2.0], "vector_desc": [10.0], "int": 2, "text": "term 2" } + { "index": {"_id" : "3"} } + { "vector_asc": [3.0], "vector_desc": [9.0], "int": 3, "text": "term 3" } + { "index": {"_id" : "4"} } + { "vector_asc": [4.0], "vector_desc": [8.0], "int": 1, "text": "term 4" } + { "index": {"_id" : "5"} } + { "vector_asc": [5.0], "vector_desc": [7.0], "int": 2, "text": "term 5" } + { "index": {"_id" : "6"} } + { "vector_asc": [6.0], "vector_desc": [6.0], "int": 3, "text": "term 6" } + { "index": {"_id" : "7"} } + { "vector_asc": [7.0], "vector_desc": [5.0], "int": 1, "text": "term 7" } + { "index": {"_id" : "8"} } + { "vector_asc": [8.0], "vector_desc": [4.0], "int": 2, "text": "term 8" } + { "index": {"_id" : "9"} } + { "vector_asc": [9.0], "vector_desc": [3.0], "int": 3, "text": "term 9" } + { "index": {"_id" : "10"} } + { "vector_asc": [10.0], 
"vector_desc": [2.0], "int": 1, "text": "term 10" } + { "index": {"_id" : "11"} } + { "vector_asc": [11.0], "vector_desc": [1.0], "int": 2, "text": "term 11" } + +--- +"rrf retriever using a knn retriever and a standard retriever with a scripted metric aggregation": + + - do: + search: + index: test + body: + size: 5 + track_total_hits: true + fields: [ "text" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector_asc, + query_vector: [ 5.0 ], + k: 5, + num_candidates: 11 + } + }, + { + "standard": { + query: { + bool: { + should: [ + { + term: { + text: { + value: "6", + boost: 10.0 + } + } + }, + { + term: { + text: { + value: "5", + boost: 7.0 + } + } + }, + { + term: { + text: { + value: "7", + boost: 7.0 + } + } + }, + { + term: { + text: { + value: "4", + boost: 3.0 + } + } + }, + { + term: { + text: { + value: "3", + boost: 2.0 + } + } + } + ] + } + } + } + } + ] + window_size: 100 + rank_constant: 1 + aggs: + sums: + scripted_metric: + init_script: | + state['sums'] = ['asc': [], 'text': []] + map_script: | + state['sums']['asc'].add($('vector_asc', null).getVector()[0]); + state['sums']['text'].add(Integer.parseInt($('text', null).substring(5))); + combine_script: | + [ + 'asc_total': state['sums']['asc'].stream().mapToDouble(v -> v).sum(), + 'text_total': state['sums']['text'].stream().mapToInt(v -> v).sum() + ] + reduce_script: | + [ + 'asc_total': states.stream().mapToDouble(v -> v['asc_total']).sum(), + 'text_total': states.stream().mapToInt(v -> v['text_total']).sum() + ] + + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.0._rank: 1 } + + - match: { hits.hits.1._id: "6" } + - match: { hits.hits.1._rank: 2 } + + - match: { hits.hits.2._id: "4" } + - match: { hits.hits.2._rank: 3 } + + - match: { hits.hits.3._id: "7" } + - match: { hits.hits.3._rank: 4 } + + - match: { hits.hits.4._id: "3" } + - match: { hits.hits.4._rank: 5 } + + - close_to: { aggregations.sums.value.asc_total: { value: 25.0, error: 0.001 }} + - match: { aggregations.sums.value.text_total: 25 } + +--- +"rrf retriever using multiple knn retrievers with a scripted metric aggregation": + + - do: + search: + index: test + body: + size: 1 + track_total_hits: true + fields: [ "text" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector_asc, + query_vector: [ 6.0 ], + k: 5, + num_candidates: 11 + } + }, + { + knn: { + field: vector_desc, + query_vector: [ 8.0 ], + k: 3, + num_candidates: 11 + } + } + ] + window_size: 3 + rank_constant: 1 + aggs: + sums: + scripted_metric: + init_script: | + state['sums'] = ['asc': [], 'desc': []] + map_script: | + state['sums']['asc'].add($('vector_asc', null).getVector()[0]); + state['sums']['desc'].add($('vector_desc', null).getVector()[0]) + combine_script: | + [ + 'asc_total': state['sums']['asc'].stream().mapToDouble(v -> v).sum(), + 'desc_total': state['sums']['desc'].stream().mapToDouble(v -> v).sum() + ] + reduce_script: | + [ + 'asc_total': states.stream().mapToDouble(v -> v['asc_total']).sum(), + 'desc_total': states.stream().mapToDouble(v -> v['desc_total']).sum() + ] + + - match: { hits.total.value: 6 } + + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.0._rank: 1 } + + - close_to: { aggregations.sums.value.asc_total: { value: 33.0, error: 0.001 }} + - close_to: { aggregations.sums.value.desc_total: { value: 39.0, error: 0.001 }} + +--- +"rrf retriever using multiple knn retrievers and a standard retriever with a scripted metric aggregation": + + - do: + search: + index: test + body: + size: 5 + track_total_hits: true + fields: 
[ "text" ] + retriever: + rrf: + retrievers: [ + { + knn: { + field: vector_asc, + query_vector: [ 6.0 ], + k: 5, + num_candidates: 11 + } + }, + { + knn: { + field: vector_desc, + query_vector: [ 6.0 ], + k: 5, + num_candidates: 11 + } + }, + { + standard: { + query: { + bool: { + should: [ + { + term: { + text: { + value: "6", + boost: 10.0 + } + } + }, + { + term: { + text: { + value: "5", + boost: 7.0 + } + } + }, + { + term: { + text: { + value: "7", + boost: 7.0 + } + } + }, + { + term: { + text: { + value: "4", + boost: 3.0 + } + } + } + ] + } + } + } + } + ] + window_size: 100 + rank_constant: 1 + aggs: + sums: + scripted_metric: + init_script: | + state['sums'] = ['asc': [], 'desc': [], 'text': []] + map_script: | + state['sums']['asc'].add($('vector_asc', null).getVector()[0]); + state['sums']['desc'].add($('vector_asc', null).getVector()[0]); + state['sums']['text'].add(Integer.parseInt($('text', null).substring(5))); + combine_script: | + [ + 'asc_total': state['sums']['asc'].stream().mapToDouble(v -> v).sum(), + 'desc_total': state['sums']['asc'].stream().mapToDouble(v -> v).sum(), + 'text_total': state['sums']['text'].stream().mapToInt(v -> v).sum() + ] + reduce_script: | + [ + 'asc_total': states.stream().mapToDouble(v -> v['asc_total']).sum(), + 'desc_total': states.stream().mapToDouble(v -> v['desc_total']).sum(), + 'text_total': states.stream().mapToInt(v -> v['text_total']).sum() + ] + + - match: { hits.hits.0._id: "6" } + - match: { hits.hits.0._rank: 1 } + + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._rank: 2 } + + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._rank: 3 } + + - match: { hits.hits.3._id: "4" } + - match: { hits.hits.3._rank: 4 } + + - match: { hits.hits.4._id: "8" } + - match: { hits.hits.4._rank: 5 } + + - close_to: { aggregations.sums.value.asc_total: { value: 30.0, error: 0.001 }} + - close_to: { aggregations.sums.value.desc_total: { value: 30.0, error: 0.001 }} + - match: { aggregations.sums.value.text_total: 30 } From ff8650a864019274040f2d0d4af31e18fd77abd9 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 12 Mar 2024 18:18:24 +0100 Subject: [PATCH 142/248] Support MatchType in EnrichExec (#106251) In preparation for supporting more ENRICH match types, this change includes the TransportVersion changes required for serialization support. Co-authored-by: Nhat Nguyen --- .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/esql/io/stream/PlanNamedTypes.java | 18 +++++++++++++++++- .../xpack/esql/plan/physical/EnrichExec.java | 13 +++++++++++-- .../esql/planner/LocalExecutionPlanner.java | 2 +- .../xpack/esql/planner/Mapper.java | 1 + 5 files changed, 31 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 418720284eda8..6ac2c24739805 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -142,6 +142,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0); public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0); public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0); + public static final TransportVersion ESQL_EXTENDED_ENRICH_TYPES = def(8_605_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 384bfd164b0a7..515d6cb5c92b3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -512,6 +512,9 @@ static EnrichExec readEnrichExec(PlanStreamInput in) throws IOException { final PhysicalPlan child = in.readPhysicalPlanNode(); final NamedExpression matchField = in.readNamedExpression(); final String policyName = in.readString(); + final String matchType = (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_EXTENDED_ENRICH_TYPES)) + ? in.readString() + : "match"; final String policyMatchField = in.readString(); final Map concreteIndices; final Enrich.Mode mode; @@ -526,7 +529,17 @@ static EnrichExec readEnrichExec(PlanStreamInput in) throws IOException { } concreteIndices = Map.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); } - return new EnrichExec(source, child, mode, matchField, policyName, policyMatchField, concreteIndices, readNamedExpressions(in)); + return new EnrichExec( + source, + child, + mode, + matchType, + matchField, + policyName, + policyMatchField, + concreteIndices, + readNamedExpressions(in) + ); } static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOException { @@ -534,6 +547,9 @@ static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOEx out.writePhysicalPlanNode(enrich.child()); out.writeNamedExpression(enrich.matchField()); out.writeString(enrich.policyName()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_EXTENDED_ENRICH_TYPES)) { + out.writeString(enrich.matchType()); + } out.writeString(enrich.policyMatchField()); if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { out.writeEnum(enrich.mode()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java index 0bfaa2db2be5d..b803d0c20d9de 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java @@ -21,6 +21,7 @@ public class EnrichExec extends UnaryExec implements EstimatesRowSize { private final Enrich.Mode mode; + private final String matchType; private final NamedExpression matchField; private final String policyName; private final String policyMatchField; @@ -41,6 +42,7 @@ public EnrichExec( Source source, PhysicalPlan child, Enrich.Mode mode, + String matchType, NamedExpression matchField, String policyName, String policyMatchField, @@ -49,6 +51,7 @@ public EnrichExec( ) { super(source, child); this.mode = mode; + this.matchType = matchType; this.matchField = matchField; this.policyName = policyName; this.policyMatchField = policyMatchField; @@ -63,6 +66,7 @@ protected NodeInfo info() { EnrichExec::new, child(), mode, + matchType, matchField, policyName, policyMatchField, @@ -73,13 +77,17 @@ protected NodeInfo info() { @Override public EnrichExec replaceChild(PhysicalPlan newChild) { - return new EnrichExec(source(), newChild, mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); + return 
new EnrichExec(source(), newChild, mode, matchType, matchField, policyName, policyMatchField, concreteIndices, enrichFields); } public Enrich.Mode mode() { return mode; } + public String matchType() { + return matchType; + } + public NamedExpression matchField() { return matchField; } @@ -118,6 +126,7 @@ public boolean equals(Object o) { if (super.equals(o) == false) return false; EnrichExec that = (EnrichExec) o; return mode.equals(that.mode) + && Objects.equals(matchType, that.matchType) && Objects.equals(matchField, that.matchField) && Objects.equals(policyName, that.policyName) && Objects.equals(policyMatchField, that.policyMatchField) @@ -127,6 +136,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); + return Objects.hash(super.hashCode(), mode, matchType, matchField, policyName, policyMatchField, concreteIndices, enrichFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index d7d2e99426a97..af66a1ea069aa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -468,7 +468,7 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon source.layout.get(enrich.matchField().id()).channel(), enrichLookupService, enrichIndex, - "match", // TODO: enrich should also resolve the match_type + enrich.matchType(), enrich.policyMatchField(), enrich.enrichFields() ), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index e9fb028d7c520..fd0801d35958d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -149,6 +149,7 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { enrich.source(), child, enrich.mode(), + enrich.policy().getType(), enrich.matchField(), BytesRefs.toString(enrich.policyName().fold()), enrich.policy().getMatchField(), From c33955e4907c9258ae5ddd09a4fdb24eebb8871a Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 12 Mar 2024 17:19:59 +0000 Subject: [PATCH 143/248] [ML] Make task settings optional when creating Cohere embedding models (#106241) --- .../services/cohere/CohereService.java | 3 +- .../services/cohere/CohereServiceTests.java | 36 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index 172a71bd45434..4f476e60ee2db 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; +import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; @@ -61,7 +62,7 @@ public void parseRequestConfig( ) { try { Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); - Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); CohereModel model = createModel( inferenceEntityId, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 356da0ece08af..dae4c20d00d78 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -123,6 +123,34 @@ public void testParseRequestConfig_CreatesACohereEmbeddingsModel() throws IOExce } } + public void testParseRequestConfig_OptionalTaskSettings() throws IOException { + try (var service = createCohereService()) { + + ActionListener modelListener = ActionListener.wrap(model -> { + MatcherAssert.assertThat(model, instanceOf(CohereEmbeddingsModel.class)); + + var embeddingsModel = (CohereEmbeddingsModel) model; + MatcherAssert.assertThat(embeddingsModel.getServiceSettings().getCommonSettings().getUri().toString(), is("url")); + MatcherAssert.assertThat(embeddingsModel.getServiceSettings().getCommonSettings().getModelId(), is("model")); + MatcherAssert.assertThat(embeddingsModel.getServiceSettings().getEmbeddingType(), is(CohereEmbeddingType.FLOAT)); + MatcherAssert.assertThat(embeddingsModel.getTaskSettings(), equalTo(CohereEmbeddingsTaskSettings.EMPTY_SETTINGS)); + MatcherAssert.assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + }, e -> fail("Model parsing should have succeeded " + e.getMessage())); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + CohereEmbeddingsServiceSettingsTests.getServiceSettingsMap("url", "model", CohereEmbeddingType.FLOAT), + getSecretSettingsMap("secret") + ), + Set.of(), + modelListener + ); + + } + } + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { try (var service = createCohereService()) { var failureListener = getModelListenerForException( @@ -955,6 +983,14 @@ private Map getRequestConfigMap( ); } + private Map getRequestConfigMap(Map serviceSettings, Map secretSettings) { + var builtServiceSettings = new HashMap<>(); + builtServiceSettings.putAll(serviceSettings); + builtServiceSettings.putAll(secretSettings); + + return new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings)); + } + private CohereService createCohereService() { return new CohereService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } From 9b16b5044c040f51a345ac209cfd1ba718abf24d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 12 Mar 2024 10:26:55 -0700 Subject: [PATCH 144/248] Allow running node-level reduction (#106204) This PR executes an empty reduction plan on data nodes at the node-level. 
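As a rough sketch (illustration only, not authoritative), the "empty" reduce plan wired up below in DataNodeRequestHandler is just an exchange sink reading straight from a local exchange source, so pages flow through unchanged:

    // Minimal sketch, assuming the surrounding ComputeService context where plan is the incoming ExchangeSinkExec.
    ExchangeSinkExec reducePlan = new ExchangeSinkExec(
        plan.source(),
        plan.output(),
        plan.isIntermediateAgg(),
        // pass-through: no partial-aggregation operators between sink and source yet
        new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg())
    );
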
With this change, we have the capability to replace this empty plan with an actual reduction plan later. Relates #99498 --- .../operator/exchange/ExchangeService.java | 2 +- .../xpack/esql/action/EsqlActionTaskIT.java | 15 ++- .../xpack/esql/plugin/ComputeService.java | 103 +++++++++++++++--- 3 files changed, 100 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 8065ac6f3086e..efb646daec0e5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -91,7 +91,7 @@ public void registerTransportHandler(TransportService transportService) { * * @throws IllegalStateException if a sink handler for the given id already exists */ - ExchangeSinkHandler createSinkHandler(String exchangeId, int maxBufferSize) { + public ExchangeSinkHandler createSinkHandler(String exchangeId, int maxBufferSize) { ExchangeSinkHandler sinkHandler = new ExchangeSinkHandler(blockFactory, maxBufferSize, threadPool::relativeTimeInMillis); if (sinks.putIfAbsent(exchangeId, sinkHandler) != null) { throw new IllegalStateException("sink exchanger for id [" + exchangeId + "] already exists"); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 60f174773a1b8..5d022cd25cdab 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -35,6 +35,7 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyIterable; @@ -58,6 +59,7 @@ public class EsqlActionTaskIT extends AbstractPausableIntegTestCase { private String READ_DESCRIPTION; private String MERGE_DESCRIPTION; + private String REDUCE_DESCRIPTION; @Before public void setup() { @@ -73,6 +75,9 @@ public void setup() { \\_ProjectOperator[projection = [0]] \\_LimitOperator[limit = 1000] \\_OutputOperator[columns = [sum(pause_me)]]"""; + REDUCE_DESCRIPTION = """ + \\_ExchangeSourceOperator[] + \\_ExchangeSinkOperator"""; } public void testTaskContents() throws Exception { @@ -136,7 +141,7 @@ public void testTaskContents() throws Exception { assertThat(luceneSources, greaterThanOrEqualTo(1)); assertThat(valuesSourceReaders, equalTo(1)); assertThat(exchangeSinks, greaterThanOrEqualTo(1)); - assertThat(exchangeSources, equalTo(1)); + assertThat(exchangeSources, equalTo(2)); } finally { scriptPermits.release(numberOfDocs()); try (EsqlQueryResponse esqlResponse = response.get()) { @@ -233,12 +238,12 @@ private List getTasksStarting() throws Exception { .setDetailed(true) .get() .getTasks(); - assertThat(tasks, hasSize(equalTo(2))); + assertThat(tasks, hasSize(equalTo(3))); for (TaskInfo task : tasks) { assertThat(task.action(), equalTo(DriverTaskRunner.ACTION_NAME)); DriverStatus status = (DriverStatus) task.status(); 
logger.info("task {} {}", task.description(), status); - assertThat(task.description(), either(equalTo(READ_DESCRIPTION)).or(equalTo(MERGE_DESCRIPTION))); + assertThat(task.description(), anyOf(equalTo(READ_DESCRIPTION), equalTo(MERGE_DESCRIPTION), equalTo(REDUCE_DESCRIPTION))); /* * Accept tasks that are either starting or have gone * immediately async. The coordinating task is likely @@ -265,11 +270,11 @@ private List getTasksRunning() throws Exception { .setDetailed(true) .get() .getTasks(); - assertThat(tasks, hasSize(equalTo(2))); + assertThat(tasks, hasSize(equalTo(3))); for (TaskInfo task : tasks) { assertThat(task.action(), equalTo(DriverTaskRunner.ACTION_NAME)); DriverStatus status = (DriverStatus) task.status(); - assertThat(task.description(), either(equalTo(READ_DESCRIPTION)).or(equalTo(MERGE_DESCRIPTION))); + assertThat(task.description(), anyOf(equalTo(READ_DESCRIPTION), equalTo(MERGE_DESCRIPTION), equalTo(REDUCE_DESCRIPTION))); if (task.description().equals(READ_DESCRIPTION)) { assertThat(status.status(), equalTo(DriverStatus.Status.RUNNING)); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 7af37a3eeb114..ba3d8564e1334 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -592,7 +592,7 @@ private class DataNodeRequestExecutor { private final DataNodeRequest request; private final CancellableTask parentTask; private final ExchangeSinkHandler exchangeSink; - private final ActionListener listener; + private final ActionListener listener; private final List driverProfiles; private final int maxConcurrentShards; private final ExchangeSink blockingSink; // block until we have completed on all shards or the coordinator has enough data @@ -602,13 +602,14 @@ private class DataNodeRequestExecutor { CancellableTask parentTask, ExchangeSinkHandler exchangeSink, int maxConcurrentShards, - ActionListener listener + List driverProfiles, + ActionListener listener ) { this.request = request; this.parentTask = parentTask; this.exchangeSink = exchangeSink; this.listener = listener; - this.driverProfiles = request.configuration().profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); + this.driverProfiles = driverProfiles; this.maxConcurrentShards = maxConcurrentShards; this.blockingSink = exchangeSink.createExchangeSink(); } @@ -649,10 +650,7 @@ private void onBatchCompleted(int lastBatchIndex, List batchProfi // don't return until all pages are fetched exchangeSink.addCompletionListener( ContextPreservingActionListener.wrapPreservingContext( - ActionListener.runBefore( - listener.map(nullValue -> new ComputeResponse(driverProfiles)), - () -> exchangeService.finishSinkHandler(request.sessionId(), null) - ), + ActionListener.runBefore(listener, () -> exchangeService.finishSinkHandler(request.sessionId(), null)), transportService.getThreadPool().getThreadContext() ) ); @@ -665,17 +663,94 @@ private void onFailure(Exception e) { } } + private void runComputeOnDataNode( + CancellableTask task, + String externalId, + PhysicalPlan reducePlan, + DataNodeRequest request, + ActionListener listener + ) { + final List collectedProfiles = request.configuration().profile() + ? 
Collections.synchronizedList(new ArrayList<>()) + : List.of(); + final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); + listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); + try (RefCountingListener refs = new RefCountingListener(listener.map(i -> new ComputeResponse(collectedProfiles)))) { + final AtomicBoolean cancelled = new AtomicBoolean(); + // run compute with target shards + var internalSink = exchangeService.createSinkHandler(request.sessionId(), request.pragmas().exchangeBufferSize()); + DataNodeRequestExecutor dataNodeRequestExecutor = new DataNodeRequestExecutor( + request, + task, + internalSink, + request.configuration().pragmas().maxConcurrentShardsPerNode(), + collectedProfiles, + ActionListener.runBefore(cancelOnFailure(task, cancelled, refs.acquire()), responseHeadersCollector::collect) + ); + dataNodeRequestExecutor.start(); + // run the node-level reduction + var externalSink = exchangeService.getSinkHandler(externalId); + var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); + exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); + ActionListener reductionListener = cancelOnFailure(task, cancelled, refs.acquire()); + runCompute( + task, + new ComputeContext( + request.sessionId(), + request.clusterAlias(), + List.of(), + request.configuration(), + exchangeSource, + externalSink + ), + reducePlan, + ActionListener.wrap(driverProfiles -> { + responseHeadersCollector.collect(); + if (request.configuration().profile()) { + collectedProfiles.addAll(driverProfiles); + } + // don't return until all pages are fetched + externalSink.addCompletionListener( + ActionListener.runBefore(reductionListener, () -> exchangeService.finishSinkHandler(externalId, null)) + ); + }, e -> { + exchangeService.finishSinkHandler(externalId, e); + reductionListener.onFailure(e); + }) + ); + } catch (Exception e) { + exchangeService.finishSinkHandler(externalId, e); + exchangeService.finishSinkHandler(request.sessionId(), e); + listener.onFailure(e); + } + } + private class DataNodeRequestHandler implements TransportRequestHandler { @Override public void messageReceived(DataNodeRequest request, TransportChannel channel, Task task) { - DataNodeRequestExecutor executor = new DataNodeRequestExecutor( - request, - (CancellableTask) task, - exchangeService.getSinkHandler(request.sessionId()), - request.configuration().pragmas().maxConcurrentShardsPerNode(), - new ChannelActionListener<>(channel) + final ActionListener listener = new ChannelActionListener<>(channel); + final ExchangeSinkExec reducePlan; + if (request.plan() instanceof ExchangeSinkExec plan) { + reducePlan = new ExchangeSinkExec( + plan.source(), + plan.output(), + plan.isIntermediateAgg(), + new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg()) + ); + } else { + listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); + return; + } + final String sessionId = request.sessionId(); + request = new DataNodeRequest( + sessionId + "[n]", // internal session + request.configuration(), + request.clusterAlias(), + request.shardIds(), + request.aliasFilters(), + request.plan() ); - executor.start(); + runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, listener); } } From e14db090db26a3a2f6fc9b2336433f79e92b3efd Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 12 Mar 2024 10:27:57 -0700 Subject: [PATCH 145/248] Support 
fill existing LongArray with stream input (#106217) Today, we can't use LongArray#readFrom(StreamInput in) in ES|QL because the returned big array is not tracked with the circuit breaker. We can integrate the circuit breaker with ReleasableLongArray; however, we don't know how many bytes we should track: the whole BytesReference or just the slice. This PR adds an alternative method, where we create a big array manually, then fill it with bytes from a stream input. If we are okay with this approach, I can make similar changes to other classes. --- .../elasticsearch/common/util/BigArrays.java | 6 ++++++ .../elasticsearch/common/util/BigLongArray.java | 16 ++++++++++++++++ .../elasticsearch/common/util/LongArray.java | 5 +++++ .../common/util/ReleasableLongArray.java | 5 +++++ .../common/util/BigArraysTests.java | 17 +++++++++++++++++ .../common/util/MockBigArrays.java | 5 +++++ 6 files changed, 54 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index d1367f41d9d87..36451932edc1a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -298,6 +298,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(size); out.write(array, 0, size); } + + @Override + public void fillWith(StreamInput in) throws IOException { + int len = in.readVInt(); + in.readBytes(array, 0, len); + } } private static class ByteArrayAsDoubleArrayWrapper extends AbstractArrayWrapper implements DoubleArray { diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 1044b5fb78ee4..0e0abf812b248 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -134,6 +135,21 @@ public void writeTo(StreamOutput out) throws IOException { writePages(out, Math.toIntExact(size), pages, Long.BYTES, LONG_PAGE_SIZE); } + @Override + public void fillWith(StreamInput in) throws IOException { + readPages(in, pages); + } + + static void readPages(StreamInput in, byte[][] pages) throws IOException { + int remained = in.readVInt(); + for (int i = 0; i < pages.length - 1; i++) { + int len = pages[0].length; + in.readBytes(pages[i], 0, len); + remained -= len; + } + in.readBytes(pages[pages.length - 1], 0, remained); + } + static void writePages(StreamOutput out, int size, byte[][] pages, int bytesPerValue, int pageSize) throws IOException { out.writeVInt(size * bytesPerValue); int lastPageEnd = size % pageSize; diff --git a/server/src/main/java/org/elasticsearch/common/util/LongArray.java b/server/src/main/java/org/elasticsearch/common/util/LongArray.java index bd293a1356406..59321d1957f4d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongArray.java @@ -42,6 +42,11 @@ static LongArray readFrom(StreamInput in) throws IOException { */ void fill(long fromIndex, long toIndex, long value); + /** + * Alternative of {@link #readFrom(StreamInput)} where the written bytes are loaded into an existing 
{@link LongArray} + */ + void fillWith(StreamInput in) throws IOException; + /** * Bulk set. */ diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java index 44764ea1e1715..2980713e2e652 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java @@ -59,6 +59,11 @@ public void fill(long fromIndex, long toIndex, long value) { throw new UnsupportedOperationException(); } + @Override + public void fillWith(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + @Override public void set(long index, byte[] buf, int offset, int len) { throw new UnsupportedOperationException(); diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 946effda16a76..3512a50d5578c 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -245,6 +246,22 @@ public void testLongArrayFill() { array2.close(); } + public void testSerializeLongArray() throws Exception { + final int len = randomIntBetween(1, 1000_000); + final LongArray array1 = bigArrays.newLongArray(len, randomBoolean()); + for (int i = 0; i < len; ++i) { + array1.set(i, randomLong()); + } + BytesStreamOutput out = new BytesStreamOutput(); + array1.writeTo(out); + final LongArray array2 = bigArrays.newLongArray(len, randomBoolean()); + array2.fillWith(out.bytes().streamInput()); + for (int i = 0; i < len; i++) { + assertThat(array2.get(i), equalTo(array1.get(i))); + } + Releasables.close(array1, array2); + } + public void testByteArrayBulkGet() { final byte[] array1 = new byte[randomIntBetween(1, 4000000)]; random().nextBytes(array1); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 902e089679f49..8ed9073d45625 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -539,6 +539,11 @@ public Collection getChildResources() { public void writeTo(StreamOutput out) throws IOException { in.writeTo(out); } + + @Override + public void fillWith(StreamInput streamInput) throws IOException { + in.fillWith(streamInput); + } } private class FloatArrayWrapper extends AbstractArrayWrapper implements FloatArray { From eea35742b7566c2e9c9dc3c6b81ab457b3592119 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Tue, 12 Mar 2024 13:52:26 -0400 Subject: [PATCH 146/248] [ESQL] Copy BinaryComparisons Translation logic into esql (#105711) Bbuilding on the work @alex-spies did in #105230 to start to decouple the ESQL translation layer from the QL translation layer. 
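As a rough illustration of what this translation layer does (a sketch only, using a made-up field name): when the right-hand side of a comparison folds to a constant, the filter is pushed down to Lucene as a query instead of being evaluated row by row by the compute engine, e.g.

    // Minimal sketch, assuming a pushable field attribute and its Source; "emp_no" is a hypothetical field.
    // FROM employees | WHERE emp_no == 10  becomes roughly:
    Query pushedDown = new TermQuery(source, "emp_no", 10);
    // Range-style comparisons (>, >=, <, <=) become range queries on the same field in the same way,
    // while comparisons between two fields (no foldable side) are not translated.
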
I did this by straight up copying the QL binary comparison code from ExpressionTranslators.BinaryComparisons to EsqlExpressionTranslators.BinaryComparisons. --- .../planner/EsqlExpressionTranslators.java | 136 ++++++++++++++++-- 1 file changed, 122 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 8ba8efac981af..33f8b4a5eddef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -9,8 +9,17 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEquals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -18,32 +27,33 @@ import org.elasticsearch.xpack.ql.expression.TypedAttribute; import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; import org.elasticsearch.xpack.ql.planner.ExpressionTranslators; import org.elasticsearch.xpack.ql.planner.TranslatorHandler; import org.elasticsearch.xpack.ql.querydsl.query.MatchAll; +import org.elasticsearch.xpack.ql.querydsl.query.NotQuery; import org.elasticsearch.xpack.ql.querydsl.query.Query; +import org.elasticsearch.xpack.ql.querydsl.query.RangeQuery; import org.elasticsearch.xpack.ql.querydsl.query.TermQuery; import org.elasticsearch.xpack.ql.querydsl.query.TermsQuery; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import 
org.elasticsearch.xpack.ql.util.Check; +import org.elasticsearch.xpack.versionfield.Version; import java.math.BigDecimal; import java.math.BigInteger; +import java.time.OffsetTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.xpack.ql.type.DataTypes.IP; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; +import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; public final class EsqlExpressionTranslators { @@ -108,18 +118,116 @@ static Query translate(InsensitiveEquals bc) { } } + /** + * This class is responsible for pushing the ES|QL Binary Comparison operators into Lucene. It covers: + *
+ * <ul>
+ * <li>{@link Equals}</li>
+ * <li>{@link NotEquals}</li>
+ * <li>{@link NullEquals}</li>
+ * <li>{@link GreaterThanOrEqual}</li>
+ * <li>{@link GreaterThan}</li>
+ * <li>{@link LessThanOrEqual}</li>
+ * <li>{@link LessThan}</li>
+ * </ul>
        + * + * In general, we are able to push these down when one of the arguments is a constant (i.e. is foldable). This class assumes + * that an earlier pass through the query has rearranged things so that the foldable value will be the right hand side + * input to the operation. + */ public static class BinaryComparisons extends ExpressionTranslator { @Override protected Query asQuery(BinaryComparison bc, TranslatorHandler handler) { - return doTranslate(bc, handler); - } - - public static Query doTranslate(BinaryComparison bc, TranslatorHandler handler) { + // TODO: Pretty sure this check is redundant with the one at the beginning of translate ExpressionTranslators.BinaryComparisons.checkBinaryComparison(bc); Query translated = translateOutOfRangeComparisons(bc); - return translated == null - ? ExpressionTranslators.BinaryComparisons.doTranslate(bc, handler) - : handler.wrapFunctionQuery(bc, bc.left(), () -> translated); + if (translated != null) { + return handler.wrapFunctionQuery(bc, bc.left(), () -> translated); + } + return handler.wrapFunctionQuery(bc, bc.left(), () -> translate(bc, handler)); + } + + static Query translate(BinaryComparison bc, TranslatorHandler handler) { + Check.isTrue( + bc.right().foldable(), + "Line {}:{}: Comparisons against fields are not (currently) supported; offender [{}] in [{}]", + bc.right().sourceLocation().getLineNumber(), + bc.right().sourceLocation().getColumnNumber(), + Expressions.name(bc.right()), + bc.symbol() + ); + TypedAttribute attribute = checkIsPushableAttribute(bc.left()); + Source source = bc.source(); + String name = handler.nameOf(attribute); + Object result = bc.right().fold(); + Object value = result; + String format = null; + boolean isDateLiteralComparison = false; + + // TODO: This type coersion layer is copied directly from the QL counterpart code. It's probably not necessary or desireable + // in the ESQL version. We should instead do the type conversions using our casting functions. + // for a date constant comparison, we need to use a format for the date, to make sure that the format is the same + // no matter the timezone provided by the user + if (value instanceof ZonedDateTime || value instanceof OffsetTime) { + DateFormatter formatter; + if (value instanceof ZonedDateTime) { + formatter = DateFormatter.forPattern("strict_date_optional_time_nanos"); + // RangeQueryBuilder accepts an Object as its parameter, but it will call .toString() on the ZonedDateTime instance + // which can have a slightly different format depending on the ZoneId used to create the ZonedDateTime + // Since RangeQueryBuilder can handle date as String as well, we'll format it as String and provide the format as well. + value = formatter.format((ZonedDateTime) value); + } else { + formatter = DateFormatter.forPattern("strict_hour_minute_second_fraction"); + value = formatter.format((OffsetTime) value); + } + format = formatter.pattern(); + isDateLiteralComparison = true; + } else if (attribute.dataType() == IP && value instanceof BytesRef bytesRef) { + value = DocValueFormat.IP.format(bytesRef); + } else if (attribute.dataType() == VERSION) { + // VersionStringFieldMapper#indexedValueForSearch() only accepts as input String or BytesRef with the String (i.e. not + // encoded) representation of the version as it'll do the encoding itself. 
+ if (value instanceof BytesRef bytesRef) { + value = new Version(bytesRef).toString(); + } else if (value instanceof Version version) { + value = version.toString(); + } + } else if (attribute.dataType() == UNSIGNED_LONG && value instanceof Long ul) { + value = unsignedLongAsNumber(ul); + } + + ZoneId zoneId = null; + if (DataTypes.isDateTime(attribute.dataType())) { + zoneId = bc.zoneId(); + } + if (bc instanceof GreaterThan) { + return new RangeQuery(source, name, value, false, null, false, format, zoneId); + } + if (bc instanceof GreaterThanOrEqual) { + return new RangeQuery(source, name, value, true, null, false, format, zoneId); + } + if (bc instanceof LessThan) { + return new RangeQuery(source, name, null, false, value, false, format, zoneId); + } + if (bc instanceof LessThanOrEqual) { + return new RangeQuery(source, name, null, false, value, true, format, zoneId); + } + if (bc instanceof Equals || bc instanceof NullEquals || bc instanceof NotEquals) { + name = pushableAttributeName(attribute); + + Query query; + if (isDateLiteralComparison) { + // dates equality uses a range query because it's the one that has a "format" parameter + query = new RangeQuery(source, name, value, true, value, true, format, zoneId); + } else { + query = new TermQuery(source, name, value); + } + if (bc instanceof NotEquals) { + query = new NotQuery(source, query); + } + return query; + } + + throw new QlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), bc); } private static Query translateOutOfRangeComparisons(BinaryComparison bc) { From bef6363649edfde2a8f6061bdf3e20c4c8313a7c Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Tue, 12 Mar 2024 20:19:21 +0100 Subject: [PATCH 147/248] Fix typo in text_expansion example (#106265) --- docs/reference/query-dsl/text-expansion-query.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 927b5d0a85886..27fca2bb56375 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -253,7 +253,7 @@ GET my-index/_search "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false + "only_score_pruned_tokens": true } } } From c3bc2712de49bd84d7dda28bec303d6b5d1c3ca7 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 12 Mar 2024 12:20:16 -0700 Subject: [PATCH 148/248] Mute HeapAttackIT (#106263) Started failing after we introduced the node-level reduction. 
Tracked at #106262 Relates #106204 --- .../org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index 8c87ef5977114..ffa817ed09677 100644 --- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -10,6 +10,7 @@ import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -57,6 +58,7 @@ * Tests that run ESQL queries that have, in the past, used so much memory they * crash Elasticsearch. */ +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106262") public class HeapAttackIT extends ESRestTestCase { @ClassRule From 3dc500862c5638fb90068142eac6f6f69423ae1f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 12 Mar 2024 15:46:04 -0400 Subject: [PATCH 149/248] ESQL: Support partially folding CASE (#106094) This adds support for folding the arms of a `CASE` statement. So `CASE(false, a, b)` becomes `b`. --- docs/changelog/106094.yaml | 5 + .../function/scalar/conditional/Case.java | 56 +++++- .../esql/optimizer/LogicalPlanOptimizer.java | 26 ++- .../scalar/conditional/CaseExtraTests.java | 169 ++++++++++++++++++ .../optimizer/LogicalPlanOptimizerTests.java | 11 ++ 5 files changed, 263 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/106094.yaml create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java diff --git a/docs/changelog/106094.yaml b/docs/changelog/106094.yaml new file mode 100644 index 0000000000000..4341164222338 --- /dev/null +++ b/docs/changelog/106094.yaml @@ -0,0 +1,5 @@ +pr: 106094 +summary: "ESQL: Support partially folding CASE" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index 17e096005fc1f..66756ffa14c60 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -88,7 +88,11 @@ public Case( for (int c = 0; c < conditionCount; c++) { conditions.add(new Condition(children().get(c * 2), children().get(c * 2 + 1))); } - elseValue = children().size() % 2 == 0 ? new Literal(source, null, NULL) : children().get(children().size() - 1); + elseValue = elseValueIsExplicit() ? children().get(children().size() - 1) : new Literal(source, null, NULL); + } + + private boolean elseValueIsExplicit() { + return children().size() % 2 == 1; } @Override @@ -175,7 +179,6 @@ public boolean foldable() { @Override public Object fold() { - // TODO can we partially fold? 
like CASE(false, foo, bar) -> bar for (Condition condition : conditions) { Boolean b = (Boolean) condition.condition.fold(); if (b != null && b) { @@ -185,6 +188,55 @@ public Object fold() { return elseValue.fold(); } + /** + * Fold the arms of {@code CASE} statements. + *
+ * <ol>
+ * <li> Conditions that evaluate to {@code false} are removed so + * {@code EVAL c=CASE(false, foo, b, bar, bort)} becomes + * {@code EVAL c=CASE(b, bar, bort)}. </li>
+ * <li> Conditions that evaluate to {@code true} stop evaluation and + * return themselves so {@code EVAL c=CASE(true, foo, bar)} becomes + * {@code EVAL c=foo}. </li>
+ * </ol>
        + * And those two combine so {@code EVAL c=CASE(false, foo, b, bar, true, bort, el)} becomes + * {@code EVAL c=CASE(b, bar, bort)}. + */ + public Expression partiallyFold() { + List newChildren = new ArrayList<>(children().size()); + boolean modified = false; + for (Condition condition : conditions) { + if (condition.condition.foldable() == false) { + newChildren.add(condition.condition); + newChildren.add(condition.value); + continue; + } + modified = true; + Boolean b = (Boolean) condition.condition.fold(); + if (b != null && b) { + newChildren.add(condition.value); + return finishPartialFold(newChildren); + } + } + if (modified == false) { + return this; + } + if (elseValueIsExplicit()) { + newChildren.add(elseValue); + } + return finishPartialFold(newChildren); + } + + private Expression finishPartialFold(List newChildren) { + if (newChildren.size() == 1) { + return newChildren.get(0); + } + return replaceChildren(newChildren); + } + @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { ElementType resultType = PlannerUtils.toElementType(dataType()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 7a5e39fea8f95..59f0d46bf618a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -84,6 +85,7 @@ import static org.elasticsearch.xpack.ql.expression.Expressions.asAttributes; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateEquals; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection; +import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.DOWN; public class LogicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -120,6 +122,7 @@ protected static Batch operators() { new SplitInWithFoldableValue(), new PropagateEvalFoldables(), new ConstantFolding(), + new PartiallyFoldCase(), // boolean new BooleanSimplification(), new LiteralsOnTheRight(), @@ -1586,7 +1589,6 @@ private static LogicalPlan normalize(Aggregate aggregate, AttributeMap newChildren = new ArrayList<>(exp.children()); @@ -1606,4 +1607,25 @@ protected Expression nullify(Expression exp, Expression nullExp) { return Literal.of(exp, null); } } + + /** + * Fold the arms of {@code CASE} statements. + *
<pre>{@code
+     * EVAL c=CASE(true, foo, bar)
+     * }</pre>
+ * becomes
+ * <pre>{@code
+     * EVAL c=foo
+     * }</pre>
        + */ + static class PartiallyFoldCase extends OptimizerRules.OptimizerExpressionRule { + PartiallyFoldCase() { + super(DOWN); + } + + @Override + protected Expression rule(Case c) { + return c.partiallyFold(); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java new file mode 100644 index 0000000000000..19cc49c180802 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase.field; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; + +/** + * Extra tests for {@code CASE} that don't fit into the parameterized + * {@link CaseTests}. + */ +public class CaseExtraTests extends ESTestCase { + public void testElseValueExplicit() { + assertThat( + new Case( + Source.synthetic("case"), + field("first_cond", DataTypes.BOOLEAN), + List.of(field("v", DataTypes.LONG), field("e", DataTypes.LONG)) + ).children(), + equalTo(List.of(field("first_cond", DataTypes.BOOLEAN), field("v", DataTypes.LONG), field("e", DataTypes.LONG))) + ); + } + + public void testElseValueImplied() { + assertThat( + new Case(Source.synthetic("case"), field("first_cond", DataTypes.BOOLEAN), List.of(field("v", DataTypes.LONG))).children(), + equalTo(List.of(field("first_cond", DataTypes.BOOLEAN), field("v", DataTypes.LONG))) + ); + } + + public void testPartialFoldDropsFirstFalse() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + List.of(field("first", DataTypes.LONG), field("last_cond", DataTypes.BOOLEAN), field("last", DataTypes.LONG)) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat( + c.partiallyFold(), + equalTo(new Case(Source.synthetic("case"), field("last_cond", DataTypes.BOOLEAN), List.of(field("last", DataTypes.LONG)))) + ); + } + + public void testPartialFoldNoop() { + Case c = new Case( + Source.synthetic("case"), + field("first_cond", DataTypes.BOOLEAN), + List.of(field("first", DataTypes.LONG), field("last", DataTypes.LONG)) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat(c.partiallyFold(), sameInstance(c)); + } + + public void testPartialFoldFirst() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, true, DataTypes.BOOLEAN), + List.of(field("first", DataTypes.LONG), field("last", DataTypes.LONG)) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat(c.partiallyFold(), equalTo(field("first", DataTypes.LONG))); + } + + public void testPartialFoldFirstAfterKeepingUnknown() { + Case c = new Case( + Source.synthetic("case"), + field("keep_me_cond", DataTypes.BOOLEAN), + List.of( + field("keep_me", 
DataTypes.LONG), + new Literal(Source.EMPTY, true, DataTypes.BOOLEAN), + field("first", DataTypes.LONG), + field("last", DataTypes.LONG) + ) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat( + c.partiallyFold(), + equalTo( + new Case( + Source.synthetic("case"), + field("keep_me_cond", DataTypes.BOOLEAN), + List.of(field("keep_me", DataTypes.LONG), field("first", DataTypes.LONG)) + ) + ) + ); + } + + public void testPartialFoldSecond() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + List.of( + field("first", DataTypes.LONG), + new Literal(Source.EMPTY, true, DataTypes.BOOLEAN), + field("second", DataTypes.LONG), + field("last", DataTypes.LONG) + ) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat(c.partiallyFold(), equalTo(field("second", DataTypes.LONG))); + } + + public void testPartialFoldSecondAfterDroppingFalse() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + List.of( + field("first", DataTypes.LONG), + new Literal(Source.EMPTY, true, DataTypes.BOOLEAN), + field("second", DataTypes.LONG), + field("last", DataTypes.LONG) + ) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat(c.partiallyFold(), equalTo(field("second", DataTypes.LONG))); + } + + public void testPartialFoldLast() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + List.of( + field("first", DataTypes.LONG), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + field("second", DataTypes.LONG), + field("last", DataTypes.LONG) + ) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat(c.partiallyFold(), equalTo(field("last", DataTypes.LONG))); + } + + public void testPartialFoldLastAfterKeepingUnknown() { + Case c = new Case( + Source.synthetic("case"), + field("keep_me_cond", DataTypes.BOOLEAN), + List.of( + field("keep_me", DataTypes.LONG), + new Literal(Source.EMPTY, false, DataTypes.BOOLEAN), + field("first", DataTypes.LONG), + field("last", DataTypes.LONG) + ) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat( + c.partiallyFold(), + equalTo( + new Case( + Source.synthetic("case"), + field("keep_me_cond", DataTypes.BOOLEAN), + List.of(field("keep_me", DataTypes.LONG), field("last", DataTypes.LONG)) + ) + ) + ); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index adcb1f611a343..1ce383f2327ad 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -3405,6 +3405,17 @@ public void testPushdownWithOverwrittenName() { } } + public void testPartiallyFoldCase() { + var plan = optimizedPlan(""" + FROM test + | EVAL c = CASE(true, emp_no, salary) + """); + + var eval = as(plan, Eval.class); + var languages = as(Alias.unwrap(eval.expressions().get(0)), FieldAttribute.class); + assertThat(languages.name(), is("emp_no")); + } + private LogicalPlan optimizedPlan(String query) { return plan(query); } From 24df2215dfae734382cd7d6c19c00cbd3e458e38 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 12 Mar 2024 13:29:07 -0700 Subject: [PATCH 150/248] Make ES|QL tests compatible with stateless (#106136) Co-authored-by: Elastic Machine --- 
x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle | 1 + x-pack/plugin/esql/qa/server/multi-node/build.gradle | 4 ++++ .../xpack/esql/qa/multi_node/EsqlClientYamlIT.java | 8 +------- .../xpack/esql/qa/rest/FieldExtractorTestCase.java | 6 ------ 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index 51c4a0250a74d..09397710bb856 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -6,6 +6,7 @@ import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' restResources { diff --git a/x-pack/plugin/esql/qa/server/multi-node/build.gradle b/x-pack/plugin/esql/qa/server/multi-node/build.gradle index 2f26003cf7ce4..5fbc4c57b39b7 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-node/build.gradle @@ -1,5 +1,8 @@ +import org.elasticsearch.gradle.util.GradleUtils + apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) @@ -7,6 +10,7 @@ dependencies { yamlRestTestImplementation project(xpackModule('esql:qa:server')) } +GradleUtils.extendSourceSet(project, "javaRestTest", "yamlRestTest") tasks.named('javaRestTest') { usesDefaultDistribution() diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java index a90cce0a566e7..d3ddae16e8af1 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; @@ -20,12 +19,7 @@ public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .nodes(2) - .setting("xpack.security.enabled", "false") - .setting("xpack.license.self_generated.type", "trial") - .build(); + public static ElasticsearchCluster cluster = Clusters.testCluster(); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java index 39c21651a7e02..d107f8a147fd6 100644 --- 
a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java @@ -1440,12 +1440,6 @@ private static void index(String name, String... docs) throws IOException { private static void createIndex(String name, CheckedConsumer mapping) throws IOException { Request request = new Request("PUT", "/" + name); XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject(); - index.startObject("settings"); - { - index.field("index.number_of_replicas", 0); - index.field("index.number_of_shards", 1); - } - index.endObject(); index.startObject("mappings"); mapping.accept(index); index.endObject(); From 8ebf5bf73d1b475f003c2901b85db4d7b57848aa Mon Sep 17 00:00:00 2001 From: Dianna Hohensee Date: Tue, 12 Mar 2024 16:56:47 -0400 Subject: [PATCH 151/248] Add stateless cluster state thread pool name (#106257) Relates ES-6834 --- .../repositories/blobstore/BlobStoreRepository.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 52cfa2fd5275f..7bc35de44ff9a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -181,6 +181,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final String STATELESS_SHARD_READ_THREAD_NAME = "stateless_shard_read"; public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write"; + public static final String STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME = "stateless_cluster_state_read_write"; public static final String SNAPSHOT_PREFIX = "snap-"; @@ -2001,7 +2002,8 @@ protected void assertSnapshotOrGenericThread() { ThreadPool.Names.GENERIC, STATELESS_SHARD_READ_THREAD_NAME, STATELESS_TRANSLOG_THREAD_NAME, - STATELESS_SHARD_WRITE_THREAD_NAME + STATELESS_SHARD_WRITE_THREAD_NAME, + STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME ); } From 22d015b550108fbd80ac92e7ec8851af0b0d8bec Mon Sep 17 00:00:00 2001 From: Dianna Hohensee Date: Tue, 12 Mar 2024 17:45:56 -0400 Subject: [PATCH 152/248] Blob repository related comment improvements (#104478) --- .../elasticsearch/common/blobstore/BlobContainer.java | 2 ++ .../org/elasticsearch/common/blobstore/BlobStore.java | 4 ++++ .../blobstore/support/AbstractBlobContainer.java | 2 +- .../common/blobstore/support/FilterBlobContainer.java | 10 ++++++++++ .../repositories/RepositoriesService.java | 10 +++++++++- .../repositories/blobstore/BlobStoreRepository.java | 2 +- .../blobcache/shared/SharedBlobCacheService.java | 3 +++ 7 files changed, 30 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 77c225f5d94cb..ae48de05a620b 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -25,6 +25,8 @@ /** * An interface for managing a repository of blob entries, where each blob entry is just a named group of bytes. 
+ * + * A BlobStore creates BlobContainers. */ public interface BlobContainer { diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java index 8fd04906b1803..4db5a7dd083da 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java @@ -15,6 +15,10 @@ /** * An interface for storing blobs. + * + * Creates a {@link BlobContainer} for each given {@link BlobPath} on demand in {@link #blobContainer(BlobPath)}. + * In implementation/practice, BlobStore often returns a BlobContainer seeded with a reference to the BlobStore. + * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} holds and manages a BlobStore. */ public interface BlobStore extends Closeable { diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java index ece00bf970037..dd60fc27c814c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.blobstore.BlobPath; /** - * A base abstract blob container that implements higher level container methods. + * A base abstract blob container that adds some methods implementations that are often identical across many subclasses. */ public abstract class AbstractBlobContainer implements BlobContainer { diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java index d231e5046e1c8..571ef6c434fdc 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java @@ -25,6 +25,12 @@ import java.util.Objects; import java.util.stream.Collectors; +/** + * A blob container that by default delegates all methods to an internal BlobContainer. Implementations must define {@link #wrapChild} so + * that the abstraction is complete: so that the internal BlobContainer instance cannot leak out of this wrapper. + * + * Inheritors can safely modify needed methods while continuing to have access to a complete BlobContainer implementation beneath. + */ public abstract class FilterBlobContainer implements BlobContainer { private final BlobContainer delegate; @@ -33,6 +39,10 @@ public FilterBlobContainer(BlobContainer delegate) { this.delegate = Objects.requireNonNull(delegate); } + /** + * Wraps up any instances of the internal BlobContainer type in another BlobContainer type (presumably the implementation's type). + * Ensures that the internal {@link #delegate} type never leaks out of the BlobContainer wrapper type. 
+ */ protected abstract BlobContainer wrapChild(BlobContainer child); @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 095f70a3e5966..f7a2a605a18bd 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -69,7 +69,15 @@ import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_UUID_SETTING_KEY; /** - * Service responsible for maintaining and providing access to snapshot repositories on nodes. + * Service responsible for maintaining and providing access to multiple repositories. + * + * The elected master creates new repositories on request and persists the {@link RepositoryMetadata} in the cluster state. The cluster + * state update then goes out to the rest of the cluster nodes so that all nodes know how to access the new repository. This class contains + * factory information to create new repositories, and provides access to and maintains the lifecycle of repositories. New nodes can easily + * find all the repositories via the cluster state after joining a cluster. + * + * {@link #repository(String)} can be used to fetch a repository. {@link #createRepository(RepositoryMetadata)} does the heavy lifting of + * creation. {@link #applyClusterState(ClusterChangedEvent)} handles adding and removing repositories per cluster state updates. */ public class RepositoriesService extends AbstractLifecycleComponent implements ClusterStateApplier { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 7bc35de44ff9a..80d6729c6812b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -165,7 +165,7 @@ /** * BlobStore - based implementation of Snapshot Repository *

        - * This repository works with any {@link BlobStore} implementation. The blobStore could be (and preferred) lazy initialized in + * This repository works with any {@link BlobStore} implementation. The blobStore could be (and is preferably) lazily initialized in * {@link #createBlobStore()}. *

        * For in depth documentation on how exactly implementations of this class interact with the snapshot functionality please refer to the diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index d4c7c04c5b26e..5b767f2461f6b 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -62,6 +62,9 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +/** + * A caching layer on a local node to minimize network roundtrips to the remote blob store. + */ public class SharedBlobCacheService implements Releasable { private static final String SHARED_CACHE_SETTINGS_PREFIX = "xpack.searchable.snapshot.shared_cache."; From 20f8e2f90aaf39ecb133c72112ac70211512de05 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 12 Mar 2024 14:55:10 -0700 Subject: [PATCH 153/248] Chain mrjar source sets (#106092) When building MR jars we may have code for several java versions. For example, we could have main21 and main22. This commit adjust the sourcesets to chain together, so that eg main22 can utilize code from main21 (which in turn can already use code from main). --- .../gradle/internal/MrjarPlugin.java | 28 ++++++++++++++++--- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 8c5d671e00fe7..46fa38a44f564 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -30,6 +30,9 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -56,24 +59,41 @@ public void apply(Project project) { var javaExtension = project.getExtensions().getByType(JavaPluginExtension.class); var srcDir = project.getProjectDir().toPath().resolve("src"); + List mainVersions = new ArrayList<>(); try (var subdirStream = Files.list(srcDir)) { for (Path sourceset : subdirStream.toList()) { assert Files.isDirectory(sourceset); String sourcesetName = sourceset.getFileName().toString(); Matcher sourcesetMatcher = MRJAR_SOURCESET_PATTERN.matcher(sourcesetName); if (sourcesetMatcher.matches()) { - int javaVersion = Integer.parseInt(sourcesetMatcher.group(1)); - addMrjarSourceset(project, javaExtension, sourcesetName, javaVersion); + mainVersions.add(Integer.parseInt(sourcesetMatcher.group(1))); } } } catch (IOException e) { throw new UncheckedIOException(e); } + + Collections.sort(mainVersions); + List parentSourceSets = new ArrayList<>(); + parentSourceSets.add(SourceSet.MAIN_SOURCE_SET_NAME); + for (int javaVersion : mainVersions) { + String sourcesetName = "main" + javaVersion; + addMrjarSourceset(project, javaExtension, sourcesetName, parentSourceSets, javaVersion); + parentSourceSets.add(sourcesetName); + } } - private void addMrjarSourceset(Project project, JavaPluginExtension javaExtension, String sourcesetName, int javaVersion) { + private void 
addMrjarSourceset( + Project project, + JavaPluginExtension javaExtension, + String sourcesetName, + List parentSourceSets, + int javaVersion + ) { SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourcesetName); - GradleUtils.extendSourceSet(project, SourceSet.MAIN_SOURCE_SET_NAME, sourcesetName); + for (String parentSourceSetName : parentSourceSets) { + GradleUtils.extendSourceSet(project, parentSourceSetName, sourcesetName); + } var jarTask = project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME); jarTask.configure(task -> { From 026fab92b381c9490b3906077f2649931bfe52ac Mon Sep 17 00:00:00 2001 From: Volodymyr Krasnikov <129072588+volodk85@users.noreply.github.com> Date: Tue, 12 Mar 2024 15:23:22 -0700 Subject: [PATCH 154/248] debugging, decrease log level in IT test (#106269) --- .../recovery/SnapshotBasedIndexRecoveryIT.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 8e506d6ac8b51..1465911490f61 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -70,6 +70,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; @@ -936,6 +937,10 @@ public void testDisabledSnapshotBasedRecoveryUsesSourceFiles() throws Exception } } + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/87568", + value = "org.elasticsearch.indices.recovery:DEBUG" + ) public void testRecoveryConcurrentlyWithIndexing() throws Exception { internalCluster().startDataOnlyNode(); String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); From ad47ffbb2d5029ddba2305084d4d9df6df13ef6d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 12 Mar 2024 15:41:09 -0700 Subject: [PATCH 155/248] Mute SearchTransportTelemetryTests.testSearchTransportMetricsQueryThenFetch see https://github.com/elastic/elasticsearch/issues/104184 --- .../search/TelemetryMetrics/SearchTransportTelemetryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java index 93712880f2ce4..0a9b498bc0562 100644 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java @@ -81,6 +81,7 @@ public void testSearchTransportMetricsDfsQueryThenFetch() throws InterruptedExce resetMeter(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") public void 
testSearchTransportMetricsQueryThenFetch() throws InterruptedException { assertSearchHitsWithoutFailures( client().prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), From 4e09a86e7100a3aa6e26e450193c66fb42ecf3d8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 12 Mar 2024 15:47:11 -0700 Subject: [PATCH 156/248] Mute testProfile (#106275) Tracked at #106273 --- .../elasticsearch/xpack/esql/action/CrossClustersQueryIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index c9ee644040a43..ca93f8d090996 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -121,6 +121,7 @@ public void testMetadataIndex() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106273") public void testProfile() { assumeTrue("pragmas only enabled on snapshot builds", Build.current().isSnapshot()); final int localOnlyProfiles; From 99719f2c8254e79daa182ce02ffde9c7f27baeba Mon Sep 17 00:00:00 2001 From: Dianna Hohensee Date: Tue, 12 Mar 2024 19:23:15 -0400 Subject: [PATCH 157/248] Shorten cluster state thread pool name (#106274) The thread pool name is used to report metrics, and MetricNameValidator enforces a max of 30 characters. --- .../repositories/blobstore/BlobStoreRepository.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 80d6729c6812b..41e849b4d2ebd 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -181,7 +181,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final String STATELESS_SHARD_READ_THREAD_NAME = "stateless_shard_read"; public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write"; - public static final String STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME = "stateless_cluster_state_read_write"; + public static final String STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME = "stateless_cluster_state"; public static final String SNAPSHOT_PREFIX = "snap-"; From d471ccb5bb4e5226f843ae688b0b2b205a842131 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 13 Mar 2024 09:24:51 +0200 Subject: [PATCH 158/248] Adding support for hex-encoded byte vectors on knn-search (#105393) --- docs/changelog/105393.yaml | 5 + docs/reference/query-dsl/knn-query.asciidoc | 4 +- docs/reference/rest-api/common-parms.asciidoc | 2 +- docs/reference/search/knn-search.asciidoc | 2 +- .../search-your-data/knn-search.asciidoc | 21 ++ ...70_knn_search_hex_encoded_byte_vectors.yml | 163 ++++++++++ ...175_knn_query_hex_encoded_byte_vectors.yml | 162 ++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../common/io/stream/StreamInput.java | 14 + .../common/io/stream/StreamOutput.java | 13 + 
.../vectors/DenseVectorFieldMapper.java | 286 ++++++++++++------ .../search/retriever/KnnRetrieverBuilder.java | 10 +- .../search/vectors/ExactKnnQueryBuilder.java | 31 +- .../vectors/KnnScoreDocQueryBuilder.java | 30 +- .../search/vectors/KnnSearchBuilder.java | 76 +++-- .../search/vectors/KnnVectorQueryBuilder.java | 72 +++-- .../search/vectors/VectorData.java | 168 ++++++++++ .../vectors/DenseVectorFieldTypeTests.java | 7 +- ...AbstractKnnVectorQueryBuilderTestCase.java | 53 ++-- .../KnnByteVectorQueryBuilderTests.java | 9 + ...a => KnnFloatVectorQueryBuilderTests.java} | 11 +- .../search/vectors/KnnSearchBuilderTests.java | 4 +- .../search/vectors/VectorDataTests.java | 199 ++++++++++++ .../AbstractQueryVectorBuilderTestCase.java | 2 +- 24 files changed, 1136 insertions(+), 209 deletions(-) create mode 100644 docs/changelog/105393.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml create mode 100644 server/src/main/java/org/elasticsearch/search/vectors/VectorData.java rename server/src/test/java/org/elasticsearch/search/vectors/{KnnVectorQueryBuilderTests.java => KnnFloatVectorQueryBuilderTests.java} (56%) create mode 100644 server/src/test/java/org/elasticsearch/search/vectors/VectorDataTests.java diff --git a/docs/changelog/105393.yaml b/docs/changelog/105393.yaml new file mode 100644 index 0000000000000..4a4cc299b7bd7 --- /dev/null +++ b/docs/changelog/105393.yaml @@ -0,0 +1,5 @@ +pr: 105393 +summary: Adding support for hex-encoded byte vectors on knn-search +area: Vector Search +type: feature +issues: [] diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc index e9aeea68c06f7..c11782f524950 100644 --- a/docs/reference/query-dsl/knn-query.asciidoc +++ b/docs/reference/query-dsl/knn-query.asciidoc @@ -87,8 +87,8 @@ the top `size` results. `query_vector`:: + -- -(Required, array of floats) Query vector. Must have the same number of dimensions -as the vector field you are searching against. +(Required, array of floats or string) Query vector. Must have the same number of dimensions +as the vector field you are searching against. Must be either an array of floats or a hex-encoded byte vector. -- `num_candidates`:: diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 6757b6be24207..062f832b6f79d 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -597,7 +597,7 @@ end::knn-num-candidates[] tag::knn-query-vector[] Query vector. Must have the same number of dimensions as the vector field you -are searching against. +are searching against. Must be either an array of floats or a hex-encoded byte vector. 
end::knn-query-vector[] tag::knn-similarity[] diff --git a/docs/reference/search/knn-search.asciidoc b/docs/reference/search/knn-search.asciidoc index 136b53388baf9..7947c688a807c 100644 --- a/docs/reference/search/knn-search.asciidoc +++ b/docs/reference/search/knn-search.asciidoc @@ -121,7 +121,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-k] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] `query_vector`:: -(Required, array of floats) +(Required, array of floats or string) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] ==== diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index ab65b834c0ce7..030c10a91d005 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -242,6 +242,27 @@ POST byte-image-index/_search // TEST[s/"k": 10/"k": 3/] // TEST[s/"num_candidates": 100/"num_candidates": 3/] + +_Note_: In addition to the standard byte array, one can also provide a hex-encoded string value +for the `query_vector` param. As an example, the search request above can also be expressed as follows, +which would yield the same results +[source,console] +---- +POST byte-image-index/_search +{ + "knn": { + "field": "byte-image-vector", + "query_vector": "fb09", + "k": 10, + "num_candidates": 100 + }, + "fields": [ "title" ] +} +---- +// TEST[continued] +// TEST[s/"k": 10/"k": 3/] +// TEST[s/"num_candidates": 100/"num_candidates": 3/] + [discrete] [[knn-search-quantized-example]] ==== Byte quantized kNN search diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml new file mode 100644 index 0000000000000..71f65220eba1e --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml @@ -0,0 +1,163 @@ +setup: + - skip: + version: ' - 8.13.99' + reason: 'hex encoding for byte vectors was added in 8.14' + + - do: + indices.create: + index: knn_hex_vector_index + body: + settings: + number_of_shards: 1 + mappings: + dynamic: false + properties: + my_vector_byte: + type: dense_vector + dims: 3 + index : true + similarity : l2_norm + element_type: byte + my_vector_float: + type: dense_vector + dims: 3 + index: true + element_type: float + similarity : l2_norm + + # [-128, 127, 10] - is encoded as '807f0a' + - do: + index: + index: knn_hex_vector_index + id: "1" + body: + my_vector_byte: "807f0a" + + + # [0, 1, 0] - is encoded as '000100' + - do: + index: + index: knn_hex_vector_index + id: "2" + body: + my_vector_byte: "000100" + + # [64, -10, -30] - is encoded as '40f6e2' + - do: + index: + index: knn_hex_vector_index + id: "3" + body: + my_vector_byte: "40f6e2" + + - do: + index: + index: knn_hex_vector_index + id: "4" + body: + my_vector_float: [10.5, -10, 1024] + + - do: + indices.refresh: {} + +--- +"Fail to index hex-encoded vector on float field": + + # [-128, 127, 10] - is encoded as '807f0a' + - do: + catch: /Failed to parse object./ + index: + index: knn_hex_vector_index + id: "5" + body: + my_vector_float: "807f0a" + +--- +"Knn search with hex string for float field" : + # [64, 10, -30] - is encoded as '400ae2' + # this will be properly decoded but only because: + # (i) the provided 
input is compatible as the values are within [Byte.MIN_VALUE, Byte.MAX_VALUE] range + # (ii) we do not differentiate between byte and float fields when initially parsing a query even for hex + # (iii) we support expansion from byte to float + + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + knn: + field: my_vector_float + query_vector: "400ae2" + num_candidates: 100 + k: 10 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "4" } + +--- +"Knn search with hex string for byte field" : + # [64, 10, -30] - is encoded as '400ae2' + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + knn: + field: my_vector_byte + query_vector: "400ae2" + num_candidates: 100 + k: 10 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "1" } + +--- +"Knn search with hex string for byte field - dimensions mismatch" : + # [64, 10, -30, 10] - is encoded as '400ae20a' + - do: + catch: /the query vector has a different dimension \[4\] than the index vectors \[3\]/ + search: + index: knn_hex_vector_index + body: + size: 3 + knn: + field: my_vector_byte + query_vector: "400ae20a" + num_candidates: 100 + k: 10 + + +--- +"Knn search with hex string for byte field - cannot decode string" : + # '40af20a' is garbage :) + - do: + catch: /failed to parse field \[query_vector\]/ + search: + index: knn_hex_vector_index + body: + size: 3 + knn: + field: my_vector_byte + query_vector: "40af20a" + num_candidates: 100 + k: 10 + +--- +"Knn search with standard byte vector matching against hex-encoded indexed docs" : + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + knn: + field: my_vector_byte + query_vector: [64, 10, -30] + num_candidates: 100 + k: 10 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "1" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml new file mode 100644 index 0000000000000..9f850400a09cd --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml @@ -0,0 +1,162 @@ +setup: + - skip: + version: ' - 8.13.99' + reason: 'hex encoding for byte vectors was added in 8.14' + + - do: + indices.create: + index: knn_hex_vector_index + body: + settings: + number_of_shards: 1 + mappings: + dynamic: false + properties: + my_vector_byte: + type: dense_vector + dims: 3 + index : true + similarity : l2_norm + element_type: byte + my_vector_float: + type: dense_vector + dims: 3 + index: true + element_type: float + similarity : l2_norm + + # [-128, 127, 10] - is encoded as '807f0a' + - do: + index: + index: knn_hex_vector_index + id: "1" + body: + my_vector_byte: "807f0a" + + + # [0, 1, 0] - is encoded as '000100' + - do: + index: + index: knn_hex_vector_index + id: "2" + body: + my_vector_byte: "000100" + + # [64, -10, -30] - is encoded as '40f6e2' + - do: + index: + index: knn_hex_vector_index + id: "3" + body: + my_vector_byte: "40f6e2" + + - do: + index: + index: knn_hex_vector_index + id: "4" + body: + my_vector_float: [10.5, -10, 1024] + + - do: + indices.refresh: {} + +--- +"Fail to index hex-encoded vector on float field": + + # [-128, 127, 10] - is encoded as '807f0a' + - do: + catch: /Failed to parse 
object./ + index: + index: knn_hex_vector_index + id: "5" + body: + my_vector_float: "807f0a" + +--- +"Knn query with hex string for float field" : + # [64, 10, -30] - is encoded as '400ae2' + # this will be properly decoded but only because: + # (i) the provided input is compatible as the values are within [Byte.MIN_VALUE, Byte.MAX_VALUE] range + # (ii) we do not differentiate between byte and float fields when initially parsing a query even for hex + # (iii) we support expansion from byte to float + + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + query: + knn: + field: my_vector_float + query_vector: "400ae2" + num_candidates: 100 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "4" } + +--- +"Knn query with hex string for byte field" : + # [64, 10, -30] - is encoded as '400ae2' + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + query: + knn: + field: my_vector_byte + query_vector: "400ae2" + num_candidates: 100 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "1" } + +--- +"Knn query with hex string for byte field - dimensions mismatch" : + # [64, 10, -30, 10] - is encoded as '400ae20a' + - do: + catch: /the query vector has a different dimension \[4\] than the index vectors \[3\]/ + search: + index: knn_hex_vector_index + body: + size: 3 + query: + knn: + field: my_vector_byte + query_vector: "400ae20a" + num_candidates: 100 + +--- +"Knn query with hex string for byte field - cannot decode string" : + # '40af20a' is garbage :) + - do: + catch: /failed to parse field \[query_vector\]/ + search: + index: knn_hex_vector_index + body: + size: 3 + query: + knn: + field: my_vector_byte + query_vector: "40af20a" + num_candidates: 100 + +--- +"Knn query with standard byte vector matching against hex-encoded indexed docs" : + - do: + search: + index: knn_hex_vector_index + body: + size: 3 + query: + knn: + field: my_vector_byte + query_vector: [64, 10, -30] + num_candidates: 100 + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "1" } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6ac2c24739805..a83b0ea0c90e5 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -143,6 +143,7 @@ static TransportVersion def(int id) { public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0); public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0); public static final TransportVersion ESQL_EXTENDED_ENRICH_TYPES = def(8_605_00_0); + public static final TransportVersion KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING = def(8_606_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 7281616a8d25f..c4952b8cae51d 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -693,6 +693,20 @@ public byte[] readOptionalByteArray() throws IOException { return null; } + /** + * Reads an optional float array. It's effectively the same as readFloatArray, except + * it supports null. 
+ * @return a float array or null + * @throws IOException + */ + @Nullable + public float[] readOptionalFloatArray() throws IOException { + if (readBoolean()) { + return readFloatArray(); + } + return null; + } + /** * Same as {@link #readMap(Writeable.Reader, Writeable.Reader)} but always reading string keys. */ diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 69a5135215eba..33fb000c1bca2 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -534,6 +534,19 @@ public void writeOptionalByteArray(@Nullable byte[] array) throws IOException { } } + /** + * Writes a float array, for null arrays it writes false. + * @param array an array or null + */ + public void writeOptionalFloatArray(@Nullable float[] array) throws IOException { + if (array == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeFloatArray(array); + } + } + public void writeGenericMap(@Nullable Map map) throws IOException { writeGenericValue(map); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 47efa0ca49771..22b8549e14969 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -43,6 +43,7 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.VectorUtil; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -69,6 +70,7 @@ import org.elasticsearch.search.vectors.ESDiversifyingChildrenFloatKnnVectorQuery; import org.elasticsearch.search.vectors.ESKnnByteVectorQuery; import org.elasticsearch.search.vectors.ESKnnFloatVectorQuery; +import org.elasticsearch.search.vectors.VectorData; import org.elasticsearch.search.vectors.VectorSimilarityQuery; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -80,6 +82,7 @@ import java.nio.ByteOrder; import java.time.ZoneId; import java.util.Arrays; +import java.util.HexFormat; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -88,6 +91,7 @@ import java.util.function.Supplier; import java.util.stream.Stream; +import static org.elasticsearch.common.Strings.format; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** @@ -338,11 +342,16 @@ void checkVectorMagnitude( } @Override - public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + public double computeDotProduct(VectorData vectorData) { + return VectorUtil.dotProduct(vectorData.asByteVector(), vectorData.asByteVector()); + } + + private VectorData parseVectorArray(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { int index = 0; byte[] vector = new byte[fieldMapper.fieldType().dims]; float squaredMagnitude = 0; - for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { + for (XContentParser.Token token = 
context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser() + .nextToken()) { fieldMapper.checkDimensionExceeded(index, context); ensureExpectedToken(Token.VALUE_NUMBER, token, context.parser()); final int value; @@ -383,44 +392,49 @@ public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFie } fieldMapper.checkDimensionMatches(index, context); checkVectorMagnitude(fieldMapper.fieldType().similarity, errorByteElementsAppender(vector), squaredMagnitude); + return VectorData.fromBytes(vector); + } + + private VectorData parseHexEncodedVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + byte[] decodedVector = HexFormat.of().parseHex(context.parser().text()); + fieldMapper.checkDimensionMatches(decodedVector.length, context); + VectorData vectorData = VectorData.fromBytes(decodedVector); + double squaredMagnitude = computeDotProduct(vectorData); + checkVectorMagnitude( + fieldMapper.fieldType().similarity, + errorByteElementsAppender(decodedVector), + (float) squaredMagnitude + ); + return vectorData; + } + + @Override + VectorData parseKnnVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + XContentParser.Token token = context.parser().currentToken(); + return switch (token) { + case START_ARRAY -> parseVectorArray(context, fieldMapper); + case VALUE_STRING -> parseHexEncodedVector(context, fieldMapper); + default -> throw new ParsingException( + context.parser().getTokenLocation(), + format("Unsupported type [%s] for provided value [%s]", token, context.parser().text()) + ); + }; + } + + @Override + public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + VectorData vectorData = parseKnnVector(context, fieldMapper); Field field = createKnnVectorField( fieldMapper.fieldType().name(), - vector, + vectorData.asByteVector(), fieldMapper.fieldType().similarity.vectorSimilarityFunction(fieldMapper.indexCreatedVersion, this) ); context.doc().addWithKey(fieldMapper.fieldType().name(), field); } @Override - double parseKnnVectorToByteBuffer(DocumentParserContext context, DenseVectorFieldMapper fieldMapper, ByteBuffer byteBuffer) - throws IOException { - double dotProduct = 0f; - int index = 0; - for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { - fieldMapper.checkDimensionExceeded(index, context); - ensureExpectedToken(Token.VALUE_NUMBER, token, context.parser()); - int value = context.parser().intValue(true); - if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { - throw new IllegalArgumentException( - "element_type [" - + this - + "] vectors only support integers between [" - + Byte.MIN_VALUE - + ", " - + Byte.MAX_VALUE - + "] but found [" - + value - + "] at dim [" - + index - + "];" - ); - } - byteBuffer.put((byte) value); - dotProduct += value * value; - index++; - } - fieldMapper.checkDimensionMatches(index, context); - return dotProduct; + int getNumBytes(int dimensions) { + return dimensions * elementBytes; } @Override @@ -530,6 +544,11 @@ void checkVectorMagnitude( } } + @Override + public double computeDotProduct(VectorData vectorData) { + return VectorUtil.dotProduct(vectorData.asFloatVector(), vectorData.asFloatVector()); + } + @Override public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { int index = 0; @@ -566,23 +585,27 @@ && 
isNotUnitVector(squaredMagnitude)) { } @Override - double parseKnnVectorToByteBuffer(DocumentParserContext context, DenseVectorFieldMapper fieldMapper, ByteBuffer byteBuffer) - throws IOException { - double dotProduct = 0f; + VectorData parseKnnVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { int index = 0; + float squaredMagnitude = 0; float[] vector = new float[fieldMapper.fieldType().dims]; for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { fieldMapper.checkDimensionExceeded(index, context); ensureExpectedToken(Token.VALUE_NUMBER, token, context.parser()); float value = context.parser().floatValue(true); vector[index] = value; - byteBuffer.putFloat(value); - dotProduct += value * value; + squaredMagnitude += value * value; index++; } fieldMapper.checkDimensionMatches(index, context); checkVectorBounds(vector); - return dotProduct; + checkVectorMagnitude(fieldMapper.fieldType().similarity, errorFloatElementsAppender(vector), squaredMagnitude); + return VectorData.fromFloats(vector); + } + + @Override + int getNumBytes(int dimensions) { + return dimensions * elementBytes; } @Override @@ -607,8 +630,9 @@ ByteBuffer createByteBuffer(IndexVersion indexVersion, int numBytes) { abstract void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException; - abstract double parseKnnVectorToByteBuffer(DocumentParserContext context, DenseVectorFieldMapper fieldMapper, ByteBuffer byteBuffer) - throws IOException; + abstract VectorData parseKnnVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException; + + abstract int getNumBytes(int dimensions); abstract ByteBuffer createByteBuffer(IndexVersion indexVersion, int numBytes); @@ -699,6 +723,8 @@ static Function errorFloatElementsAppender(float[] static Function errorByteElementsAppender(byte[] vector) { return sb -> appendErrorElements(sb, vector); } + + public abstract double computeDotProduct(VectorData vectorData); } static final Map namesToElementType = Map.of( @@ -1158,66 +1184,120 @@ public Query createKnnQuery( return knnQuery; } - public Query createExactKnnQuery(float[] queryVector) { - queryVector = validateAndNormalize(queryVector); - VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); + public Query createExactKnnQuery(VectorData queryVector) { + if (isIndexed() == false) { + throw new IllegalArgumentException( + "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" + ); + } return switch (elementType) { - case BYTE -> { - byte[] bytes = new byte[queryVector.length]; + case BYTE -> createExactKnnByteQuery(queryVector.asByteVector()); + case FLOAT -> createExactKnnFloatQuery(queryVector.asFloatVector()); + }; + } + + private Query createExactKnnByteQuery(byte[] queryVector) { + if (queryVector.length != dims) { + throw new IllegalArgumentException( + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + ); + } + if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); + } + VectorSimilarityFunction vectorSimilarityFunction = 
similarity.vectorSimilarityFunction(indexVersionCreated, elementType); + return new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new ByteVectorSimilarityFunction( + vectorSimilarityFunction, + new ByteKnnVectorFieldSource(name()), + new ConstKnnByteVectorValueSource(queryVector) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + } + + private Query createExactKnnFloatQuery(float[] queryVector) { + if (queryVector.length != dims) { + throw new IllegalArgumentException( + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + ); + } + elementType.checkVectorBounds(queryVector); + if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); + if (similarity == VectorSimilarity.COSINE + && indexVersionCreated.onOrAfter(NORMALIZE_COSINE) + && isNotUnitVector(squaredMagnitude)) { + float length = (float) Math.sqrt(squaredMagnitude); + queryVector = Arrays.copyOf(queryVector, queryVector.length); for (int i = 0; i < queryVector.length; i++) { - bytes[i] = (byte) queryVector[i]; + queryVector[i] /= length; } - yield new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) - .add( - new FunctionQuery( - new ByteVectorSimilarityFunction( - vectorSimilarityFunction, - new ByteKnnVectorFieldSource(name()), - new ConstKnnByteVectorValueSource(bytes) - ) - ), - BooleanClause.Occur.SHOULD - ) - .build(); } - case FLOAT -> new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) - .add( - new FunctionQuery( - new FloatVectorSimilarityFunction( - vectorSimilarityFunction, - new FloatKnnVectorFieldSource(name()), - new ConstKnnFloatValueSource(queryVector) - ) - ), - BooleanClause.Occur.SHOULD - ) - .build(); - }; + } + VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); + return new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new FloatVectorSimilarityFunction( + vectorSimilarityFunction, + new FloatKnnVectorFieldSource(name()), + new ConstKnnFloatValueSource(queryVector) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + } + + Query createKnnQuery(float[] queryVector, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter) { + return createKnnQuery(VectorData.fromFloats(queryVector), numCands, filter, similarityThreshold, parentFilter); } public Query createKnnQuery( - float[] queryVector, + VectorData queryVector, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter ) { - queryVector = validateAndNormalize(queryVector); - Query knnQuery = switch (elementType) { - case BYTE -> { - byte[] bytes = new byte[queryVector.length]; - for (int i = 0; i < queryVector.length; i++) { - bytes[i] = (byte) queryVector[i]; - } - yield parentFilter != null - ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), bytes, filter, numCands, parentFilter) - : new ESKnnByteVectorQuery(name(), bytes, numCands, filter); - } - case FLOAT -> parentFilter != null - ? 
new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) - : new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter); + if (isIndexed() == false) { + throw new IllegalArgumentException( + "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" + ); + } + return switch (getElementType()) { + case BYTE -> createKnnByteQuery(queryVector.asByteVector(), numCands, filter, similarityThreshold, parentFilter); + case FLOAT -> createKnnFloatQuery(queryVector.asFloatVector(), numCands, filter, similarityThreshold, parentFilter); }; + } + + private Query createKnnByteQuery( + byte[] queryVector, + int numCands, + Query filter, + Float similarityThreshold, + BitSetProducer parentFilter + ) { + if (queryVector.length != dims) { + throw new IllegalArgumentException( + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + ); + } + if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); + } + Query knnQuery = parentFilter != null + ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) + : new ESKnnByteVectorQuery(name(), queryVector, numCands, filter); if (similarityThreshold != null) { knnQuery = new VectorSimilarityQuery( knnQuery, @@ -1228,12 +1308,13 @@ public Query createKnnQuery( return knnQuery; } - private float[] validateAndNormalize(float[] queryVector) { - if (isIndexed() == false) { - throw new IllegalArgumentException( - "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" - ); - } + private Query createKnnFloatQuery( + float[] queryVector, + int numCands, + Query filter, + Float similarityThreshold, + BitSetProducer parentFilter + ) { if (queryVector.length != dims) { throw new IllegalArgumentException( "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" @@ -1244,7 +1325,6 @@ private float[] validateAndNormalize(float[] queryVector) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); if (similarity == VectorSimilarity.COSINE - && ElementType.FLOAT.equals(elementType) && indexVersionCreated.onOrAfter(NORMALIZE_COSINE) && isNotUnitVector(squaredMagnitude)) { float length = (float) Math.sqrt(squaredMagnitude); @@ -1254,7 +1334,17 @@ && isNotUnitVector(squaredMagnitude)) { } } } - return queryVector; + Query knnQuery = parentFilter != null + ? new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) + : new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter); + if (similarityThreshold != null) { + knnQuery = new VectorSimilarityQuery( + knnQuery, + similarityThreshold, + similarity.score(similarityThreshold, elementType, dims) + ); + } + return knnQuery; } VectorSimilarity getSimilarity() { @@ -1349,13 +1439,15 @@ private void parseBinaryDocValuesVectorAndIndex(DocumentParserContext context) t int dims = fieldType().dims; ElementType elementType = fieldType().elementType; int numBytes = indexCreatedVersion.onOrAfter(MAGNITUDE_STORED_INDEX_VERSION) - ? 
dims * elementType.elementBytes + MAGNITUDE_BYTES - : dims * elementType.elementBytes; + ? elementType.getNumBytes(dims) + MAGNITUDE_BYTES + : elementType.getNumBytes(dims); ByteBuffer byteBuffer = elementType.createByteBuffer(indexCreatedVersion, numBytes); - double dotProduct = elementType.parseKnnVectorToByteBuffer(context, this, byteBuffer); + VectorData vectorData = elementType.parseKnnVector(context, this); + vectorData.addToBuffer(byteBuffer); if (indexCreatedVersion.onOrAfter(MAGNITUDE_STORED_INDEX_VERSION)) { // encode vector magnitude at the end + double dotProduct = elementType.computeDotProduct(vectorData); float vectorMagnitude = (float) Math.sqrt(dotProduct); byteBuffer.putFloat(vectorMagnitude); } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index fc2d4218ea1ec..3c4355e56d21d 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -13,6 +13,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; +import org.elasticsearch.search.vectors.VectorData; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -121,7 +122,14 @@ public String getName() { @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { - KnnSearchBuilder knnSearchBuilder = new KnnSearchBuilder(field, queryVector, queryVectorBuilder, k, numCands, similarity); + KnnSearchBuilder knnSearchBuilder = new KnnSearchBuilder( + field, + VectorData.fromFloats(queryVector), + queryVectorBuilder, + k, + numCands, + similarity + ); if (preFilterQueryBuilders != null) { knnSearchBuilder.addFilterQueries(preFilterQueryBuilders); } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java index d292f61dcb085..60b0d259961da 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java @@ -22,7 +22,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Arrays; import java.util.Objects; /** @@ -32,7 +31,7 @@ public class ExactKnnQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "exact_knn"; private final String field; - private final float[] query; + private final VectorData query; /** * Creates a query builder. 
@@ -41,13 +40,27 @@ public class ExactKnnQueryBuilder extends AbstractQueryBuilder PARSER = new ConstructingObjectParser<>("knn", args -> { // TODO optimize parsing for when BYTE values are provided - List vector = (List) args[1]; - final float[] vectorArray; - if (vector != null) { - vectorArray = new float[vector.size()]; - for (int i = 0; i < vector.size(); i++) { - vectorArray[i] = vector.get(i); - } - } else { - vectorArray = null; - } return new Builder().field((String) args[0]) - .queryVector(vectorArray) + .queryVector((VectorData) args[1]) .queryVectorBuilder((QueryVectorBuilder) args[4]) .k((Integer) args[2]) .numCandidates((Integer) args[3]) @@ -79,9 +68,15 @@ public class KnnSearchBuilder implements Writeable, ToXContentFragment, Rewritea static { PARSER.declareString(constructorArg(), FIELD_FIELD); - PARSER.declareFloatArray(optionalConstructorArg(), QUERY_VECTOR_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> VectorData.parseXContent(p), + QUERY_VECTOR_FIELD, + ObjectParser.ValueType.OBJECT_ARRAY_STRING_OR_NUMBER + ); PARSER.declareInt(optionalConstructorArg(), K_FIELD); PARSER.declareInt(optionalConstructorArg(), NUM_CANDS_FIELD); + PARSER.declareNamedObject( optionalConstructorArg(), (p, c, n) -> p.namedObject(QueryVectorBuilder.class, n, c), @@ -108,7 +103,7 @@ public static KnnSearchBuilder.Builder fromXContent(XContentParser parser) throw } final String field; - final float[] queryVector; + final VectorData queryVector; final QueryVectorBuilder queryVectorBuilder; private final Supplier querySupplier; final int k; @@ -127,7 +122,26 @@ public static KnnSearchBuilder.Builder fromXContent(XContentParser parser) throw * @param numCands the number of nearest neighbor candidates to consider per shard */ public KnnSearchBuilder(String field, float[] queryVector, int k, int numCands, Float similarity) { - this(field, Objects.requireNonNull(queryVector, format("[%s] cannot be null", QUERY_VECTOR_FIELD)), null, k, numCands, similarity); + this( + field, + Objects.requireNonNull(VectorData.fromFloats(queryVector), format("[%s] cannot be null", QUERY_VECTOR_FIELD)), + null, + k, + numCands, + similarity + ); + } + + /** + * Defines a kNN search. + * + * @param field the name of the vector field to search against + * @param queryVector the query vector + * @param k the final number of nearest neighbors to return as top hits + * @param numCands the number of nearest neighbor candidates to consider per shard + */ + public KnnSearchBuilder(String field, VectorData queryVector, int k, int numCands, Float similarity) { + this(field, queryVector, null, k, numCands, similarity); } /** @@ -151,7 +165,7 @@ public KnnSearchBuilder(String field, QueryVectorBuilder queryVectorBuilder, int public KnnSearchBuilder( String field, - float[] queryVector, + VectorData queryVector, QueryVectorBuilder queryVectorBuilder, int k, int numCands, @@ -169,7 +183,7 @@ private KnnSearchBuilder( Float similarity ) { this.field = field; - this.queryVector = new float[0]; + this.queryVector = VectorData.fromFloats(new float[0]); this.queryVectorBuilder = null; this.k = k; this.numCands = numCands; @@ -181,7 +195,7 @@ private KnnSearchBuilder( private KnnSearchBuilder( String field, QueryVectorBuilder queryVectorBuilder, - float[] queryVector, + VectorData queryVector, List filterQueries, int k, int numCandidates, @@ -219,7 +233,7 @@ private KnnSearchBuilder( ); } this.field = field; - this.queryVector = queryVector == null ? 
new float[0] : queryVector; + this.queryVector = queryVector == null ? VectorData.fromFloats(new float[0]) : queryVector; this.queryVectorBuilder = queryVectorBuilder; this.k = k; this.numCands = numCandidates; @@ -234,7 +248,11 @@ public KnnSearchBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.k = in.readVInt(); this.numCands = in.readVInt(); - this.queryVector = in.readFloatArray(); + if (in.getTransportVersion().onOrAfter(TransportVersions.KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING)) { + this.queryVector = in.readOptionalWriteable(VectorData::new); + } else { + this.queryVector = VectorData.fromFloats(in.readFloatArray()); + } this.filterQueries = in.readNamedWriteableCollectionAsList(QueryBuilder.class); this.boost = in.readFloat(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { @@ -262,7 +280,7 @@ public QueryVectorBuilder getQueryVectorBuilder() { } // for testing only - public float[] getQueryVector() { + public VectorData getQueryVector() { return queryVector; } @@ -365,7 +383,7 @@ public boolean equals(Object o) { return k == that.k && numCands == that.numCands && Objects.equals(field, that.field) - && Arrays.equals(queryVector, that.queryVector) + && Objects.equals(queryVector, that.queryVector) && Objects.equals(queryVectorBuilder, that.queryVectorBuilder) && Objects.equals(querySupplier, that.querySupplier) && Objects.equals(filterQueries, that.filterQueries) @@ -383,7 +401,7 @@ public int hashCode() { querySupplier, queryVectorBuilder, similarity, - Arrays.hashCode(queryVector), + Objects.hashCode(queryVector), Objects.hashCode(filterQueries), innerHitBuilder, boost @@ -401,7 +419,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(queryVectorBuilder.getWriteableName(), queryVectorBuilder); builder.endObject(); } else { - builder.array(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + builder.field(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); } if (similarity != null) { builder.field(VECTOR_SIMILARITY.getPreferredName(), similarity); @@ -434,7 +452,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(field); out.writeVInt(k); out.writeVInt(numCands); - out.writeFloatArray(queryVector); + if (out.getTransportVersion().onOrAfter(TransportVersions.KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING)) { + out.writeOptionalWriteable(queryVector); + } else { + out.writeFloatArray(queryVector.asFloatVector()); + } out.writeNamedWriteableCollection(filterQueries); out.writeFloat(boost); if (out.getTransportVersion().before(TransportVersions.V_8_7_0) && queryVectorBuilder != null) { @@ -460,7 +482,7 @@ public void writeTo(StreamOutput out) throws IOException { public static class Builder { private String field; - private float[] queryVector; + private VectorData queryVector; private QueryVectorBuilder queryVectorBuilder; private Integer k; private Integer numCandidates; @@ -490,7 +512,7 @@ public Builder innerHit(InnerHitBuilder innerHitBuilder) { return this; } - public Builder queryVector(float[] queryVector) { + public Builder queryVector(VectorData queryVector) { this.queryVector = queryVector; return this; } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 7e65cd19638ce..149dedd59df46 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -62,23 +61,19 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder PARSER = new ConstructingObjectParser<>("knn", args -> { - List vector = (List) args[1]; - final float[] vectorArray; - if (vector != null) { - vectorArray = new float[vector.size()]; - for (int i = 0; i < vector.size(); i++) { - vectorArray[i] = vector.get(i); - } - } else { - vectorArray = null; - } - return new KnnVectorQueryBuilder((String) args[0], vectorArray, (Integer) args[2], (Float) args[3]); - }); + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "knn", + args -> new KnnVectorQueryBuilder((String) args[0], (VectorData) args[1], (Integer) args[2], (Float) args[3]) + ); static { PARSER.declareString(constructorArg(), FIELD_FIELD); - PARSER.declareFloatArray(constructorArg(), QUERY_VECTOR_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> VectorData.parseXContent(p), + QUERY_VECTOR_FIELD, + ObjectParser.ValueType.OBJECT_ARRAY_STRING_OR_NUMBER + ); PARSER.declareInt(optionalConstructorArg(), NUM_CANDS_FIELD); PARSER.declareFloat(optionalConstructorArg(), VECTOR_SIMILARITY_FIELD); PARSER.declareFieldArray( @@ -95,12 +90,20 @@ public static KnnVectorQueryBuilder fromXContent(XContentParser parser) { } private final String fieldName; - private final float[] queryVector; + private final VectorData queryVector; private Integer numCands; private final List filterQueries = new ArrayList<>(); private final Float vectorSimilarity; public KnnVectorQueryBuilder(String fieldName, float[] queryVector, Integer numCands, Float vectorSimilarity) { + this(fieldName, VectorData.fromFloats(queryVector), numCands, vectorSimilarity); + } + + public KnnVectorQueryBuilder(String fieldName, byte[] queryVector, Integer numCands, Float vectorSimilarity) { + this(fieldName, VectorData.fromBytes(queryVector), numCands, vectorSimilarity); + } + + public KnnVectorQueryBuilder(String fieldName, VectorData queryVector, Integer numCands, Float vectorSimilarity) { if (numCands != null && numCands > NUM_CANDS_LIMIT) { throw new IllegalArgumentException("[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot exceed [" + NUM_CANDS_LIMIT + "]"); } @@ -121,12 +124,17 @@ public KnnVectorQueryBuilder(StreamInput in) throws IOException { } else { this.numCands = in.readVInt(); } - if (in.getTransportVersion().before(TransportVersions.V_8_7_0) || in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { - this.queryVector = in.readFloatArray(); + if (in.getTransportVersion().onOrAfter(TransportVersions.KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING)) { + this.queryVector = in.readOptionalWriteable(VectorData::new); } else { - in.readBoolean(); - this.queryVector = in.readFloatArray(); - in.readBoolean(); // used for byteQueryVector, which was always null + if (in.getTransportVersion().before(TransportVersions.V_8_7_0) + || in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { + this.queryVector = VectorData.fromFloats(in.readFloatArray()); + } else { + in.readBoolean(); + this.queryVector = VectorData.fromFloats(in.readFloatArray()); + in.readBoolean(); // used for byteQueryVector, which was always null + } } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { this.filterQueries.addAll(readQueries(in)); @@ -143,7 +151,7 @@ public 
String getFieldName() { } @Nullable - public float[] queryVector() { + public VectorData queryVector() { return queryVector; } @@ -190,13 +198,17 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(numCands); } } - if (out.getTransportVersion().before(TransportVersions.V_8_7_0) - || out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { - out.writeFloatArray(queryVector); + if (out.getTransportVersion().onOrAfter(TransportVersions.KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING)) { + out.writeOptionalWriteable(queryVector); } else { - out.writeBoolean(true); - out.writeFloatArray(queryVector); - out.writeBoolean(false); // used for byteQueryVector, which was always null + if (out.getTransportVersion().before(TransportVersions.V_8_7_0) + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { + out.writeFloatArray(queryVector.asFloatVector()); + } else { + out.writeBoolean(true); + out.writeFloatArray(queryVector.asFloatVector()); + out.writeBoolean(false); // used for byteQueryVector, which was always null + } } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { writeQueries(out, filterQueries); @@ -326,13 +338,13 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { @Override protected int doHashCode() { - return Objects.hash(fieldName, Arrays.hashCode(queryVector), numCands, filterQueries, vectorSimilarity); + return Objects.hash(fieldName, Objects.hashCode(queryVector), numCands, filterQueries, vectorSimilarity); } @Override protected boolean doEquals(KnnVectorQueryBuilder other) { return Objects.equals(fieldName, other.fieldName) - && Arrays.equals(queryVector, other.queryVector) + && Objects.equals(queryVector, other.queryVector) && Objects.equals(numCands, other.numCands) && Objects.equals(filterQueries, other.filterQueries) && Objects.equals(vectorSimilarity, other.vectorSimilarity); diff --git a/server/src/main/java/org/elasticsearch/search/vectors/VectorData.java b/server/src/main/java/org/elasticsearch/search/vectors/VectorData.java new file mode 100644 index 0000000000000..a92644af1fcf5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/vectors/VectorData.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HexFormat; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.Strings.format; + +public record VectorData(float[] floatVector, byte[] byteVector) implements Writeable, ToXContentFragment { + + private VectorData(float[] floatVector) { + this(floatVector, null); + } + + private VectorData(byte[] byteVector) { + this(null, byteVector); + } + + public VectorData(StreamInput in) throws IOException { + this(in.readOptionalFloatArray(), in.readOptionalByteArray()); + } + + public VectorData { + if (false == (floatVector == null ^ byteVector == null)) { + throw new IllegalArgumentException("please supply exactly either a float or a byte vector"); + } + } + + public byte[] asByteVector() { + if (byteVector != null) { + return byteVector; + } + DenseVectorFieldMapper.ElementType.BYTE.checkVectorBounds(floatVector); + byte[] vec = new byte[floatVector.length]; + for (int i = 0; i < floatVector.length; i++) { + vec[i] = (byte) floatVector[i]; + } + return vec; + } + + public float[] asFloatVector() { + if (floatVector != null) { + return floatVector; + } + float[] vec = new float[byteVector.length]; + for (int i = 0; i < byteVector.length; i++) { + vec[i] = byteVector[i]; + } + return vec; + } + + public void addToBuffer(ByteBuffer byteBuffer) { + if (floatVector != null) { + for (float val : floatVector) { + byteBuffer.putFloat(val); + } + } else { + byteBuffer.put(byteVector); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalFloatArray(floatVector); + out.writeOptionalByteArray(byteVector); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (floatVector != null) { + builder.startArray(); + for (float v : floatVector) { + builder.value(v); + } + builder.endArray(); + } else { + builder.value(HexFormat.of().formatHex(byteVector)); + } + return builder; + } + + @Override + public String toString() { + return floatVector != null ? 
Arrays.toString(floatVector) : Arrays.toString(byteVector); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + VectorData other = (VectorData) obj; + return Arrays.equals(floatVector, other.floatVector) && Arrays.equals(byteVector, other.byteVector); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(floatVector), Arrays.hashCode(byteVector)); + } + + public static VectorData parseXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + return switch (token) { + case START_ARRAY -> parseQueryVectorArray(parser); + case VALUE_STRING -> parseHexEncodedVector(parser); + case VALUE_NUMBER -> parseNumberVector(parser); + default -> throw new ParsingException(parser.getTokenLocation(), format("Unknown type [%s] for parsing vector", token)); + }; + } + + private static VectorData parseQueryVectorArray(XContentParser parser) throws IOException { + XContentParser.Token token; + List vectorArr = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_NUMBER || token == XContentParser.Token.VALUE_STRING) { + vectorArr.add(parser.floatValue()); + } else { + throw new ParsingException(parser.getTokenLocation(), format("Type [%s] not supported for query vector", token)); + } + } + float[] floatVector = new float[vectorArr.size()]; + for (int i = 0; i < vectorArr.size(); i++) { + floatVector[i] = vectorArr.get(i); + } + return VectorData.fromFloats(floatVector); + } + + private static VectorData parseHexEncodedVector(XContentParser parser) throws IOException { + return VectorData.fromBytes(HexFormat.of().parseHex(parser.text())); + } + + private static VectorData parseNumberVector(XContentParser parser) throws IOException { + return VectorData.fromFloats(new float[] { parser.floatValue() }); + } + + public static VectorData fromFloats(float[] vec) { + return vec == null ? null : new VectorData(vec); + } + + public static VectorData fromBytes(byte[] vec) { + return vec == null ? 
null : new VectorData(vec); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index c3d2d6a3f194b..27adc72fb5ed8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.DenseVectorFieldType; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.VectorSimilarity; +import org.elasticsearch.search.vectors.VectorData; import java.io.IOException; import java.util.Collections; @@ -179,7 +180,7 @@ public void testExactKnnQuery() { for (int i = 0; i < dims; i++) { queryVector[i] = randomFloat(); } - Query query = field.createExactKnnQuery(queryVector); + Query query = field.createExactKnnQuery(VectorData.fromFloats(queryVector)); assertTrue(query instanceof BooleanQuery); BooleanQuery booleanQuery = (BooleanQuery) query; boolean foundFunction = false; @@ -202,12 +203,10 @@ public void testExactKnnQuery() { Collections.emptyMap() ); byte[] queryVector = new byte[dims]; - float[] floatQueryVector = new float[dims]; for (int i = 0; i < dims; i++) { queryVector[i] = randomByte(); - floatQueryVector[i] = queryVector[i]; } - Query query = field.createExactKnnQuery(floatQueryVector); + Query query = field.createExactKnnQuery(VectorData.fromBytes(queryVector)); assertTrue(query instanceof BooleanQuery); BooleanQuery booleanQuery = (BooleanQuery) query; boolean foundFunction = false; diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index ad9c95b5b80c7..45ad9d514ba82 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -43,10 +43,12 @@ abstract class AbstractKnnVectorQueryBuilderTestCase extends AbstractQueryTestCase { private static final String VECTOR_FIELD = "vector"; private static final String VECTOR_ALIAS_FIELD = "vector_alias"; - private static final int VECTOR_DIMENSION = 3; + static final int VECTOR_DIMENSION = 3; abstract DenseVectorFieldMapper.ElementType elementType(); + abstract KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity); + @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder() @@ -75,12 +77,9 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected KnnVectorQueryBuilder doCreateTestQueryBuilder() { String fieldName = randomBoolean() ? VECTOR_FIELD : VECTOR_ALIAS_FIELD; - float[] vector = new float[VECTOR_DIMENSION]; - for (int i = 0; i < vector.length; i++) { - vector[i] = elementType().equals(DenseVectorFieldMapper.ElementType.BYTE) ? randomByte() : randomFloat(); - } int numCands = randomIntBetween(DEFAULT_SIZE, 1000); - KnnVectorQueryBuilder queryBuilder = new KnnVectorQueryBuilder(fieldName, vector, numCands, randomBoolean() ? 
null : randomFloat()); + KnnVectorQueryBuilder queryBuilder = createKnnVectorQueryBuilder(fieldName, numCands, randomBoolean() ? null : randomFloat()); + if (randomBoolean()) { List filters = new ArrayList<>(); int numFilters = randomIntBetween(1, 5); @@ -120,11 +119,16 @@ protected void doAssertLuceneQuery(KnnVectorQueryBuilder queryBuilder, Query que Query knnVectorQueryBuilt = switch (elementType()) { case BYTE -> new ESKnnByteVectorQuery( VECTOR_FIELD, - getByteQueryVector(queryBuilder.queryVector()), + queryBuilder.queryVector().asByteVector(), + queryBuilder.numCands(), + filterQuery + ); + case FLOAT -> new ESKnnFloatVectorQuery( + VECTOR_FIELD, + queryBuilder.queryVector().asFloatVector(), queryBuilder.numCands(), filterQuery ); - case FLOAT -> new ESKnnFloatVectorQuery(VECTOR_FIELD, queryBuilder.queryVector(), queryBuilder.numCands(), filterQuery); }; if (query instanceof VectorSimilarityQuery vectorSimilarityQuery) { query = vectorSimilarityQuery.getInnerKnnQuery(); @@ -193,7 +197,8 @@ public void testMustRewrite() throws IOException { public void testBWCVersionSerializationFilters() throws IOException { KnnVectorQueryBuilder query = createTestQueryBuilder(); - KnnVectorQueryBuilder queryNoFilters = new KnnVectorQueryBuilder(query.getFieldName(), query.queryVector(), query.numCands(), null) + VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); + KnnVectorQueryBuilder queryNoFilters = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), null) .queryName(query.queryName()) .boost(query.boost()); TransportVersion beforeFilterVersion = TransportVersionUtils.randomVersionBetween( @@ -206,12 +211,11 @@ public void testBWCVersionSerializationFilters() throws IOException { public void testBWCVersionSerializationSimilarity() throws IOException { KnnVectorQueryBuilder query = createTestQueryBuilder(); - KnnVectorQueryBuilder queryNoSimilarity = new KnnVectorQueryBuilder( - query.getFieldName(), - query.queryVector(), - query.numCands(), - null - ).queryName(query.queryName()).boost(query.boost()).addFilterQueries(query.filterQueries()); + VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); + KnnVectorQueryBuilder queryNoSimilarity = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), null) + .queryName(query.queryName()) + .boost(query.boost()) + .addFilterQueries(query.filterQueries()); assertBWCSerialization(query, queryNoSimilarity, TransportVersions.V_8_7_0); } @@ -223,12 +227,11 @@ public void testBWCVersionSerializationQuery() throws IOException { TransportVersions.V_8_12_0 ); Float similarity = differentQueryVersion.before(TransportVersions.V_8_8_0) ? 
null : query.getVectorSimilarity(); - KnnVectorQueryBuilder queryOlderVersion = new KnnVectorQueryBuilder( - query.getFieldName(), - query.queryVector(), - query.numCands(), - similarity - ).queryName(query.queryName()).boost(query.boost()).addFilterQueries(query.filterQueries()); + VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); + KnnVectorQueryBuilder queryOlderVersion = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), similarity) + .queryName(query.queryName()) + .boost(query.boost()) + .addFilterQueries(query.filterQueries()); assertBWCSerialization(query, queryOlderVersion, differentQueryVersion); } @@ -245,12 +248,4 @@ private void assertBWCSerialization(QueryBuilder newQuery, QueryBuilder bwcQuery } } } - - private static byte[] getByteQueryVector(float[] queryVector) { - byte[] byteQueryVector = new byte[queryVector.length]; - for (int i = 0; i < queryVector.length; i++) { - byteQueryVector[i] = (byte) queryVector[i]; - } - return byteQueryVector; - } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java index 3bf92a60275d8..6c83700d0b29a 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java @@ -15,4 +15,13 @@ public class KnnByteVectorQueryBuilderTests extends AbstractKnnVectorQueryBuilde DenseVectorFieldMapper.ElementType elementType() { return DenseVectorFieldMapper.ElementType.BYTE; } + + @Override + protected KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity) { + byte[] vector = new byte[VECTOR_DIMENSION]; + for (int i = 0; i < vector.length; i++) { + vector[i] = randomByte(); + } + return new KnnVectorQueryBuilder(fieldName, vector, numCands, similarity); + } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java similarity index 56% rename from server/src/test/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilderTests.java rename to server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java index 6b1166cdd16dc..eeb5244d57943 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java @@ -10,9 +10,18 @@ import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; -public class KnnVectorQueryBuilderTests extends AbstractKnnVectorQueryBuilderTestCase { +public class KnnFloatVectorQueryBuilderTests extends AbstractKnnVectorQueryBuilderTestCase { @Override DenseVectorFieldMapper.ElementType elementType() { return DenseVectorFieldMapper.ElementType.FLOAT; } + + @Override + KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity) { + float[] vector = new float[VECTOR_DIMENSION]; + for (int i = 0; i < vector.length; i++) { + vector[i] = randomFloat(); + } + return new KnnVectorQueryBuilder(fieldName, vector, numCands, similarity); + } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java index 
c650f54321060..564c8b9d0db11 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java @@ -106,7 +106,7 @@ protected KnnSearchBuilder mutateInstance(KnnSearchBuilder instance) { instance.boost ); case 1: - float[] newVector = randomValueOtherThan(instance.queryVector, () -> randomVector(5)); + float[] newVector = randomValueOtherThan(instance.queryVector.asFloatVector(), () -> randomVector(5)); return new KnnSearchBuilder(instance.field, newVector, instance.k, instance.numCands, instance.similarity).boost( instance.boost ); @@ -213,7 +213,7 @@ public void testRewrite() throws Exception { assertThat(rewritten.field, equalTo(searchBuilder.field)); assertThat(rewritten.boost, equalTo(searchBuilder.boost)); - assertThat(rewritten.queryVector, equalTo(expectedArray)); + assertThat(rewritten.queryVector.asFloatVector(), equalTo(expectedArray)); assertThat(rewritten.queryVectorBuilder, nullValue()); assertThat(rewritten.filterQueries, hasSize(1)); assertThat(rewritten.similarity, equalTo(1f)); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/VectorDataTests.java b/server/src/test/java/org/elasticsearch/search/vectors/VectorDataTests.java new file mode 100644 index 0000000000000..feabc100d1007 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/VectorDataTests.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class VectorDataTests extends ESTestCase { + + private static final float DELTA = 1e-5f; + + public void testThrowsIfBothVectorsAreNull() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new VectorData(null, null)); + assertThat(ex.getMessage(), containsString("please supply exactly either a float or a byte vector")); + } + + public void testThrowsIfBothVectorsAreNonNull() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> new VectorData(new float[] { 0f }, new byte[] { 1 }) + ); + assertThat(ex.getMessage(), containsString("please supply exactly either a float or a byte vector")); + } + + public void testShouldCorrectlyConvertByteToFloatIfExplicitlyRequested() { + byte[] byteVector = new byte[] { 1, 2, -127 }; + float[] expected = new float[] { 1f, 2f, -127f }; + + VectorData vectorData = new VectorData(null, byteVector); + float[] actual = vectorData.asFloatVector(); + assertArrayEquals(expected, actual, DELTA); + } + + public void testShouldThrowForDecimalsWhenConvertingToByte() { + float[] vec = new float[] { 1f, 2f, 3.1f }; + + VectorData vectorData = new VectorData(vec, null); + expectThrows(IllegalArgumentException.class, vectorData::asByteVector); + } + + public void testShouldThrowForOutsideRangeWhenConvertingToByte() { + float[] vec = new float[] { 1f, 2f, 200f }; + + VectorData vectorData = new VectorData(vec, null); + expectThrows(IllegalArgumentException.class, vectorData::asByteVector); + } + + public void testEqualsAndHashCode() { + VectorData v1 = new VectorData(new float[] { 1, 2, 3 }, null); + VectorData v2 = new VectorData(null, new byte[] { 1, 2, 3 }); + assertNotEquals(v1, v2); + assertNotEquals(v1.hashCode(), v2.hashCode()); + + VectorData v3 = new VectorData(null, new byte[] { 1, 2, 3 }); + assertEquals(v2, v3); + assertEquals(v2.hashCode(), v3.hashCode()); + } + + public void testParseHexCorrectly() throws IOException { + byte[] expected = new byte[] { 64, 10, -30, 10 }; + String toParse = "\"400ae20a\""; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + assertArrayEquals(expected, parsed.asByteVector()); + } + } + + public void testParseFloatArray() throws IOException { + float[] expected = new float[] { 1f, -1f, .1f }; + String toParse = "[1.0, -1.0, 0.1]"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + assertArrayEquals(expected, parsed.asFloatVector(), DELTA); + } + } + + public void testParseByteArray() throws IOException { + byte[] expected = new byte[] { 64, 10, -30, 10 }; + String toParse = "[64,10,-30,10]"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + 
XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + assertArrayEquals(expected, parsed.asByteVector()); + } + } + + public void testByteThrowsForOutsideRange() throws IOException { + String toParse = "[1000]"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, parsed::asByteVector); + assertThat(ex.getMessage(), containsString("vectors only support integers between [-128, 127]")); + } + } + + public void testAsByteThrowsForDecimals() throws IOException { + String toParse = "[0.1]"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, parsed::asByteVector); + assertThat(ex.getMessage(), containsString("vectors only support non-decimal values but found decimal value")); + } + } + + public void testParseSingleNumber() throws IOException { + float[] expected = new float[] { 0.1f }; + String toParse = "0.1"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + VectorData parsed = VectorData.parseXContent(parser); + assertArrayEquals(expected, parsed.asFloatVector(), DELTA); + } + } + + public void testParseThrowsForUnknown() throws IOException { + String unknown = "{\"foo\":\"bar\"}"; + try ( + XContentParser parser = XContentHelper.createParser( + XContentParserConfiguration.EMPTY, + new BytesArray(unknown), + XContentType.JSON + ) + ) { + parser.nextToken(); + ParsingException ex = expectThrows(ParsingException.class, () -> VectorData.parseXContent(parser)); + assertThat(ex.getMessage(), containsString("Unknown type [" + XContentParser.Token.START_OBJECT + "] for parsing vector")); + } + } + + public void testFailForUnknownArrayValue() throws IOException { + String toParse = "[0.1, true]"; + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + new BytesArray(toParse), + XContentType.JSON + ) + ) { + parser.nextToken(); + ParsingException ex = expectThrows(ParsingException.class, () -> VectorData.parseXContent(parser)); + assertThat(ex.getMessage(), containsString("Type [" + XContentParser.Token.VALUE_BOOLEAN + "] not supported for query vector")); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java index b6e5c7161edc8..b327aee0931f9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java @@ -132,7 +132,7 @@ public final void testKnnSearchRewrite() throws Exception { PlainActionFuture future = new PlainActionFuture<>(); Rewriteable.rewriteAndFetch(randomFrom(serialized, searchBuilder), context, future); KnnSearchBuilder rewritten = 
future.get(); - assertThat(rewritten.getQueryVector(), equalTo(expected)); + assertThat(rewritten.getQueryVector().asFloatVector(), equalTo(expected)); assertThat(rewritten.getQueryVectorBuilder(), nullValue()); } } From d54593f158265d2142c1db8b766267efead5e77d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 13 Mar 2024 09:01:29 +0100 Subject: [PATCH 159/248] Fix a downsample persistent task assignment bug (#106247) If as part of the persistent task assignment the source downsample index no longer exists, then the persistent task framework will continuously try to find an assignment and fail with IndexNotFoundException (which gets logged as a warning on elected master node). This fixes a bug in resolving the shard routing, so that if the index no longer exists any node is returned and the persistent task can fail gracefully at a later stage. The original fix via #98769 didn't get this part right. --- docs/changelog/106247.yaml | 5 + ...DownsampleShardPersistentTaskExecutor.java | 11 +- ...ampleShardPersistentTaskExecutorTests.java | 125 ++++++++++++++++++ 3 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/106247.yaml create mode 100644 x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutorTests.java diff --git a/docs/changelog/106247.yaml b/docs/changelog/106247.yaml new file mode 100644 index 0000000000000..5895dffd685a4 --- /dev/null +++ b/docs/changelog/106247.yaml @@ -0,0 +1,5 @@ +pr: 106247 +summary: Fix a downsample persistent task assignment bug +area: Downsampling +type: bug +issues: [] diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index 6f110ace53fc9..883986887fc3d 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamOutput; @@ -134,7 +135,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( // If during re-assignment the source index was deleted, then we need to break out. 
// Returning NO_NODE_FOUND just keeps the persistent task until the source index appears again (which would never happen) // So let's return a node and then in the node operation we would just fail and stop this persistent task - var indexShardRouting = clusterState.routingTable().shardRoutingTable(params.shardId().getIndexName(), params.shardId().id()); + var indexShardRouting = findShardRoutingTable(shardId, clusterState); if (indexShardRouting == null) { var node = selectLeastLoadedNode(clusterState, candidateNodes, DiscoveryNode::canContainData); return new PersistentTasksCustomMetadata.Assignment(node.getId(), "a node to fail and stop this persistent task"); @@ -175,6 +176,14 @@ private void delegate(final AllocatedPersistentTask task, final DownsampleShardT ); } + private static IndexShardRoutingTable findShardRoutingTable(ShardId shardId, ClusterState clusterState) { + var indexRoutingTable = clusterState.routingTable().index(shardId.getIndexName()); + if (indexRoutingTable != null) { + return indexRoutingTable.shard(shardId.getId()); + } + return null; + } + static void realNodeOperation( Client client, IndicesService indicesService, diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutorTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutorTests.java new file mode 100644 index 0000000000000..06f6be27e9f3d --- /dev/null +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutorTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.downsample; + +import org.elasticsearch.action.downsample.DownsampleConfig; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.downsample.DownsampleShardTask; +import org.junit.Before; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executor; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class DownsampleShardPersistentTaskExecutorTests extends ESTestCase { + + private ClusterState initialClusterState; + private DownsampleShardPersistentTaskExecutor executor; + + @Before + public void setup() { + Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); + Instant start = now.minus(2, ChronoUnit.HOURS); + Instant end = now.plus(40, ChronoUnit.MINUTES); + initialClusterState = DataStreamTestHelper.getClusterStateWithDataStream("metrics-app1", List.of(new Tuple<>(start, end))); + executor = new DownsampleShardPersistentTaskExecutor(mock(Client.class), DownsampleShardTask.TASK_NAME, mock(Executor.class)); + } + + public void testGetAssignment() { + var backingIndex = initialClusterState.metadata().dataStreams().get("metrics-app1").getWriteIndex(); + var node = newNode(); + var shardId = new ShardId(backingIndex, 0); + var clusterState = ClusterState.builder(initialClusterState) + .nodes(new DiscoveryNodes.Builder().add(node).build()) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(backingIndex) + .addShard(shardRoutingBuilder(shardId, node.getId(), true, STARTED).withRecoverySource(null).build()) + ) + ) + .build(); + + var params = new DownsampleShardTaskParams( + new DownsampleConfig(new DateHistogramInterval("1h")), + shardId.getIndexName(), + 1, + 1, + shardId, + Strings.EMPTY_ARRAY, + Strings.EMPTY_ARRAY, + Strings.EMPTY_ARRAY + ); + var result = executor.getAssignment(params, Set.of(node), clusterState); + assertThat(result.getExecutorNode(), equalTo(node.getId())); + } + + public void testGetAssignmentMissingIndex() { + var backingIndex = initialClusterState.metadata().dataStreams().get("metrics-app1").getWriteIndex(); + var node = newNode(); + var shardId = new ShardId(backingIndex, 0); + var clusterState = ClusterState.builder(initialClusterState) + .nodes(new DiscoveryNodes.Builder().add(node).build()) + .routingTable( + RoutingTable.builder() + .add( + IndexRoutingTable.builder(backingIndex) + .addShard(shardRoutingBuilder(shardId, node.getId(), true, 
STARTED).withRecoverySource(null).build()) + ) + ) + .build(); + + var missingShardId = new ShardId(new Index("another_index", "uid"), 0); + var params = new DownsampleShardTaskParams( + new DownsampleConfig(new DateHistogramInterval("1h")), + missingShardId.getIndexName(), + 1, + 1, + missingShardId, + Strings.EMPTY_ARRAY, + Strings.EMPTY_ARRAY, + Strings.EMPTY_ARRAY + ); + var result = executor.getAssignment(params, Set.of(node), clusterState); + assertThat(result.getExecutorNode(), equalTo(node.getId())); + assertThat(result.getExplanation(), equalTo("a node to fail and stop this persistent task")); + } + + private static DiscoveryNode newNode() { + return DiscoveryNodeUtils.create( + "node_" + UUIDs.randomBase64UUID(random()), + buildNewFakeTransportAddress(), + Map.of(), + DiscoveryNodeRole.roles() + ); + } + +} From e944619e015030437c27008f42831c3b75dfa796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Wed, 13 Mar 2024 09:05:47 +0100 Subject: [PATCH 160/248] Fix typo in the LTR guide. (#106276) --- .../search-your-data/learning-to-rank-model-training.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc index fb026578bc00d..6525147839412 100644 --- a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc @@ -109,7 +109,7 @@ The FeatureLogger provides an `extract_features` method which enables you to ext [source,python] ---- feature_logger.extract_features( - query_params:{"query": "foo"}, + query_params={"query": "foo"}, doc_ids=["doc-1", "doc-2"] ) ---- From e0087e9b0dc70f7887d2ff50afda3bb4f21507fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Wed, 13 Mar 2024 09:34:28 +0100 Subject: [PATCH 161/248] [Transform] Do not log error on node restart when the transform is already failed. 
(#106171) --- docs/changelog/106171.yaml | 6 ++++++ .../common/notifications/AbstractAuditor.java | 6 +++--- .../CannotStartFailedTransformException.java | 16 ++++++++++++++++ .../TransformPersistentTasksExecutor.java | 15 ++++++++++++--- .../transform/transforms/TransformTask.java | 5 ++--- 5 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/106171.yaml create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/CannotStartFailedTransformException.java diff --git a/docs/changelog/106171.yaml b/docs/changelog/106171.yaml new file mode 100644 index 0000000000000..9daf1b9acd994 --- /dev/null +++ b/docs/changelog/106171.yaml @@ -0,0 +1,6 @@ +pr: 106171 +summary: Do not log error on node restart when the transform is already failed +area: Transform +type: enhancement +issues: + - 106168 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java index 8371f018a6bde..d02fb85f46b1e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java @@ -103,15 +103,15 @@ public void audit(Level level, String resourceId, String message) { } public void info(String resourceId, String message) { - indexDoc(messageFactory.newMessage(resourceId, message, Level.INFO, new Date(), nodeName)); + audit(Level.INFO, resourceId, message); } public void warning(String resourceId, String message) { - indexDoc(messageFactory.newMessage(resourceId, message, Level.WARNING, new Date(), nodeName)); + audit(Level.WARNING, resourceId, message); } public void error(String resourceId, String message) { - indexDoc(messageFactory.newMessage(resourceId, message, Level.ERROR, new Date(), nodeName)); + audit(Level.ERROR, resourceId, message); } private static void onIndexResponse(DocWriteResponse response) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/CannotStartFailedTransformException.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/CannotStartFailedTransformException.java new file mode 100644 index 0000000000000..674af59f6ad2d --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/CannotStartFailedTransformException.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.ElasticsearchException; + +class CannotStartFailedTransformException extends ElasticsearchException { + CannotStartFailedTransformException(String msg) { + super(msg); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 2fc001599c78d..ae9678893df9a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.transform.transforms; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; @@ -62,6 +63,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.core.common.notifications.Level.ERROR; +import static org.elasticsearch.xpack.core.common.notifications.Level.INFO; import static org.elasticsearch.xpack.transform.transforms.TransformNodes.nodeCanRunThisTransform; public class TransformPersistentTasksExecutor extends PersistentTasksExecutor { @@ -203,11 +206,17 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa ActionListener startTaskListener = ActionListener.wrap( response -> logger.info("[{}] successfully completed and scheduled task in node operation", transformId), failure -> { - auditor.error( + // If the transform is failed then there is no need to log an error on every node restart as the error had already been + // logged when the transform first failed. + boolean logErrorAsInfo = failure instanceof CannotStartFailedTransformException; + auditor.audit( + logErrorAsInfo ? INFO : ERROR, transformId, - "Failed to start transform. " + "Please stop and attempt to start again. Failure: " + failure.getMessage() + "Failed to start transform. Please stop and attempt to start again. Failure: " + failure.getMessage() ); - logger.error("Failed to start task [" + transformId + "] in node operation", failure); + logger.atLevel(logErrorAsInfo ? Level.INFO : Level.ERROR) + .withThrowable(failure) + .log("[{}] Failed to start task in node operation", transformId); } ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index 8a78be8417020..ac81579e8dd71 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -240,9 +240,8 @@ void start(Long startingCheckpoint, ActionListener Date: Wed, 13 Mar 2024 09:03:42 +0000 Subject: [PATCH 162/248] Reduce usage of `SAME` threadpool name (#106279) These days in most places where we mention `SAME` we're immediately looking up the corresponding executor, and therefore can just mention `EsExecutors#DIRECT_EXECUTOR_SERVICE` directly instead. 
--- .../http/netty4/Netty4PipeliningIT.java | 9 ++- .../TransportFieldCapabilitiesAction.java | 11 +-- .../search/MultiSearchActionTookTests.java | 9 +-- .../TransportMultiSearchActionTests.java | 13 ++-- .../JoinValidationServiceTests.java | 4 +- .../cluster/service/MasterServiceTests.java | 3 +- .../CompositeIndexEventListenerTests.java | 9 +-- .../ClusterConnectionManagerTests.java | 4 +- .../transport/TransportActionProxyTests.java | 25 +++---- .../org/elasticsearch/test/ESTestCase.java | 17 +++++ .../AbstractSimpleTransportTestCase.java | 68 ++++++------------- .../saml/TransportSamlAuthenticateAction.java | 5 +- .../TransportSamlInvalidateSessionAction.java | 5 +- .../saml/TransportSamlLogoutAction.java | 11 +-- ...nsportSamlPrepareAuthenticationAction.java | 5 +- ...curityServerTransportInterceptorTests.java | 2 +- 16 files changed, 81 insertions(+), 119 deletions(-) diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java index 653733b064ba9..130a1168d455c 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java @@ -42,7 +42,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContentObject; import java.io.IOException; @@ -243,9 +242,8 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli if (failAfterBytes < 0) { throw new IllegalArgumentException("[" + FAIL_AFTER_BYTES_PARAM + "] must be present and non-negative"); } - return channel -> client.threadPool() - .executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)) - .execute(() -> channel.sendResponse(RestResponse.chunked(RestStatus.OK, new ChunkedRestResponseBody() { + return channel -> randomExecutor(client.threadPool()).execute( + () -> channel.sendResponse(RestResponse.chunked(RestStatus.OK, new ChunkedRestResponseBody() { int bytesRemaining = failAfterBytes; @Override @@ -270,7 +268,8 @@ public ReleasableBytesReference encodeChunk(int sizeHint, Recycler rec public String getResponseContentTypeString() { return RestResponse.TEXT_CONTENT_TYPE; } - }, null))); + }, null)) + ); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index ad39066c253a5..e28434623601a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; @@ -91,14 +92,8 @@ public TransportFieldCapabilitiesAction( IndicesService indicesService, IndexNameExpressionResolver indexNameExpressionResolver ) { - // TODO replace SAME when removing workaround for 
https://github.com/elastic/elasticsearch/issues/97916 - super( - NAME, - transportService, - actionFilters, - FieldCapabilitiesRequest::new, - transportService.getThreadPool().executor(ThreadPool.Names.SAME) - ); + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + super(NAME, transportService, actionFilters, FieldCapabilitiesRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.searchCoordinationExecutor = threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION); this.transportService = transportService; diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index f682e75b89a07..fde0f190737e1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -31,14 +30,12 @@ import org.junit.Before; import org.junit.BeforeClass; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.IdentityHashMap; -import java.util.List; import java.util.Queue; import java.util.Set; -import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -136,9 +133,7 @@ private TransportMultiSearchAction createTransportMultiSearchAction(boolean cont final int availableProcessors = Runtime.getRuntime().availableProcessors(); AtomicInteger counter = new AtomicInteger(); - final List threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME); - Randomness.shuffle(threadPoolNames); - final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0)); + final Executor commonExecutor = randomExecutor(threadPool); final Set requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>())); NodeClient client = new NodeClient(settings, threadPool) { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index d04e41c83699d..561bca2695337 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -37,7 +38,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executor; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -143,10 +144,10 @@ public void testBatchExecute() throws ExecutionException, InterruptedException { AtomicInteger counter = new AtomicInteger(); AtomicReference errorHolder = new AtomicReference<>(); // randomize whether or not requests are executed asynchronously - final List threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME); - Randomness.shuffle(threadPoolNames); - final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0)); - final ExecutorService rarelyExecutor = threadPool.executor(threadPoolNames.get(1)); + final List executorServices = Arrays.asList(threadPool.generic(), EsExecutors.DIRECT_EXECUTOR_SERVICE); + Randomness.shuffle(executorServices); + final Executor commonExecutor = executorServices.get(0); + final Executor rarelyExecutor = executorServices.get(1); final Set requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>())); NodeClient client = new NodeClient(settings, threadPool) { @Override @@ -164,7 +165,7 @@ public void search(final SearchRequest request, final ActionListener { counter.decrementAndGet(); var response = SearchResponseUtils.emptyWithTotalHits( diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java index 9b1ce4611169b..5d2193c5ed3da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java @@ -99,9 +99,7 @@ public TransportVersion getTransportVersion() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws TransportException { - final var executor = threadPool.executor( - randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC, ThreadPool.Names.CLUSTER_COORDINATION) - ); + final var executor = randomExecutor(threadPool, ThreadPool.Names.CLUSTER_COORDINATION); executor.execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 453d9bfecf2ab..6a24c8fc88078 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -175,8 +175,7 @@ protected ExecutorService createThreadPoolExecutor() { masterService.setClusterStatePublisher((clusterStatePublicationEvent, publishListener, ackListener) -> { clusterStateRef.set(clusterStatePublicationEvent.getNewState()); ClusterServiceUtils.setAllElapsedMillis(clusterStatePublicationEvent); - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)) - .execute(() -> publishListener.onResponse(null)); + randomExecutor(threadPool).execute(() -> publishListener.onResponse(null)); }); masterService.setClusterStateSupplier(clusterStateRef::get); masterService.start(); diff --git a/server/src/test/java/org/elasticsearch/index/CompositeIndexEventListenerTests.java b/server/src/test/java/org/elasticsearch/index/CompositeIndexEventListenerTests.java index 10778cfb86e46..2ee721900b691 100644 --- 
a/server/src/test/java/org/elasticsearch/index/CompositeIndexEventListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/CompositeIndexEventListenerTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.test.MockLogAppender; -import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.Matchers; import java.util.concurrent.TimeUnit; @@ -60,9 +59,7 @@ public void beforeIndexShardRecovery( listener.onResponse(null); } else { // fails the listener sometimes - shard.getThreadPool() - .executor(randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME)) - .execute(ActionRunnable.run(listener, this::runStep)); + randomExecutor(shard.getThreadPool()).execute(ActionRunnable.run(listener, this::runStep)); } } @@ -129,9 +126,7 @@ public void afterIndexShardRecovery(IndexShard indexShard, ActionListener listener.onResponse(null); } else { // fails the listener sometimes - shard.getThreadPool() - .executor(randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME)) - .execute(ActionRunnable.run(listener, this::runStep)); + randomExecutor(shard.getThreadPool()).execute(ActionRunnable.run(listener, this::runStep)); } } diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index 7ef22abf91b0f..c197e4c296ef6 100644 --- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -469,7 +469,7 @@ public void testConcurrentConnectsAndDisconnects() throws Exception { final ConnectionManager.ConnectionValidator validator = (c, p, l) -> { assertTrue(validatorPermits.tryAcquire()); - threadPool.executor(randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME)).execute(() -> { + randomExecutor(threadPool).execute(() -> { try { l.onResponse(null); } finally { @@ -547,7 +547,7 @@ public void testConcurrentConnectsAndCloses() throws Exception { final ConnectionManager.ConnectionValidator validator = (c, p, l) -> { assertTrue(validatorPermits.tryAcquire()); - threadPool.executor(randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME)).execute(() -> { + randomExecutor(threadPool).execute(() -> { try { l.onResponse(null); } finally { diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 29590cfca5ead..3346bd40aec5a 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -266,22 +266,17 @@ public void testSendLocalRequest() throws Exception { final CountDownLatch latch = new CountDownLatch(2); final boolean cancellable = randomBoolean(); - serviceB.registerRequestHandler( - "internal:test", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - SimpleTestRequest::new, - (request, channel, task) -> { - try { - assertThat(task instanceof CancellableTask, equalTo(cancellable)); - assertEquals(request.sourceNode, "TS_A"); - final SimpleTestResponse responseB = new SimpleTestResponse("TS_B"); - channel.sendResponse(responseB); - response.set(responseB); - } finally { - latch.countDown(); - } + serviceB.registerRequestHandler("internal:test", randomExecutor(threadPool), 
SimpleTestRequest::new, (request, channel, task) -> { + try { + assertThat(task instanceof CancellableTask, equalTo(cancellable)); + assertEquals(request.sourceNode, "TS_A"); + final SimpleTestResponse responseB = new SimpleTestResponse("TS_B"); + channel.sendResponse(responseB); + response.set(responseB); + } finally { + latch.countDown(); } - ); + }); TransportActionProxy.registerProxyAction(serviceB, "internal:test", cancellable, SimpleTestResponse::new); AbstractSimpleTransportTestCase.connectToNode(serviceA, nodeB); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 052b9a7165a6c..f1db2946aa572 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -73,6 +73,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; @@ -168,6 +169,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -1196,6 +1198,21 @@ public static String randomDateFormatterPattern() { return randomFrom(FormatNames.values()).getName(); } + /** + * Randomly choose between {@link EsExecutors#DIRECT_EXECUTOR_SERVICE} (which does not fork), {@link ThreadPool#generic}, and one of the + * other named threadpool executors. + */ + public static Executor randomExecutor(ThreadPool threadPool, String... otherExecutorNames) { + final var choice = between(0, otherExecutorNames.length + 1); + if (choice < otherExecutorNames.length) { + return threadPool.executor(otherExecutorNames[choice]); + } else if (choice == otherExecutorNames.length) { + return threadPool.generic(); + } else { + return EsExecutors.DIRECT_EXECUTOR_SERVICE; + } + } + /** * helper to randomly perform on consumer with value */ diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 0f87ca4684c58..40c48a4d3fcde 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -928,19 +928,14 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException { Set sendingErrors = ConcurrentCollections.newConcurrentSet(); Set responseErrors = ConcurrentCollections.newConcurrentSet(); - serviceA.registerRequestHandler( - "internal:test", - threadPool.executor(randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC), - TestRequest::new, - (request, channel, task) -> { - try { - channel.sendResponse(new TestResponse((String) null)); - } catch (Exception e) { - logger.info("caught exception while responding", e); - responseErrors.add(e); - } + serviceA.registerRequestHandler("internal:test", randomExecutor(threadPool), TestRequest::new, (request, channel, task) -> { + try { + channel.sendResponse(new TestResponse((String) null)); + } catch (Exception e) { + logger.info("caught exception while responding", e); + responseErrors.add(e); } - ); + }); final TransportRequestHandler ignoringRequestHandler = (request, channel, task) -> { try { channel.sendResponse(new TestResponse((String) null)); @@ -2225,30 +2220,15 @@ public Executor executor() { } } - serviceB.registerRequestHandler( - "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - TestRequest::new, - new TestRequestHandler(serviceB) - ); - serviceC.registerRequestHandler( - "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - TestRequest::new, - new TestRequestHandler(serviceC) - ); - serviceA.registerRequestHandler( - "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - TestRequest::new, - new TestRequestHandler(serviceA) - ); + serviceB.registerRequestHandler("internal:action1", randomExecutor(threadPool), TestRequest::new, new TestRequestHandler(serviceB)); + serviceC.registerRequestHandler("internal:action1", randomExecutor(threadPool), TestRequest::new, new TestRequestHandler(serviceC)); + serviceA.registerRequestHandler("internal:action1", randomExecutor(threadPool), TestRequest::new, new TestRequestHandler(serviceA)); int iters = randomIntBetween(30, 60); CountDownLatch allRequestsDone = new CountDownLatch(iters); class TestResponseHandler implements TransportResponseHandler { private final int id; - private final String executor = randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; + private final Executor executor = randomExecutor(threadPool); TestResponseHandler(int id) { this.id = id; @@ -2277,7 +2257,7 @@ public void handleException(TransportException exp) { @Override public Executor executor() { - return threadPool.executor(executor); + return executor; } } @@ -2311,19 +2291,14 @@ public Executor executor() { } public void testRegisterHandlerTwice() { - serviceB.registerRequestHandler( - "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - TestRequest::new, - (request, message, task) -> { - throw new AssertionError("boom"); - } - ); + serviceB.registerRequestHandler("internal:action1", randomExecutor(threadPool), TestRequest::new, (request, message, task) -> { + throw new AssertionError("boom"); + }); expectThrows( IllegalArgumentException.class, () -> serviceB.registerRequestHandler( "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), + randomExecutor(threadPool), TestRequest::new, (request, message, task) -> { throw new AssertionError("boom"); @@ -2331,14 +2306,9 @@ public void testRegisterHandlerTwice() { ) ); - serviceA.registerRequestHandler( - "internal:action1", - threadPool.executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)), - TestRequest::new, - (request, message, task) -> { - throw new AssertionError("boom"); - } - ); + serviceA.registerRequestHandler("internal:action1", randomExecutor(threadPool), TestRequest::new, (request, message, task) -> { + throw new AssertionError("boom"); + }); } public void testHandshakeWithIncompatVersion() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index df0f199f03aba..590757f9d7d32 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.Task; @@ -51,13 +52,13 @@ public TransportSamlAuthenticateAction( TokenService tokenService, SecurityContext securityContext ) { - // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super( SamlAuthenticateAction.NAME, transportService, actionFilters, SamlAuthenticateRequest::new, - threadPool.executor(ThreadPool.Names.SAME) + EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.threadPool = threadPool; this.authenticationService = authenticationService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java index 44b4302b7f554..ade43984c6bab 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -59,13 +60,13 @@ public TransportSamlInvalidateSessionAction( TokenService tokenService, Realms realms ) { - // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super( SamlInvalidateSessionAction.NAME, transportService, actionFilters, SamlInvalidateSessionRequest::new, - transportService.getThreadPool().executor(ThreadPool.Names.SAME) + EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.tokenService = tokenService; this.realms = realms; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java index fc73514f38ffc..f5a55da7b1916 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,14 +51,8 @@ public TransportSamlLogoutAction( Realms realms, TokenService tokenService ) { - // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 - super( - SamlLogoutAction.NAME, - transportService, - actionFilters, - SamlLogoutRequest::new, - transportService.getThreadPool().executor(ThreadPool.Names.SAME) - ); + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + super(SamlLogoutAction.NAME, transportService, actionFilters, SamlLogoutRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.realms = realms; this.tokenService = tokenService; this.genericExecutor = transportService.getThreadPool().generic(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java index 4458f3ac304ed..e18ca43c018f5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -41,13 +42,13 @@ public final class TransportSamlPrepareAuthenticationAction extends HandledTrans @Inject public TransportSamlPrepareAuthenticationAction(TransportService transportService, ActionFilters actionFilters, Realms realms) { - // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super( SamlPrepareAuthenticationAction.NAME, transportService, actionFilters, SamlPrepareAuthenticationRequest::new, - transportService.getThreadPool().executor(ThreadPool.Names.SAME) + EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.realms = realms; this.genericExecutor = transportService.getThreadPool().generic(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index d49c1be8a7e0a..6d5ba44ccabf7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -566,7 +566,7 @@ public void testProfileSecuredRequestHandlerDecrementsRefCountOnFailure() throws logger, TransportDeleteIndexAction.TYPE.name(), randomBoolean(), - threadPool.executor(randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC), + randomExecutor(threadPool), (request, channel, task) -> fail("should fail at destructive operations check to trigger listener failure"), Map.of( profileName, From ab8f4351b2c76ebb4662c582272a85282249ebb7 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 13 Mar 2024 10:30:16 +0100 Subject: [PATCH 163/248] Additional logging for testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShards (#106255) --- .../allocation/decider/DiskThresholdDeciderIT.java | 9 ++++----- .../allocator/DesiredBalanceComputerTests.java | 4 ---- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index d664d4ab352d9..fff708bbddc1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -164,7 +164,10 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti @TestIssueLogging( value = "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceComputer:TRACE," + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler:DEBUG," - + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator:TRACE", + + "org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator:TRACE," + + "org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator:TRACE," + + "org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders:TRACE," + + "org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider:TRACE", issueUrl = "https://github.com/elastic/elasticsearch/issues/105331" ) public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShards() throws Exception { @@ -291,10 +294,6 @@ public Set getShardIdsWithSizeSmallerOrEqual(long size) { public Set getSmallestShardIds() { return getShardIdsWithSizeSmallerOrEqual(getSmallestShardSize()); } - - public Set getAllShardIds() { - return sizes.stream().map(ShardSize::shardId).collect(toSet()); - } } private record ShardSize(ShardId shardId, long size) {} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 4fb1093698430..989e810bbc2b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -1300,10 +1300,6 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing }, DesiredBalanceComputer.class, expectation); } - private static Map.Entry indexSize(ClusterState clusterState, String name, long size, boolean primary) { - return Map.entry(shardIdentifierFromRouting(findShardId(clusterState, name), primary), size); - } - private static ShardId findShardId(ClusterState clusterState, String name) { return clusterState.getRoutingTable().index(name).shard(0).shardId(); } From b085d878c7b31c1b2657f5b12f0c6f94e13a6620 Mon Sep 17 00:00:00 2001 
From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Wed, 13 Mar 2024 12:02:43 +0100 Subject: [PATCH 164/248] Field caps performance pt2 (#105941) * use map instead of loop * Update docs/changelog/105770.yaml * Armin's code * javaDoc + renaming * Update docs/changelog/105941.yaml --- docs/changelog/105941.yaml | 5 +++++ .../action/fieldcaps/FieldCapabilitiesFetcher.java | 5 +++-- .../elasticsearch/index/mapper/FieldTypeLookup.java | 7 +++++++ .../org/elasticsearch/index/mapper/MappingLookup.java | 7 +++++++ .../elasticsearch/index/query/QueryRewriteContext.java | 10 +++++----- 5 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/105941.yaml diff --git a/docs/changelog/105941.yaml b/docs/changelog/105941.yaml new file mode 100644 index 0000000000000..8e2eea1657208 --- /dev/null +++ b/docs/changelog/105941.yaml @@ -0,0 +1,5 @@ +pr: 105941 +summary: Field caps performance pt2 +area: Search +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 8025923dbdd33..6028a6e21ecff 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -162,11 +162,12 @@ static Map retrieveFieldCaps( var fieldInfos = indexShard.getFieldInfos(); includeEmptyFields = includeEmptyFields || enableFieldHasValue == false; Map responseMap = new HashMap<>(); - for (String field : context.getAllFieldNames()) { + for (Map.Entry entry : context.getAllFields()) { + final String field = entry.getKey(); if (fieldNameFilter.test(field) == false) { continue; } - MappedFieldType ft = context.getFieldType(field); + MappedFieldType ft = entry.getValue(); if ((includeEmptyFields || ft.fieldHasValue(fieldInfos)) && (indexFieldfilter.test(ft.name()) || context.isMetadataField(ft.name())) && (filter == null || filter.test(ft))) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 2b4eec2bdd565..5e3dbe9590b99 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -211,4 +211,11 @@ Set sourcePaths(String field) { public String parentField(String field) { return fullSubfieldNameToParentPath.get(field); } + + /** + * @return A map from field name to the MappedFieldType + */ + public Map getFullNameToFieldType() { + return fullNameToFieldType; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 0ae13241b7f56..673593cc6e240 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -399,6 +399,13 @@ public Set getMatchingFieldNames(String pattern) { return fieldTypeLookup.getMatchingFieldNames(pattern); } + /** + * @return A map from field name to the MappedFieldType + */ + public Map getFullNameToFieldType() { + return fieldTypeLookup.getFullNameToFieldType(); + } + /** * Returns the mapped field type for the given field name. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index e36c4d608d59f..fbfce6aab403f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -336,13 +336,13 @@ public Set getMatchingFieldNames(String pattern) { } /** - * Same as {@link #getMatchingFieldNames(String)} with pattern {@code *} but returns an {@link Iterable} instead of a set. + * @return An {@link Iterable} with key the field name and value the MappedFieldType */ - public Iterable getAllFieldNames() { - var allFromMapping = mappingLookup.getMatchingFieldNames("*"); + public Iterable> getAllFields() { + var allFromMapping = mappingLookup.getFullNameToFieldType(); // runtime mappings and non-runtime fields don't overlap, so we can simply concatenate the iterables here return runtimeMappings.isEmpty() - ? allFromMapping - : () -> Iterators.concat(allFromMapping.iterator(), runtimeMappings.keySet().iterator()); + ? allFromMapping.entrySet() + : () -> Iterators.concat(allFromMapping.entrySet().iterator(), runtimeMappings.entrySet().iterator()); } } From 9b4a528f955a929034d1aae0b509345d50ce007b Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Wed, 13 Mar 2024 12:22:10 +0100 Subject: [PATCH 165/248] add missing logstash capacity mappings (#106218) Co-authored-by: Elastic Machine --- .../main/resources/monitoring-logstash-mb.json | 16 ++++++++++++++++ .../monitoring/MonitoringTemplateRegistry.java | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json index 706b582f5c3af..54c6e0df95e78 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json @@ -87,6 +87,22 @@ }, "queue_size_in_bytes": { "type": "long" + }, + "capacity": { + "properties": { + "max_queue_size_in_bytes": { + "type": "long" + }, + "max_unread_events": { + "type": "long" + }, + "page_capacity_in_bytes": { + "type": "long" + }, + "queue_size_in_bytes": { + "type": "long" + } + } } } }, diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index 753700a7ec913..4477057b2399b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. 
*/ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 14; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 15; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; From 426b6106567aa3b548a7f34737134654e2296c54 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 13 Mar 2024 12:56:15 +0100 Subject: [PATCH 166/248] Revert "Exclude internal fields from job APIs. (#106115)" (#106277) This reverts commit be1cf31cf8b7dfaf0950fdc2eabdf703e42a1fd7. --- x-pack/plugin/build.gradle | 2 - .../core/ml/action/PreviewDatafeedAction.java | 2 +- .../xpack/core/ml/action/PutJobAction.java | 11 ++- .../ml/action/ValidateJobConfigAction.java | 14 +++- .../xpack/core/ml/job/config/Job.java | 67 ++++++++++++------- .../xpack/core/ml/job/messages/Messages.java | 2 + .../xpack/core/ml/job/config/JobTests.java | 25 +++++-- .../rest-api-spec/test/ml/jobs_crud.yml | 2 +- .../rest-api-spec/test/ml/validate.yml | 16 ++++- 9 files changed, 103 insertions(+), 38 deletions(-) diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 72e63b3255999..eae3031512d4f 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -109,8 +109,6 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> ) task.skipTest("ml/jobs_crud/Test update job", "Behaviour change #89824 - added limit filter to categorization analyzer") task.skipTest("ml/jobs_crud/Test create job with delimited format", "removing undocumented functionality") - task.skipTest("ml/jobs_crud/Test cannot create job with model snapshot id set", "Exception type has changed.") - task.skipTest("ml/validate/Test job config is invalid because model snapshot id set", "Exception type has changed.") task.skipTest("ml/datafeeds_crud/Test update datafeed to point to missing job", "behaviour change #44752 - not allowing to update datafeed job_id") task.skipTest( "ml/datafeeds_crud/Test update datafeed to point to different job", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java index d03a6d5c0c7c5..8d4e9d25b94a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java @@ -60,7 +60,7 @@ public static class Request extends ActionRequest implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("preview_datafeed_action", Request.Builder::new); static { PARSER.declareObject(Builder::setDatafeedBuilder, DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG); - PARSER.declareObject(Builder::setJobBuilder, Job.REST_REQUEST_PARSER, JOB_CONFIG); + PARSER.declareObject(Builder::setJobBuilder, Job.STRICT_PARSER, JOB_CONFIG); PARSER.declareString(Builder::setStart, START_TIME); PARSER.declareString(Builder::setEnd, END_TIME); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index efb4dacd83ba4..400bdaa3a27ea 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; +import java.util.List; import java.util.Objects; public class PutJobAction extends ActionType { @@ -34,7 +35,7 @@ private PutJobAction() { public static class Request extends AcknowledgedRequest { public static Request parseRequest(String jobId, XContentParser parser, IndicesOptions indicesOptions) { - Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); if (jobBuilder.getId() == null) { jobBuilder.setId(jobId); } else if (Strings.isNullOrEmpty(jobId) == false && jobId.equals(jobBuilder.getId()) == false) { @@ -57,6 +58,14 @@ public Request(Job.Builder jobBuilder) { // would occur when parsing an old job config that already had duplicate detectors. jobBuilder.validateDetectorsAreUnique(); + // Some fields cannot be set at create time + List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); + if (invalidJobCreationSettings.isEmpty() == false) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) + ); + } + this.jobBuilder = jobBuilder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index 76cba60667c32..48549ae100e36 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -14,9 +14,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; import java.util.Date; +import java.util.List; import java.util.Objects; public class ValidateJobConfigAction extends ActionType { @@ -30,10 +32,10 @@ protected ValidateJobConfigAction() { public static class Request extends ActionRequest { - private final Job job; + private Job job; public static Request parseRequest(XContentParser parser) { - Job.Builder jobBuilder = Job.REST_REQUEST_PARSER.apply(parser, null); + Job.Builder jobBuilder = Job.STRICT_PARSER.apply(parser, null); // When jobs are PUT their ID must be supplied in the URL - assume this will // be valid unless an invalid job ID is specified in the JSON to be validated jobBuilder.setId(jobBuilder.getId() != null ? jobBuilder.getId() : "ok"); @@ -43,6 +45,14 @@ public static Request parseRequest(XContentParser parser) { // would occur when parsing an old job config that already had duplicate detectors. 
jobBuilder.validateDetectorsAreUnique(); + // Some fields cannot be set at create time + List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); + if (invalidJobCreationSettings.isEmpty() == false) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, String.join(",", invalidJobCreationSettings)) + ); + } + return new Request(jobBuilder.build(new Date())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 8da0209e10293..fbb1a137bdc13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -98,9 +98,8 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final ParseField RESULTS_FIELD = new ParseField("jobs"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly - public static final ObjectParser LENIENT_PARSER = createParser(true, true); - // Use the REST request parser to parse a job passed to the API, to disallow setting internal fields. - public static final ObjectParser REST_REQUEST_PARSER = createParser(false, false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + public static final ObjectParser STRICT_PARSER = createParser(false); public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1); @@ -115,12 +114,26 @@ public class Job implements SimpleDiffable, Writeable, ToXContentObject { public static final long DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS = 10; public static final long DEFAULT_DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS = 1; - private static ObjectParser createParser(boolean allowInternalFields, boolean ignoreUnknownFields) { + private static ObjectParser createParser(boolean ignoreUnknownFields) { ObjectParser parser = new ObjectParser<>("job_details", ignoreUnknownFields, Builder::new); parser.declareString(Builder::setId, ID); + parser.declareString(Builder::setJobType, JOB_TYPE); + parser.declareString(Builder::setJobVersion, JOB_VERSION); parser.declareStringArray(Builder::setGroups, GROUPS); parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); + parser.declareField( + Builder::setCreateTime, + p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ValueType.VALUE + ); + parser.declareField( + Builder::setFinishedTime, + p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), + FINISHED_TIME, + ValueType.VALUE + ); parser.declareObject( Builder::setAnalysisConfig, ignoreUnknownFields ? 
AnalysisConfig.LENIENT_PARSER : AnalysisConfig.STRICT_PARSER, @@ -152,35 +165,17 @@ private static ObjectParser createParser(boolean allowInternalFie parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS); parser.declareLong(Builder::setDailyModelSnapshotRetentionAfterDays, DAILY_MODEL_SNAPSHOT_RETENTION_AFTER_DAYS); parser.declareField(Builder::setCustomSettings, (p, c) -> p.mapOrdered(), CUSTOM_SETTINGS, ValueType.OBJECT); + parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); + parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME); + parser.declareBoolean(Builder::setDeleting, DELETING); parser.declareBoolean(Builder::setAllowLazyOpen, ALLOW_LAZY_OPEN); + parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); parser.declareObject( Builder::setDatafeed, ignoreUnknownFields ? DatafeedConfig.LENIENT_PARSER : DatafeedConfig.STRICT_PARSER, DATAFEED_CONFIG ); - - if (allowInternalFields) { - parser.declareString(Builder::setJobType, JOB_TYPE); - parser.declareString(Builder::setJobVersion, JOB_VERSION); - parser.declareField( - Builder::setCreateTime, - p -> TimeUtils.parseTimeField(p, CREATE_TIME.getPreferredName()), - CREATE_TIME, - ValueType.VALUE - ); - parser.declareField( - Builder::setFinishedTime, - p -> TimeUtils.parseTimeField(p, FINISHED_TIME.getPreferredName()), - FINISHED_TIME, - ValueType.VALUE - ); - parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); - parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); - parser.declareBoolean(Builder::setDeleting, DELETING); - parser.declareObject(Builder::setBlocked, ignoreUnknownFields ? Blocked.LENIENT_PARSER : Blocked.STRICT_PARSER, BLOCKED); - } - return parser; } @@ -1025,6 +1020,26 @@ public Builder setDatafeedIndicesOptionsIfRequired(IndicesOptions indicesOptions return this; } + /** + * Return the list of fields that have been set and are invalid to + * be set when the job is created e.g. model snapshot Id should not + * be set at job creation. + * @return List of fields set fields that should not be. + */ + public List invalidCreateTimeSettings() { + List invalidCreateValues = new ArrayList<>(); + if (modelSnapshotId != null) { + invalidCreateValues.add(MODEL_SNAPSHOT_ID.getPreferredName()); + } + if (finishedTime != null) { + invalidCreateValues.add(FINISHED_TIME.getPreferredName()); + } + if (createTime != null) { + invalidCreateValues.add(CREATE_TIME.getPreferredName()); + } + return invalidCreateValues; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 52c97ece1b017..ad7a6b998fafd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -222,6 +222,8 @@ public final class Messages { public static final String JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD = "over_field_name must be set when the ''{0}'' function is used"; public static final String JOB_CONFIG_ID_ALREADY_TAKEN = "The job cannot be created with the Id ''{0}''. 
The Id is already used."; public static final String JOB_CONFIG_ID_TOO_LONG = "The job id cannot contain more than {0,number,integer} characters."; + public static final String JOB_CONFIG_INVALID_CREATE_SETTINGS = + "The job is configured with fields [{0}] that are illegal to set at job creation"; public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS = "Invalid field name ''{0}''. Field names including over, by and partition " + "fields cannot contain any of these characters: {1}"; public static final String JOB_CONFIG_INVALID_FIELDNAME = diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 047f3a418c36b..4fff2804f9350 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -43,6 +43,7 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -100,7 +101,7 @@ protected Writeable.Reader instanceReader() { @Override protected Job doParseInstance(XContentParser parser) { - return Job.LENIENT_PARSER.apply(parser, null).build(); + return Job.STRICT_PARSER.apply(parser, null).build(); } public void testToXContentForInternalStorage() throws IOException { @@ -118,10 +119,10 @@ public void testToXContentForInternalStorage() throws IOException { } } - public void testRestRequestParser_DoesntAllowInternalFields() throws IOException { + public void testFutureConfigParse() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, FUTURE_JOB); - XContentParseException e = expectThrows(XContentParseException.class, () -> Job.REST_REQUEST_PARSER.apply(parser, null).build()); - assertEquals("[3:5] [job_details] unknown field [create_time]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> Job.STRICT_PARSER.apply(parser, null).build()); + assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today]", e.getMessage()); } public void testFutureMetadataParse() throws IOException { @@ -553,6 +554,22 @@ public void testBuilder_givenTimeFieldInAnalysisConfig() { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } + public void testInvalidCreateTimeSettings() { + Job.Builder builder = new Job.Builder("invalid-settings"); + builder.setModelSnapshotId("snapshot-foo"); + assertEquals(Collections.singletonList(Job.MODEL_SNAPSHOT_ID.getPreferredName()), builder.invalidCreateTimeSettings()); + + builder.setCreateTime(new Date()); + builder.setFinishedTime(new Date()); + + Set expected = new HashSet<>(); + expected.add(Job.CREATE_TIME.getPreferredName()); + expected.add(Job.FINISHED_TIME.getPreferredName()); + expected.add(Job.MODEL_SNAPSHOT_ID.getPreferredName()); + + assertEquals(expected, new HashSet<>(builder.invalidCreateTimeSettings())); + } + public void testEmptyGroup() { Job.Builder builder = buildJobBuilder("foo"); builder.setGroups(Arrays.asList("foo-group", "")); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml index 24e869781f677..3c4439444d1a1 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -1130,7 +1130,7 @@ "Test cannot create job with model snapshot id set": - do: - catch: /x_content_parse_exception/ + catch: /illegal_argument_exception/ ml.put_job: job_id: has-model-snapshot-id body: > diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml index a2cfb65b08a11..1df34a64f860a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml @@ -76,7 +76,21 @@ "Test job config is invalid because model snapshot id set": - do: - catch: /x_content_parse_exception/ + catch: /illegal_argument_exception/ + ml.validate: + body: > + { + "model_snapshot_id": "wont-create-with-this-setting", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ ml.validate: body: > { From 1af4428195b04e4885cfed5fdb3590224313a7ba Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 13 Mar 2024 13:51:42 +0100 Subject: [PATCH 167/248] Cleanup SamlAuthenticationIT (#106227) Remove comments about awaitsFix --- .../xpack/security/authc/saml/SamlAuthenticationIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index e8caf004e043b..3f1c532d1adfa 100644 --- a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -251,7 +251,6 @@ public void setupNativeUser() throws IOException { *
      1. Uses that token to verify the user details
      2. * */ - // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithSamlRoleMapping() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth"); verifyElasticsearchAccessTokenForRoleMapping(authTokens.v1()); @@ -262,7 +261,6 @@ public void testLoginUserWithSamlRoleMapping() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginUserWithAuthorizingRealm() throws Exception { final Tuple authTokens = loginViaSaml("shibboleth_native"); verifyElasticsearchAccessTokenForAuthorizingRealms(authTokens.v1()); @@ -273,7 +271,6 @@ public void testLoginUserWithAuthorizingRealm() throws Exception { verifyElasticsearchAccessTokenInvalidated(accessToken); } - // @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103595") public void testLoginWithWrongRealmFails() throws Exception { final BasicHttpContext context = new BasicHttpContext(); try (CloseableHttpClient client = getHttpClient()) { From ebf35500882fe8b590b96c70d281a012ce68311e Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 13 Mar 2024 13:54:48 +0100 Subject: [PATCH 168/248] Add SharedBytes.copyToCacheFileAligned without length method (#106193) This change adds a variant of the existing SharedBytes.copyToCacheFileAligned that is not limited to copy only a given length of bytes but copies all bytes that can be read from the InputStream. It also adds a variant of the existing BlobCacheUtils.computeRange() method that computes range of bytes to expand over a complete regions, instead of being limited to the length of the blob. Finally, it removes an assertion that a CacheFile cannot write a range larger than the blob length. 
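As a quick illustration of the new `computeRange(rangeSize, position, size)` overload, here is the arithmetic on made-up numbers; the expression is the same one used in the method body below, but the concrete values are not taken from the change.

    // Illustrative values only; the expression mirrors the new BlobCacheUtils.computeRange overload.
    long rangeSize = 16;   // hypothetical region size in bytes
    long position = 20;    // the read starts inside the second region
    long size = 30;        // the last byte read (offset 49) falls in the fourth region
    long start = (position / rangeSize) * rangeSize;                  // 16
    long end = (((position + size - 1) / rangeSize) + 1) * rangeSize; // 64
    assert start == 16 && end == 64; // the range expands to whole regions, regardless of blob length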
--- .../blobcache/BlobCacheUtils.java | 4 +++ .../shared/SharedBlobCacheService.java | 3 +- .../blobcache/shared/SharedBytes.java | 32 +++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java index be2971bfa319a..940578d3dafb2 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java @@ -63,6 +63,10 @@ public static ByteRange computeRange(long rangeSize, long position, long size, l ); } + public static ByteRange computeRange(long rangeSize, long position, long size) { + return ByteRange.of((position / rangeSize) * rangeSize, (((position + size - 1) / rangeSize) + 1) * rangeSize); + } + public static void ensureSlice(String sliceName, long sliceOffset, long sliceLength, IndexInput input) { if (sliceOffset < 0 || sliceLength < 0 || sliceOffset + sliceLength > input.length()) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 5b767f2461f6b..934aeef26843f 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -1006,7 +1006,8 @@ public int populateAndRead( final RangeAvailableHandler reader, final RangeMissingHandler writer ) throws Exception { - assert assertOffsetsWithinFileLength(rangeToWrite.start(), rangeToWrite.length(), length); + // some cache files can grow after being created, so rangeToWrite can be larger than the initial {@code length} + assert rangeToWrite.start() >= 0 : rangeToWrite; assert assertOffsetsWithinFileLength(rangeToRead.start(), rangeToRead.length(), length); // We are interested in the total time that the system spends when fetching a result (including time spent queuing), so we start // our measurement here. diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 530cbbe6c6184..62c4809c04c1d 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -184,6 +185,37 @@ public static void copyToCacheFileAligned( } } + /** + * Copy all bytes from {@code input} to {@code fc}, only doing writes aligned along {@link #PAGE_SIZE}. 
+ * + * @param fc output cache file reference + * @param input stream to read from + * @param fileChannelPos position in {@code fc} to write to + * @param progressUpdater callback to invoke with the number of copied bytes as they are copied + * @param buffer bytebuffer to use for writing + * @return the number of bytes copied + * @throws IOException on failure + */ + public static int copyToCacheFileAligned(IO fc, InputStream input, int fileChannelPos, IntConsumer progressUpdater, ByteBuffer buffer) + throws IOException { + int bytesCopied = 0; + while (true) { + final int bytesRead = Streams.read(input, buffer, buffer.remaining()); + if (bytesRead <= 0) { + break; + } + if (buffer.hasRemaining()) { + // ensure that last write is aligned on 4k boundaries (= page size) + final int remainder = buffer.position() % PAGE_SIZE; + final int adjustment = remainder == 0 ? 0 : PAGE_SIZE - remainder; + buffer.position(buffer.position() + adjustment); + } + bytesCopied += positionalWrite(fc, fileChannelPos + bytesCopied, buffer); + progressUpdater.accept(bytesCopied); + } + return bytesCopied; + } + private static int positionalWrite(IO fc, int start, ByteBuffer byteBuffer) throws IOException { byteBuffer.flip(); int written = fc.write(byteBuffer, start); From 27f31b1e2346ff9e80b6d5bdab7ff2436dbb2813 Mon Sep 17 00:00:00 2001 From: Julia Bardi <90178898+juliaElastic@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:33:40 +0100 Subject: [PATCH 169/248] added unhealthy_reason (#106246) --- .../template-resources/src/main/resources/fleet-agents.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json index 5facc229bf503..fba17ab0b3de9 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json @@ -58,6 +58,9 @@ } } }, + "unhealthy_reason": { + "type": "keyword" + }, "components": { "type": "object", "enabled": false From 2a8708167399205b6e368e04ee2f5657682c6c85 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 13 Mar 2024 13:43:20 +0000 Subject: [PATCH 170/248] Add a check for the same feature being declared regular and historical (#106285) --- docs/changelog/106285.yaml | 5 +++++ .../org/elasticsearch/features/FeatureData.java | 14 +++++++++++++- .../features/FeatureServiceTests.java | 12 ++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/106285.yaml diff --git a/docs/changelog/106285.yaml b/docs/changelog/106285.yaml new file mode 100644 index 0000000000000..37a7e67fe9395 --- /dev/null +++ b/docs/changelog/106285.yaml @@ -0,0 +1,5 @@ +pr: 106285 +summary: Add a check for the same feature being declared regular and historical +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/features/FeatureData.java b/server/src/main/java/org/elasticsearch/features/FeatureData.java index 273617205ee47..2dd35e648afaf 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureData.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureData.java @@ -41,6 +41,8 @@ public static FeatureData createFromSpecifications(List> historicalFeatures = new TreeMap<>(); Map nodeFeatures = new HashMap<>(); for (FeatureSpecification spec : specs) { + var specFeatures = spec.getFeatures(); + for (var hfe : spec.getHistoricalFeatures().entrySet()) { FeatureSpecification 
existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); // the same SPI class can be loaded multiple times if it's in the base classloader @@ -61,10 +63,20 @@ public static FeatureData createFromSpecifications(List new HashSet<>()).add(hfe.getKey().id()); } - for (NodeFeature f : spec.getFeatures()) { + for (NodeFeature f : specFeatures) { FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); if (existing != null && existing.getClass() != spec.getClass()) { throw new IllegalArgumentException( diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index 26d880d0a5d8e..f91b159f99899 100644 --- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -84,6 +84,18 @@ public void testFailsNonHistoricalVersion() { ); } + public void testFailsSameRegularAndHistoricalFeature() { + FeatureSpecification fs = new TestFeatureSpecification( + Set.of(new NodeFeature("f1")), + Map.of(new NodeFeature("f1"), Version.V_8_12_0) + ); + + assertThat( + expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(fs))).getMessage(), + containsString("cannot be declared as both a regular and historical feature") + ); + } + public void testGetNodeFeaturesCombinesAllSpecs() { List specs = List.of( new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2")), Map.of()), From 146d13ca3757ad52ed9df5c507fd7bfa187927af Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 13 Mar 2024 09:54:29 -0400 Subject: [PATCH 171/248] ESQL: Allow tests to depend on cluster features (#106271) This allows writing `required_feature:` in a test to skip the test on versions of elasticsearch that are missing a feature, opting ESQL into the same versioning as the yaml tests. 
--- .../xpack/esql/qa/rest/EsqlSpecTestCase.java | 3 +++ .../testFixtures/src/main/resources/string.csv-spec | 4 +++- .../elasticsearch/xpack/esql/plugin/EsqlFeatures.java | 7 ++++++- .../java/org/elasticsearch/xpack/esql/CsvTests.java | 4 ++++ .../org/elasticsearch/xpack/ql/CsvSpecReader.java | 11 +++++++++-- 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index 18b9206f9b89e..a05a6a284011d 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -132,6 +132,9 @@ public final void test() throws Throwable { } protected void shouldSkipTest(String testName) { + for (String feature : testCase.requiredFeatures) { + assumeTrue("Test " + testName + " requires " + feature, clusterHasFeature(feature)); + } assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index e6c73f9054c51..73c508aad03f5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -299,7 +299,9 @@ emp_no:integer | name:keyword ; // Note: no matches in MV returned -in#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +in +required_feature: esql.mv_load + from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; ignoreOrder:true warning:Line 1:24: evaluation of [job_positions in (\"Internship\", first_name)] failed, treating result as null. Only first 20 failures recorded. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index f80915e0788f2..75dee029ad523 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -14,8 +14,13 @@ import java.util.Map; public class EsqlFeatures implements FeatureSpecification { + private static final NodeFeature MV_LOAD = new NodeFeature("esql.mv_load"); + @Override public Map getHistoricalFeatures() { - return Map.of(TransportEsqlStatsAction.ESQL_STATS_FEATURE, Version.V_8_11_0); + return Map.ofEntries( + Map.entry(TransportEsqlStatsAction.ESQL_STATS_FEATURE, Version.V_8_11_0), + Map.entry(MV_LOAD, Version.V_8_12_0) + ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index dd937c11c9642..56b6dcdd1ad8b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -218,6 +218,10 @@ public CsvTests(String fileName, String groupName, String testName, Integer line public final void test() throws Throwable { try { + /* + * We're intentionally not NodeFeatures here because we expect all + * of the features to be supported in this unit test. 
+ */ assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); doTest(); } catch (Throwable th) { diff --git a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java index a3b5147988b13..5023b5a4bf877 100644 --- a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java +++ b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java @@ -30,6 +30,7 @@ public static class CsvSpecParser implements SpecReader.Parser { private final StringBuilder earlySchema = new StringBuilder(); private final StringBuilder query = new StringBuilder(); private final StringBuilder data = new StringBuilder(); + private final List requiredFeatures = new ArrayList<>(); private CsvTestCase testCase; private CsvSpecParser() {} @@ -41,6 +42,8 @@ public Object parse(String line) { if (line.startsWith(SCHEMA_PREFIX)) { assertThat("Early schema already declared " + earlySchema, earlySchema.length(), is(0)); earlySchema.append(line.substring(SCHEMA_PREFIX.length()).trim()); + } else if (line.toLowerCase(Locale.ROOT).startsWith("required_feature:")) { + requiredFeatures.add(line.substring("required_feature:".length()).trim()); } else { if (line.endsWith(";")) { // pick up the query @@ -48,6 +51,8 @@ public Object parse(String line) { query.append(line.substring(0, line.length() - 1).trim()); testCase.query = query.toString(); testCase.earlySchema = earlySchema.toString(); + testCase.requiredFeatures = List.copyOf(requiredFeatures); + requiredFeatures.clear(); earlySchema.setLength(0); query.setLength(0); } @@ -61,9 +66,10 @@ public Object parse(String line) { // read the results else { // read data - if (line.toLowerCase(Locale.ROOT).startsWith("warning:")) { + String lower = line.toLowerCase(Locale.ROOT); + if (lower.startsWith("warning:")) { testCase.expectedWarnings.add(line.substring("warning:".length()).trim()); - } else if (line.toLowerCase(Locale.ROOT).startsWith("ignoreorder:")) { + } else if (lower.startsWith("ignoreorder:")) { testCase.ignoreOrder = Boolean.parseBoolean(line.substring("ignoreOrder:".length()).trim()); } else if (line.startsWith(";")) { testCase.expectedResults = data.toString(); @@ -88,6 +94,7 @@ public static class CsvTestCase { public String expectedResults; private final List expectedWarnings = new ArrayList<>(); public boolean ignoreOrder; + public List requiredFeatures = List.of(); // The emulated-specific warnings must always trail the non-emulated ones, if these are present. Otherwise, the closing bracket // would need to be changed to a less common sequence (like `]#` maybe). 
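Putting the pieces of this change together, a minimal sketch with a made-up feature id (`esql.my_new_behavior` does not exist; only the loop at the end is taken from the change):

    // Sketch only: the feature id below is invented for illustration.
    // 1. The plugin declares when the behaviour first appeared, e.g. in EsqlFeatures:
    //      Map.entry(new NodeFeature("esql.my_new_behavior"), Version.V_8_13_0)
    // 2. The csv-spec test declares `required_feature: esql.my_new_behavior` before its query.
    // 3. EsqlSpecTestCase then skips the test on clusters that lack the feature:
    for (String feature : testCase.requiredFeatures) {
        assumeTrue("Test " + testName + " requires " + feature, clusterHasFeature(feature));
    }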
From 9bf1d5da67a28a8ed5874be38afc403be237b63a Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 13 Mar 2024 15:25:26 +0100 Subject: [PATCH 172/248] Add test case for affix update consumer bug (relates to #106283) (#106284) --- .../common/settings/ScopedSettingsTests.java | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index e9acdb973f4bf..94c0b849edf8d 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportSettings; +import org.mockito.Mockito; import java.io.IOException; import java.util.Arrays; @@ -41,6 +42,10 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class ScopedSettingsTests extends ESTestCase { @@ -536,6 +541,29 @@ public void testAffixGroupUpdateConsumer() { results.clear(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106283") + public void testAffixUpdateConsumerWithAlias() { + Setting.AffixSetting prefixSetting = Setting.prefixKeySetting( + "prefix.", + "fallback.", + (ns, k) -> Setting.simpleString(k, "default", Property.Dynamic, Property.NodeScope) + ); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(prefixSetting))); + BiConsumer affixUpdateConsumer = Mockito.mock("affixUpdateConsumer"); + service.addAffixUpdateConsumer(prefixSetting, affixUpdateConsumer, (s, v) -> {}); + + service.applySettings(Settings.builder().put("prefix.key", "value").build()); + verify(affixUpdateConsumer).accept("key", "value"); + verifyNoMoreInteractions(affixUpdateConsumer); + clearInvocations((Object) affixUpdateConsumer); + + service.applySettings(Settings.builder().put("fallback.key", "othervalue").build()); + verify(affixUpdateConsumer, never()).accept("key", "default"); // unexpected invocation using the default value + verify(affixUpdateConsumer).accept("key", "othervalue"); + verifyNoMoreInteractions(affixUpdateConsumer); + clearInvocations((Object) affixUpdateConsumer); + } + public void testAddConsumerAffix() { Setting.AffixSetting intSetting = Setting.affixKeySetting( "foo.", From a698dee579de3e19461bcfac0160265acde55131 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:36:12 +0200 Subject: [PATCH 173/248] [TEST] Increase timeout for rollover to exceed look_ahead_time (#106290) `look_ahead_time` is set to 1 minute, the `assertBusy` loop needs to wait for longer than that to get a readonly backing index. Note that this is only relevant when the `UpdateTimeSeriesRangeService` kicks in to bump the end time of the head index. This is rare (it runs every 10 minutes) but can happen. 
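The arithmetic behind that reasoning, for reference; the 1 minute value is the test's `look_ahead_time`, and the 30 and 120 second figures are the old and new waits.

    // The assertBusy condition may only become true after look_ahead_time (1m) has lapsed,
    // so the wait has to exceed that window.
    long lookAheadMillis = TimeValue.timeValueMinutes(1).millis();
    long oldWaitMillis = TimeValue.timeValueSeconds(30).millis();
    long newWaitMillis = TimeValue.timeValueSeconds(120).millis();
    assert oldWaitMillis < lookAheadMillis;  // the old 30s wait could give up too early
    assert newWaitMillis > lookAheadMillis;  // the new 120s wait covers the whole window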
Fixes #101428 --- .../xpack/ilm/actions/DownsampleActionIT.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 5eabacbf1ab3c..2aff5257a6ebf 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -327,7 +327,7 @@ public void testTsdbDataStreams() throws Exception { explainIndex(client(), backingIndexName).get("step"), is(CheckNotDataStreamWriteIndexStep.NAME) ), - 30, + 120, // look_ahead_time is 1m, need to wait for longer than that. TimeUnit.SECONDS ); @@ -374,7 +374,7 @@ public void testILMWaitsForTimeSeriesEndTimeToLapse() throws Exception { explainIndex(client(), backingIndexName).get("step"), is(CheckNotDataStreamWriteIndexStep.NAME) ), - 30, + 120, // look_ahead_time is 1m, need to wait for longer than that. TimeUnit.SECONDS ); @@ -465,7 +465,7 @@ public void testDownsampleTwice() throws Exception { explainIndex(client(), firstBackingIndex).get("step"), is(CheckNotDataStreamWriteIndexStep.NAME) ), - 30, + 120, // look_ahead_time is 1m, need to wait for longer than that. TimeUnit.SECONDS ); @@ -547,7 +547,7 @@ public void testDownsampleTwiceSameInterval() throws Exception { explainIndex(client(), firstBackingIndex).get("step"), is(CheckNotDataStreamWriteIndexStep.NAME) ), - 30, + 120, // look_ahead_time is 1m, need to wait for longer than that. TimeUnit.SECONDS ); From 663126ea0faafaa82506dc9aec869b242855c0b5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 07:49:46 -0700 Subject: [PATCH 174/248] Fix write pages in BigArray (#106270) While making the BitArray serializable, I found that the writePages function breaks when the input big array is resized. When resizing a big array, we overly allocate the pages array and assign null to the extra pages. 
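A condensed sketch of the failing scenario (the array sizes are arbitrary), mirroring the regression test added below:

    // Sizes are arbitrary. resize() can over-allocate the pages array and leave the extra
    // entries null, which is what broke the old per-page writes.
    LongArray array = bigArrays.newLongArray(100_000, randomBoolean());
    array = bigArrays.resize(array, 150_000);    // more pages allocated than size needs
    BytesStreamOutput out = new BytesStreamOutput();
    array.writeTo(out);  // with the fix: the length header plus exactly size * Long.BYTES payload bytes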
--- .../common/util/BigByteArray.java | 2 +- .../common/util/BigDoubleArray.java | 2 +- .../common/util/BigIntArray.java | 2 +- .../common/util/BigLongArray.java | 30 ++++++++----------- .../common/util/BigArraysTests.java | 15 ++++++++-- 5 files changed, 27 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index c5a04e273e487..0dce5ca21ffb5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -44,7 +44,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray { @Override public void writeTo(StreamOutput out) throws IOException { - writePages(out, Math.toIntExact(size), pages, Byte.BYTES, BYTE_PAGE_SIZE); + writePages(out, size, pages, Byte.BYTES); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index b2a55973cd44d..8fe6fd80ccd01 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -129,6 +129,6 @@ public void set(long index, byte[] buf, int offset, int len) { @Override public void writeTo(StreamOutput out) throws IOException { - writePages(out, Math.toIntExact(size), pages, Double.BYTES, DOUBLE_PAGE_SIZE); + writePages(out, size, pages, Double.BYTES); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index e053baea9aa5f..b40574ccb9af8 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -44,7 +44,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray { @Override public void writeTo(StreamOutput out) throws IOException { - writePages(out, (int) size, pages, Integer.BYTES, INT_PAGE_SIZE); + writePages(out, size, pages, Integer.BYTES); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 0e0abf812b248..d39ef7a7841f9 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -132,7 +132,7 @@ public void set(long index, byte[] buf, int offset, int len) { @Override public void writeTo(StreamOutput out) throws IOException { - writePages(out, Math.toIntExact(size), pages, Long.BYTES, LONG_PAGE_SIZE); + writePages(out, size, pages, Long.BYTES); } @Override @@ -141,27 +141,21 @@ public void fillWith(StreamInput in) throws IOException { } static void readPages(StreamInput in, byte[][] pages) throws IOException { - int remained = in.readVInt(); - for (int i = 0; i < pages.length - 1; i++) { - int len = pages[0].length; + int remainedBytes = in.readVInt(); + for (int i = 0; i < pages.length && remainedBytes > 0; i++) { + int len = Math.min(remainedBytes, pages[0].length); in.readBytes(pages[i], 0, len); - remained -= len; + remainedBytes -= len; } - in.readBytes(pages[pages.length - 1], 0, remained); } - static void writePages(StreamOutput out, int size, byte[][] pages, int bytesPerValue, int pageSize) throws IOException { - out.writeVInt(size * bytesPerValue); - int lastPageEnd = 
size % pageSize; - if (lastPageEnd == 0) { - for (byte[] page : pages) { - out.write(page); - } - return; - } - for (int i = 0; i < pages.length - 1; i++) { - out.write(pages[i]); + static void writePages(StreamOutput out, long size, byte[][] pages, int bytesPerValue) throws IOException { + int remainedBytes = Math.toIntExact(size * bytesPerValue); + out.writeVInt(remainedBytes); + for (int i = 0; i < pages.length && remainedBytes > 0; i++) { + int len = Math.min(remainedBytes, pages[i].length); + out.writeBytes(pages[i], 0, len); + remainedBytes -= len; } - out.write(pages[pages.length - 1], 0, lastPageEnd * bytesPerValue); } } diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 3512a50d5578c..3434d8993aecb 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -247,11 +247,15 @@ public void testLongArrayFill() { } public void testSerializeLongArray() throws Exception { - final int len = randomIntBetween(1, 1000_000); - final LongArray array1 = bigArrays.newLongArray(len, randomBoolean()); + int len = randomIntBetween(1, 100_000); + LongArray array1 = bigArrays.newLongArray(len, randomBoolean()); for (int i = 0; i < len; ++i) { array1.set(i, randomLong()); } + if (randomBoolean()) { + len = randomIntBetween(len, len * 3 / 2); + array1 = bigArrays.resize(array1, len); + } BytesStreamOutput out = new BytesStreamOutput(); array1.writeTo(out); final LongArray array2 = bigArrays.newLongArray(len, randomBoolean()); @@ -259,7 +263,12 @@ public void testSerializeLongArray() throws Exception { for (int i = 0; i < len; i++) { assertThat(array2.get(i), equalTo(array1.get(i))); } - Releasables.close(array1, array2); + final LongArray array3 = LongArray.readFrom(out.bytes().streamInput()); + assertThat(array3.size(), equalTo((long) len)); + for (int i = 0; i < len; i++) { + assertThat(array3.get(i), equalTo(array1.get(i))); + } + Releasables.close(array1, array2, array3); } public void testByteArrayBulkGet() { From d9568ae3289f9c3e398a3a03909f17a866c602f2 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 13 Mar 2024 08:02:38 -0700 Subject: [PATCH 175/248] Fix skip version for rrf retriever yaml test (#106272) This updates the skip version to 8.14 for this set of rrf retriever tests. 
--- .../resources/rest-api-spec/test/rrf/300_rrf_retriever.yml | 6 +++--- .../rest-api-spec/test/rrf/400_rrf_retriever_script.yml | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml index ec7a31ffd9ceb..2c7c5e5a50697 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/300_rrf_retriever.yml @@ -1,7 +1,7 @@ setup: - - skip: - version: ' - 8.12.99' - reason: 'rrf retriever added in 8.13' + - requires: + cluster_features: 'rrf_retriever_supported' + reason: 'test requires rrf retriever implementation' - do: indices.create: diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml index 90edcfbffd2b6..7ac41de12c5e7 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/400_rrf_retriever_script.yml @@ -1,8 +1,10 @@ setup: - skip: features: close_to - version: ' - 8.13.99' - reason: 'rrf retriever added in 8.14' + + - requires: + cluster_features: 'rrf_retriever_supported' + reason: 'test requires rrf retriever implementation' - do: indices.create: From 8485dfee3f43afec9f1676b28a5ca1045f575942 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 13 Mar 2024 11:08:12 -0400 Subject: [PATCH 176/248] Mute ESQL async docs tests (#106294) They are failing and tracked by #104013. 
--- docs/reference/esql/esql-async-query-api.asciidoc | 1 + docs/reference/esql/esql-rest.asciidoc | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc index 0d15eb313a61f..6cd23fc524f96 100644 --- a/docs/reference/esql/esql-async-query-api.asciidoc +++ b/docs/reference/esql/esql-async-query-api.asciidoc @@ -28,6 +28,7 @@ POST /_query/async } ---- // TEST[setup:library] +// TEST[skip:awaitsfix https://github.com/elastic/elasticsearch/issues/104013] If the results are not available within the given timeout period, 2 seconds in this case, no results are returned but rather a response that diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index fc06cfea904af..de2b6dedd8776 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -222,7 +222,7 @@ POST /_query { "locale": "fr-FR", "query": """ - ROW birth_date_string = "2023-01-15T00:00:00.000Z" + ROW birth_date_string = "2023-01-15T00:00:00.000Z" | EVAL birth_date = date_parse(birth_date_string) | EVAL month_of_birth = DATE_FORMAT("MMMM",birth_date) | LIMIT 5 @@ -308,6 +308,7 @@ POST /_query/async } ---- // TEST[setup:library] +// TEST[skip:awaitsfix https://github.com/elastic/elasticsearch/issues/104013] If the results are not available within the given timeout period, 2 seconds in this case, no results are returned but rather a response that From c59d4bc93c346f4a7e3fd37e18da00a1603a6517 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 13 Mar 2024 15:11:21 +0000 Subject: [PATCH 177/248] Separate `?from_sort_value` and `?slm_policy_filter` predicates (#106230) Today in `TransportGetSnapshotsAction` we combine the `?from_sort_value` and `?slm_policy_filter` predicates into a general `SnapshotPredicates`, but other predicates on the snapshots returned are handled in different ways. There's no particular need for this generality for these two filters, we can do the same thing with a couple of plain methods and avoid the need for readers to trace through the construction of the `SnapshotPredicates` to work out what's going on. This commit separates the two predicates out and renames some things to clarify that the remaining `SnapshotPredicates` only refers to the `?from_sort_value` filter. 
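For a concrete feel of the `?slm_policy_filter` matching rules that the new `SlmPolicyPredicate` encodes, a small example with invented policy names:

    // Invented policy names; the expression mirrors SlmPolicyPredicate.test() below.
    String[] includes = { "nightly-*" };
    String[] excludes = { "nightly-test" };
    String policy = "nightly-prod";
    boolean matches = Regex.simpleMatch(includes, policy)
        && (excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false);
    assert matches; // included by "nightly-*" and not excluded by "nightly-test"
    // A snapshot with an empty recorded policy (not created by SLM) matches only when the
    // request uses the "_none" pattern, which sets matchWithoutPolicy in the record below.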
--- .../get/TransportGetSnapshotsAction.java | 174 +++++++++--------- 1 file changed, 91 insertions(+), 83 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 28586c7a6410b..3563192be2eb4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Releasable; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -124,7 +125,7 @@ protected void masterOperation( request.isSingleRepositoryRequest() == false, request.snapshots(), request.ignoreUnavailable(), - SnapshotPredicates.fromRequest(request), + request.policies(), request.sort(), request.order(), request.fromSortValue(), @@ -154,7 +155,8 @@ private class GetSnapshotsOperation { // snapshots selection private final String[] snapshots; private final boolean ignoreUnavailable; - private final SnapshotPredicates predicates; + private final SnapshotPredicates fromSortValuePredicates; + private final Predicate slmPolicyPredicate; // snapshot ordering/pagination private final SnapshotSortKey sortBy; @@ -188,7 +190,7 @@ private class GetSnapshotsOperation { boolean isMultiRepoRequest, String[] snapshots, boolean ignoreUnavailable, - SnapshotPredicates predicates, + String[] policies, SnapshotSortKey sortBy, SortOrder order, String fromSortValue, @@ -204,7 +206,6 @@ private class GetSnapshotsOperation { this.isMultiRepoRequest = isMultiRepoRequest; this.snapshots = snapshots; this.ignoreUnavailable = ignoreUnavailable; - this.predicates = predicates; this.sortBy = sortBy; this.order = order; this.fromSortValue = fromSortValue; @@ -215,6 +216,9 @@ private class GetSnapshotsOperation { this.verbose = verbose; this.indices = indices; + this.fromSortValuePredicates = SnapshotPredicates.forFromSortValue(fromSortValue, sortBy, order); + this.slmPolicyPredicate = SlmPolicyPredicate.forPolicies(policies); + this.getSnapshotInfoExecutor = new GetSnapshotInfoExecutor( threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), cancellableTask::isCancelled @@ -332,7 +336,7 @@ private void loadSnapshotInfos( if (repositoryData != null) { for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - if (predicates.test(snapshotId, repositoryData)) { + if (matchesPredicates(snapshotId, repositoryData)) { allSnapshotIds.put(snapshotId.getName(), new Snapshot(repo, snapshotId)); } } @@ -390,7 +394,8 @@ private void loadSnapshotInfos( if (verbose) { snapshots(repo, toResolve.stream().map(Snapshot::getSnapshotId).toList(), listener); } else { - assert predicates.isMatchAll() : "filtering is not supported in non-verbose mode"; + assert fromSortValuePredicates.isMatchAll() : "filtering is not supported in non-verbose mode"; + assert slmPolicyPredicate == SlmPolicyPredicate.MATCH_ALL_POLICIES : "filtering is not supported in non-verbose mode"; final SnapshotsInRepo snapshotInfos; if (repositoryData != null) { // want non-current snapshots as well, which are found in the repository data @@ -424,7 +429,7 @@ 
private void snapshots(String repositoryName, Collection snapshotIds for (SnapshotsInProgress.Entry entry : entries) { if (snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId())) { final SnapshotInfo snapshotInfo = SnapshotInfo.inProgress(entry); - if (predicates.test(snapshotInfo)) { + if (matchesPredicates(snapshotInfo)) { snapshots.add(snapshotInfo.maybeWithoutIndices(indices)); } } @@ -458,7 +463,7 @@ private void snapshots(String repositoryName, Collection snapshotIds getSnapshotInfoExecutor.getSnapshotInfo(repository, snapshotId, new ActionListener<>() { @Override public void onResponse(SnapshotInfo snapshotInfo) { - if (predicates.test(snapshotInfo)) { + if (matchesPredicates(snapshotInfo)) { syncSnapshots.add(snapshotInfo.maybeWithoutIndices(indices)); } refListener.onResponse(null); @@ -547,6 +552,34 @@ private SnapshotsInRepo sortSnapshots(Stream snapshotInfoStream, i return new SnapshotsInRepo(results, totalCount, remaining); } } + + private boolean matchesPredicates(SnapshotId snapshotId, RepositoryData repositoryData) { + if (fromSortValuePredicates.test(snapshotId, repositoryData) == false) { + return false; + } + + if (slmPolicyPredicate == SlmPolicyPredicate.MATCH_ALL_POLICIES) { + return true; + } + + final var details = repositoryData.getSnapshotDetails(snapshotId); + return details == null || details.getSlmPolicy() == null || slmPolicyPredicate.test(details.getSlmPolicy()); + } + + private boolean matchesPredicates(SnapshotInfo snapshotInfo) { + if (fromSortValuePredicates.test(snapshotInfo) == false) { + return false; + } + + if (slmPolicyPredicate == SlmPolicyPredicate.MATCH_ALL_POLICIES) { + return true; + } + + final var metadata = snapshotInfo.userMetadata(); + return slmPolicyPredicate.test( + metadata != null && metadata.get(SnapshotsService.POLICY_ID_METADATA_FIELD) instanceof String s ? s : "" + ); + } } /** @@ -587,81 +620,7 @@ boolean test(SnapshotInfo snapshotInfo) { return snapshotPredicate == null || snapshotPredicate.test(snapshotInfo); } - private SnapshotPredicates and(SnapshotPredicates other) { - return this == MATCH_ALL ? other - : other == MATCH_ALL ? this - : new SnapshotPredicates( - preflightPredicate == null ? other.preflightPredicate : other.preflightPredicate == null ? preflightPredicate : null, - snapshotPredicate == null ? other.snapshotPredicate : other.snapshotPredicate == null ? 
snapshotPredicate : null - ); - } - - static SnapshotPredicates fromRequest(GetSnapshotsRequest request) { - return getSortValuePredicate(request.fromSortValue(), request.sort(), request.order()).and( - getSlmPredicates(request.policies()) - ); - } - - private static SnapshotPredicates getSlmPredicates(String[] slmPolicies) { - if (slmPolicies.length == 0) { - return MATCH_ALL; - } - - final List includePatterns = new ArrayList<>(); - final List excludePatterns = new ArrayList<>(); - boolean seenWildcard = false; - boolean matchNoPolicy = false; - for (String slmPolicy : slmPolicies) { - if (seenWildcard && slmPolicy.length() > 1 && slmPolicy.startsWith("-")) { - excludePatterns.add(slmPolicy.substring(1)); - } else { - if (Regex.isSimpleMatchPattern(slmPolicy)) { - seenWildcard = true; - } else if (GetSnapshotsRequest.NO_POLICY_PATTERN.equals(slmPolicy)) { - matchNoPolicy = true; - } - includePatterns.add(slmPolicy); - } - } - final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); - final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); - final boolean matchWithoutPolicy = matchNoPolicy; - return new SnapshotPredicates(((snapshotId, repositoryData) -> { - final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); - final String policy; - if (details == null || (details.getSlmPolicy() == null)) { - // no SLM policy recorded - return true; - } else { - final String policyFound = details.getSlmPolicy(); - // empty string means that snapshot was not created by an SLM policy - policy = policyFound.isEmpty() ? null : policyFound; - } - return matchPolicy(includes, excludes, matchWithoutPolicy, policy); - }), snapshotInfo -> { - final Map metadata = snapshotInfo.userMetadata(); - final String policy; - if (metadata == null) { - policy = null; - } else { - final Object policyFound = metadata.get(SnapshotsService.POLICY_ID_METADATA_FIELD); - policy = policyFound instanceof String ? (String) policyFound : null; - } - return matchPolicy(includes, excludes, matchWithoutPolicy, policy); - }); - } - - private static boolean matchPolicy(String[] includes, String[] excludes, boolean matchWithoutPolicy, @Nullable String policy) { - if (policy == null) { - return matchWithoutPolicy; - } - if (Regex.simpleMatch(includes, policy) == false) { - return false; - } - return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; - } - - private static SnapshotPredicates getSortValuePredicate(String fromSortValue, SnapshotSortKey sortBy, SortOrder order) { + static SnapshotPredicates forFromSortValue(String fromSortValue, SnapshotSortKey sortBy, SortOrder order) { if (fromSortValue == null) { return MATCH_ALL; } @@ -793,4 +752,53 @@ void getSnapshotInfo(Repository repository, SnapshotId snapshotId, ActionListene })); } } + + /** + * Encapsulates a filter on snapshots according to SLM policy, for the {@code ?slm_policy_filter} query parameter. 
+ */ + private record SlmPolicyPredicate(String[] includes, String[] excludes, boolean matchWithoutPolicy) implements Predicate { + + static final Predicate MATCH_ALL_POLICIES = Predicates.always(); + + @Override + public boolean test(String policy) { + if (policy.equals("")) { + // empty string means that snapshot was not created by an SLM policy + return matchWithoutPolicy; + } + if (Regex.simpleMatch(includes, policy) == false) { + return false; + } + return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; + } + + static Predicate forPolicies(String[] slmPolicies) { + if (slmPolicies.length == 0) { + return MATCH_ALL_POLICIES; + } + + final List includePatterns = new ArrayList<>(slmPolicies.length); + final List excludePatterns = new ArrayList<>(slmPolicies.length); + boolean seenWildcard = false; + boolean matchNoPolicy = false; + for (final var slmPolicy : slmPolicies) { + if (seenWildcard && slmPolicy.length() > 1 && slmPolicy.startsWith("-")) { + excludePatterns.add(slmPolicy.substring(1)); + } else { + if (Regex.isSimpleMatchPattern(slmPolicy)) { + seenWildcard = true; + } else if (GetSnapshotsRequest.NO_POLICY_PATTERN.equals(slmPolicy)) { + matchNoPolicy = true; + } + includePatterns.add(slmPolicy); + } + } + + return new SlmPolicyPredicate( + includePatterns.toArray(Strings.EMPTY_ARRAY), + excludePatterns.toArray(Strings.EMPTY_ARRAY), + matchNoPolicy + ); + } + } } From e58f4b4ef98f3ddfbcfe2b727681c2ed8afcc064 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 13 Mar 2024 17:37:09 +0200 Subject: [PATCH 178/248] Introduce TimeSeriesRoutingIdFieldMapper and use it to create TSDB ids (#106080) Supporting non-keyword fields requires updating non-keyword fields in the routing path to be included in routing calculations. Routing is performed in coordinating nodes that lack mappings (or mappings haven't been created yet, for dynamically-defined dimensions), so the routing hash they calculate are passed to data nodes and stored in a new fields, namely _ts_routind_hash. This is included in the _id field, in turn, so that it can consistently reach the right shard for get-by-id and delete-by-id operations. A few interesting points: - The hash is passed from the coordinating to data nodes using the `routing` field in `IndexRequest`; adding another field to the latter requires updating dozens of classes. - We explicitly skip (double-) storing the hash to the routing field, as the latter is not optimized for storage using the TSDB codec. - The routing hash may not be available in Translog operations, it can then be retrieved from the `id` prefix. 
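To make the last bullet above concrete, a toy round trip; the real prefix encoding used by the new mapper is not shown in this excerpt, so the hex form here is purely illustrative:

    // Toy encoding only, not the mapper's actual format. The point is that the routing hash
    // computed on the coordinating node rides along inside the _id, so get-by-id and
    // delete-by-id can recover it without needing the index mappings.
    int routingHash = 0x2A83F9;                               // hypothetical shard-routing hash
    String id = Integer.toHexString(routingHash) + "_rest";   // hypothetical layout: hash prefix + rest of the TSDB id
    int recovered = Integer.parseInt(id.substring(0, id.indexOf('_')), 16);
    assert recovered == routingHash;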
Related to https://github.com/elastic/elasticsearch/issues/103567 --- .../action/PainlessExecuteAction.java | 21 ++- .../join/mapper/ParentJoinFieldMapper.java | 2 +- .../action/bulk/TransportShardBulkAction.java | 4 +- .../action/index/IndexRequest.java | 2 +- .../cluster/routing/IndexRouting.java | 40 ++++-- .../org/elasticsearch/index/IndexMode.java | 19 +++ .../elasticsearch/index/IndexVersions.java | 1 + .../index/codec/PerFieldMapperCodec.java | 4 +- .../index/engine/TranslogDirectoryReader.java | 11 +- .../index/mapper/DocumentParser.java | 2 +- .../index/mapper/DocumentParserContext.java | 5 + .../elasticsearch/index/mapper/IdLoader.java | 45 ++++--- .../index/mapper/RoutingFieldMapper.java | 2 +- .../index/mapper/SourceToParse.java | 4 + .../index/mapper/TimeSeriesIdFieldMapper.java | 8 +- .../TimeSeriesRoutingHashFieldMapper.java | 116 ++++++++++++++++ .../mapper/TsidExtractingIdFieldMapper.java | 60 ++++++--- .../elasticsearch/index/shard/IndexShard.java | 10 +- .../index/termvectors/TermVectorsService.java | 10 +- .../elasticsearch/indices/IndicesModule.java | 2 + .../search/DefaultSearchContext.java | 29 ++-- .../cluster/routing/IndexRoutingTests.java | 10 +- .../common/lucene/uid/VersionsTests.java | 14 +- .../mapper/FieldFilterMapperPluginTests.java | 3 +- .../index/mapper/IdLoaderTests.java | 68 +++++----- .../index/mapper/RoutingFieldMapperTests.java | 6 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 117 ++++++++++------ ...TimeSeriesRoutingHashFieldMapperTests.java | 105 +++++++++++++++ .../TsidExtractingIdFieldMapperTests.java | 126 ++++++++---------- .../indices/IndicesModuleTests.java | 2 + .../recovery/RecoverySourceHandlerTests.java | 3 +- .../index/engine/TranslogHandler.java | 11 +- .../index/mapper/MapperServiceTestCase.java | 13 +- .../index/shard/IndexShardTestCase.java | 11 +- .../test/InternalTestCluster.java | 2 +- .../SourceOnlySnapshotShardTests.java | 11 +- 36 files changed, 586 insertions(+), 313 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 874b2316406ef..47defea0a1f95 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -56,11 +56,13 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; @@ -808,13 +810,18 @@ private static Response prepareRamIndex( try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(defaultAnalyzer))) { BytesReference document 
= request.contextSetup.document; XContentType xContentType = request.contextSetup.xContentType; - String id; - if (indexService.getIndexSettings().getMode() == IndexMode.TIME_SERIES) { - id = null; // The id gets auto generated for time series indices. - } else { - id = "_id"; - } - SourceToParse sourceToParse = new SourceToParse(id, document, xContentType); + + SourceToParse sourceToParse = (indexService.getIndexSettings().getMode() == IndexMode.TIME_SERIES) + ? new SourceToParse( + null, + document, + xContentType, + indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID) + ? TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE + : null + ) + : new SourceToParse("_id", document, xContentType); + DocumentMapper documentMapper = indexService.mapperService().documentMapper(); if (documentMapper == null) { documentMapper = DocumentMapper.createEmpty(indexService.mapperService()); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index 508e438932e68..e1df6c130c9fe 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -293,7 +293,7 @@ public void parse(DocumentParserContext context) throws IOException { if (parent == null) { throw new IllegalArgumentException("[parent] is missing for join field [" + name() + "]"); } - if (context.sourceToParse().routing() == null) { + if (context.routing() == null) { throw new IllegalArgumentException("[routing] is missing for join field [" + name() + "]"); } String fieldName = fieldType().joiner.parentJoinField(name); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 70168f6a2b516..fe7af4bc26e6e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -689,9 +689,7 @@ private static Engine.Result performOpOnReplica( indexRequest.id(), indexRequest.source(), indexRequest.getContentType(), - indexRequest.routing(), - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE + indexRequest.routing() ); result = replica.applyIndexOperationOnReplica( primaryResponse.getSeqNo(), diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 5bdd197b80d2c..a8d6220415a43 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -881,7 +881,7 @@ public Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { @Override public int route(IndexRouting indexRouting) { - return indexRouting.indexShard(id, routing, contentType, source); + return indexRouting.indexShard(id, routing, contentType, source, this::routing); } public IndexRequest setRequireAlias(boolean requireAlias) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 1ed9d759c4ca8..fb2fcf1a02ad0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -21,6 +21,8 @@ import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser.Token; @@ -35,6 +37,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.IntConsumer; import java.util.function.IntSupplier; import java.util.function.Predicate; @@ -74,7 +77,13 @@ private IndexRouting(IndexMetadata metadata) { * Called when indexing a document to generate the shard id that should contain * a document with the provided parameters. */ - public abstract int indexShard(String id, @Nullable String routing, XContentType sourceType, BytesReference source); + public abstract int indexShard( + String id, + @Nullable String routing, + XContentType sourceType, + BytesReference source, + Consumer routingHashSetter + ); /** * Called when updating a document to generate the shard id that should contain @@ -153,7 +162,13 @@ public void process(IndexRequest indexRequest) { } @Override - public int indexShard(String id, @Nullable String routing, XContentType sourceType, BytesReference source) { + public int indexShard( + String id, + @Nullable String routing, + XContentType sourceType, + BytesReference source, + Consumer routingHashSetter + ) { if (id == null) { throw new IllegalStateException("id is required and should have been set by process"); } @@ -237,12 +252,14 @@ public void collectSearchShards(String routing, IntConsumer consumer) { public static class ExtractFromSource extends IndexRouting { private final Predicate isRoutingPath; private final XContentParserConfiguration parserConfig; + private final boolean trackTimeSeriesRoutingHash; ExtractFromSource(IndexMetadata metadata) { super(metadata); if (metadata.isRoutingPartitionedIndex()) { throw new IllegalArgumentException("routing_partition_size is incompatible with routing_path"); } + trackTimeSeriesRoutingHash = metadata.getCreationVersion().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID); List routingPaths = metadata.getRoutingPaths(); isRoutingPath = Regex.simpleMatcher(routingPaths.toArray(String[]::new)); this.parserConfig = XContentParserConfiguration.EMPTY.withFiltering(Set.copyOf(routingPaths), null, true); @@ -256,10 +273,20 @@ public boolean matchesField(String fieldName) { public void process(IndexRequest indexRequest) {} @Override - public int indexShard(String id, @Nullable String routing, XContentType sourceType, BytesReference source) { + public int indexShard( + String id, + @Nullable String routing, + XContentType sourceType, + BytesReference source, + Consumer routingHashSetter + ) { assert Transports.assertNotTransportThread("parsing the _source can get slow"); checkNoRouting(routing); - return hashToShardId(hashSource(sourceType, source).buildHash(IndexRouting.ExtractFromSource::defaultOnEmpty)); + int hash = hashSource(sourceType, source).buildHash(IndexRouting.ExtractFromSource::defaultOnEmpty); + if (trackTimeSeriesRoutingHash) { + routingHashSetter.accept(TimeSeriesRoutingHashFieldMapper.encode(hash)); + } + return hashToShardId(hash); } public String createId(XContentType sourceType, BytesReference source, byte[] 
suffix) { @@ -334,16 +361,13 @@ private void extractItem(String path, XContentParser source) throws IOException source.nextToken(); break; case VALUE_STRING: + case VALUE_NUMBER: hashes.add(new NameAndHash(new BytesRef(path), hash(new BytesRef(source.text())))); source.nextToken(); break; case VALUE_NULL: source.nextToken(); break; - case VALUE_NUMBER: // allow parsing numbers assuming routing fields are always keyword fields - hashes.add(new NameAndHash(new BytesRef(path), hash(new BytesRef(source.text())))); - source.nextToken(); - break; default: throw new ParsingException( source.getTokenLocation(), diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 05afc14e0f0cd..05169836d6617 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; import java.io.IOException; @@ -92,6 +93,12 @@ public MetadataFieldMapper timeSeriesIdFieldMapper() { return null; } + @Override + public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { + // non time-series indices must not have a TimeSeriesRoutingIdFieldMapper + return null; + } + @Override public IdFieldMapper idFieldMapperWithoutFieldData() { return ProvidedIdFieldMapper.NO_FIELD_DATA; @@ -185,6 +192,11 @@ public MetadataFieldMapper timeSeriesIdFieldMapper() { return TimeSeriesIdFieldMapper.INSTANCE; } + @Override + public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { + return TimeSeriesRoutingHashFieldMapper.INSTANCE; + } + public IdFieldMapper idFieldMapperWithoutFieldData() { return TsidExtractingIdFieldMapper.INSTANCE; } @@ -322,6 +334,13 @@ public String getName() { */ public abstract MetadataFieldMapper timeSeriesIdFieldMapper(); + /** + * Return an instance of the {@link TimeSeriesRoutingHashFieldMapper} that generates + * the _ts_routing_hash field. The field mapper will be added to the list of the metadata + * field mappers for the index. + */ + public abstract MetadataFieldMapper timeSeriesRoutingHashFieldMapper(); + /** * How {@code time_series_dimension} fields are handled by indices in this mode. */ diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 0ddcef2ac3a08..bca7b963becaa 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -102,6 +102,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion UPGRADE_LUCENE_9_9_2 = def(8_502_00_0, Version.LUCENE_9_9_2); public static final IndexVersion TIME_SERIES_ID_HASHING = def(8_502_00_1, Version.LUCENE_9_9_2); public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, Version.LUCENE_9_10_0); + public static final IndexVersion TIME_SERIES_ROUTING_HASH_IN_ID = def(8_504_00_0, Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index 2a2ae7245d996..ae497af887d9c 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -115,8 +115,8 @@ boolean useTSDBDocValuesFormat(final String field) { private boolean excludeFields(String fieldName) { // Avoid using tsdb codec for fields like _seq_no, _primary_term. - // But _tsid should always use the tesbd codec. - return fieldName.startsWith("_") && fieldName.equals("_tsid") == false; + // But _tsid and _ts_routing_hash should always use the tsdb codec. + return fieldName.startsWith("_") && fieldName.equals("_tsid") == false && fieldName.equals("_ts_routing_hash") == false; } private boolean isTimeSeriesModeIndex() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java index e5eeac72927c0..e054fc52b562e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -59,11 +59,9 @@ import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import java.io.IOException; import java.util.Collections; -import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -254,14 +252,7 @@ private LeafReader getDelegate() { private LeafReader createInMemoryLeafReader() { assert Thread.holdsLock(this); final ParsedDocument parsedDocs = documentParser.parseDocument( - new SourceToParse( - operation.id(), - operation.source(), - XContentHelper.xContentType(operation.source()), - operation.routing(), - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE - ), + new SourceToParse(operation.id(), operation.source(), XContentHelper.xContentType(operation.source()), operation.routing()), mappingLookup ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 9a0e391102708..1fda9ababfabd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -99,7 +99,7 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL context.version(), context.seqID(), context.id(), - source.routing(), + context.routing(), context.reorderParentAndGetDocs(), context.sourceToParse().source(), context.sourceToParse().getXContentType(), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 01e67377adafd..92aa8662eaf9d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; 
import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.xcontent.FilterXContentParserWrapper; @@ -231,6 +232,10 @@ public final SourceToParse sourceToParse() { return this.sourceToParse; } + public final String routing() { + return mappingParserContext.getIndexSettings().getMode() == IndexMode.TIME_SERIES ? null : sourceToParse.routing(); + } + /** * Add the given {@code field} to the set of ignored fields. */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/IdLoader.java index c965a77f1b5bf..ef15af93f6e34 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdLoader.java @@ -66,21 +66,24 @@ final class TsIdLoader implements IdLoader { } public IdLoader.Leaf leaf(LeafStoredFieldLoader loader, LeafReader reader, int[] docIdsInLeaf) throws IOException { - IndexRouting.ExtractFromSource.Builder[] builders = new IndexRouting.ExtractFromSource.Builder[docIdsInLeaf.length]; - for (int i = 0; i < builders.length; i++) { - builders[i] = indexRouting.builder(); - } + IndexRouting.ExtractFromSource.Builder[] builders = null; + if (indexRouting != null) { + builders = new IndexRouting.ExtractFromSource.Builder[docIdsInLeaf.length]; + for (int i = 0; i < builders.length; i++) { + builders[i] = indexRouting.builder(); + } - for (String routingField : routingPaths) { - // Routing field must always be keyword fields, so it is ok to use SortedSetDocValues directly here. - SortedSetDocValues dv = DocValues.getSortedSet(reader, routingField); - for (int i = 0; i < docIdsInLeaf.length; i++) { - int docId = docIdsInLeaf[i]; - var builder = builders[i]; - if (dv.advanceExact(docId)) { - for (int j = 0; j < dv.docValueCount(); j++) { - BytesRef routingValue = dv.lookupOrd(dv.nextOrd()); - builder.addMatching(routingField, routingValue); + for (String routingField : routingPaths) { + // Routing field must always be keyword fields, so it is ok to use SortedSetDocValues directly here. 
+ SortedSetDocValues dv = DocValues.getSortedSet(reader, routingField); + for (int i = 0; i < docIdsInLeaf.length; i++) { + int docId = docIdsInLeaf[i]; + var builder = builders[i]; + if (dv.advanceExact(docId)) { + for (int j = 0; j < dv.docValueCount(); j++) { + BytesRef routingValue = dv.lookupOrd(dv.nextOrd()); + builder.addMatching(routingField, routingValue); + } } } } @@ -100,9 +103,17 @@ public IdLoader.Leaf leaf(LeafStoredFieldLoader loader, LeafReader reader, int[] assert found; assert timestampDocValues.docValueCount() == 1; long timestamp = timestampDocValues.nextValue(); - - var routingBuilder = builders[i]; - ids[i] = TsidExtractingIdFieldMapper.createId(false, routingBuilder, tsid, timestamp, new byte[16]); + if (builders != null) { + var routingBuilder = builders[i]; + ids[i] = TsidExtractingIdFieldMapper.createId(false, routingBuilder, tsid, timestamp, new byte[16]); + } else { + SortedDocValues routingHashDocValues = DocValues.getSorted(reader, TimeSeriesRoutingHashFieldMapper.NAME); + found = routingHashDocValues.advanceExact(docId); + assert found; + BytesRef routingHashBytes = routingHashDocValues.lookupOrd(routingHashDocValues.ordValue()); + int routingHash = TimeSeriesRoutingHashFieldMapper.decode(Uid.decodeId(routingHashBytes.bytes)); + ids[i] = TsidExtractingIdFieldMapper.createId(routingHash, tsid, timestamp); + } } return new TsIdLeaf(docIdsInLeaf, ids); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index 3141b73174897..39686c3f30555 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -108,7 +108,7 @@ public boolean required() { @Override public void preParse(DocumentParserContext context) { - String routing = context.sourceToParse().routing(); + String routing = context.routing(); if (routing != null) { context.doc().add(new StringField(fieldType().name(), routing, Field.Store.YES)); context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index 12f74263a3bd7..6a020127019f5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -52,6 +52,10 @@ public SourceToParse(String id, BytesReference source, XContentType xContentType this(id, source, xContentType, null, Map.of(), DocumentSizeObserver.EMPTY_INSTANCE); } + public SourceToParse(String id, BytesReference source, XContentType xContentType, String routing) { + this(id, source, xContentType, routing, Map.of(), DocumentSizeObserver.EMPTY_INSTANCE); + } + public BytesReference source() { return this.source; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index 1ee7caff497ad..2d330e433d444 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -140,7 +140,13 @@ public void postParse(DocumentParserContext context) throws IOException { ? 
timeSeriesIdBuilder.buildLegacyTsid().toBytesRef() : timeSeriesIdBuilder.buildTsidHash().toBytesRef(); context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId)); - TsidExtractingIdFieldMapper.createField(context, timeSeriesIdBuilder.routingBuilder, timeSeriesId); + TsidExtractingIdFieldMapper.createField( + context, + getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID) + ? timeSeriesIdBuilder.routingBuilder + : null, + timeSeriesId + ); } private IndexVersion getIndexVersionCreated(final DocumentParserContext context) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java new file mode 100644 index 0000000000000..090fe7839b3e9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.plain.SortedOrdinalsIndexFieldData; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.DelegateDocValuesField; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; + +import java.util.Base64; +import java.util.Collections; + +/** + * Mapper for the {@code _ts_routing_hash} field. + * + * The field contains the routing hash, as calculated in coordinating nodes for docs in time-series indexes. + * It's stored to be retrieved and added as a prefix when reconstructing the _id field in search queries. + * The prefix can then used for routing Get and Delete requests (by doc id) to the right shard. 
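As a companion to the javadoc above, here is a self-contained sketch of how that _id prefix works, assuming only the JDK and a stand-in long for the MurmurHash3 value of the tsid. The 20-byte layout (routing hash as a little-endian int, 8 bytes of the tsid hash as a little-endian long, timestamp as a big-endian long, base64url-encoded without padding) follows the createId(routingHash, tsid, timestamp) overload added to TsidExtractingIdFieldMapper later in this patch; the class name and sample values here are illustrative.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Base64;

public class TsdbIdPrefixSketch {
    // Same byte layout as the patch: routing hash (LE int), tsid hash (LE long), timestamp (BE long).
    static String createId(int routingHash, long tsidHash, long timestamp) {
        ByteBuffer buf = ByteBuffer.allocate(20);
        buf.order(ByteOrder.LITTLE_ENDIAN).putInt(routingHash).putLong(tsidHash);
        buf.order(ByteOrder.BIG_ENDIAN).putLong(timestamp);
        return Base64.getUrlEncoder().withoutPadding().encodeToString(buf.array());
    }

    // Recover the routing hash from the first four decoded bytes of an _id,
    // which is enough to route get-by-id and delete-by-id requests to the right shard.
    static int routingHashFromId(String id) {
        byte[] bytes = Base64.getUrlDecoder().decode(id);
        return ByteBuffer.wrap(bytes, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt();
    }

    public static void main(String[] args) {
        long timestamp = 1609459200000L;     // 2021-01-01T00:00:00Z
        long tsidHash = 0x1122334455667788L; // stand-in for MurmurHash3(tsid).h1
        String id = createId(42, tsidHash, timestamp);
        System.out.println(id + " -> routing hash " + routingHashFromId(id)); // prints ... 42
    }
}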
+ */ +public class TimeSeriesRoutingHashFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_ts_routing_hash"; + + public static final TimeSeriesRoutingHashFieldMapper INSTANCE = new TimeSeriesRoutingHashFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> c.getIndexSettings().getMode().timeSeriesRoutingHashFieldMapper()); + + static final class TimeSeriesRoutingHashFieldType extends MappedFieldType { + + private static final TimeSeriesRoutingHashFieldType INSTANCE = new TimeSeriesRoutingHashFieldType(); + + private TimeSeriesRoutingHashFieldType() { + super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap()); + } + + @Override + public String typeName() { + return NAME; + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this, MappedFieldType.FielddataOperation.SEARCH)); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + failIfNoDocValues(); + return new SortedOrdinalsIndexFieldData.Builder( + name(), + CoreValuesSourceType.KEYWORD, + (dv, n) -> new DelegateDocValuesField( + new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(FieldData.toString(dv))), + n + ) + ); + } + + @Override + public Query termQuery(Object value, SearchExecutionContext context) { + throw new IllegalArgumentException("[" + NAME + "] is not searchable"); + } + } + + private TimeSeriesRoutingHashFieldMapper() { + super(TimeSeriesRoutingHashFieldType.INSTANCE); + } + + @Override + public void postParse(DocumentParserContext context) { + if (context.indexSettings().getMode() == IndexMode.TIME_SERIES + && context.indexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID)) { + String routingHash = context.sourceToParse().routing(); + var field = new SortedDocValuesField(NAME, Uid.encodeId(routingHash != null ? 
routingHash : encode(0))); + context.rootDoc().add(field); + } + } + + @Override + protected String contentType() { + return NAME; + } + + @Override + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + return SourceLoader.SyntheticFieldLoader.NOTHING; + } + + public static String encode(int routingId) { + byte[] bytes = new byte[4]; + ByteUtils.writeIntLE(routingId, bytes, 0); + return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes); + } + + public static final String DUMMY_ENCODED_VALUE = encode(0); + + public static int decode(String routingId) { + byte[] bytes = Base64.getUrlDecoder().decode(routingId); + return ByteUtils.readIntLE(bytes, 0); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 1e613767c2c89..8101b5be1b60e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; +import java.util.Base64; import java.util.Locale; /** @@ -52,22 +53,37 @@ public static void createField(DocumentParserContext context, IndexRouting.Extra ); } long timestamp = timestampField.numericValue().longValue(); - byte[] suffix = new byte[16]; - String id = createId(context.hasDynamicMappers(), routingBuilder, tsid, timestamp, suffix); - /* - * Make sure that _id from extracting the tsid matches that _id - * from extracting the _source. This should be true for all valid - * documents with valid mappings. *But* some invalid mappings - * will not parse the field but be rejected later by the dynamic - * mappings machinery. So if there are any dynamic mappings - * at all we just skip the assertion because we can't be sure - * it always must pass. - */ - IndexRouting.ExtractFromSource indexRouting = (IndexRouting.ExtractFromSource) context.indexSettings().getIndexRouting(); - assert context.getDynamicMappers().isEmpty() == false - || context.getDynamicRuntimeFields().isEmpty() == false - || id.equals(indexRouting.createId(context.sourceToParse().getXContentType(), context.sourceToParse().source(), suffix)); - + String id; + if (routingBuilder != null) { + byte[] suffix = new byte[16]; + id = createId(context.hasDynamicMappers(), routingBuilder, tsid, timestamp, suffix); + /* + * Make sure that _id from extracting the tsid matches that _id + * from extracting the _source. This should be true for all valid + * documents with valid mappings. *But* some invalid mappings + * will not parse the field but be rejected later by the dynamic + * mappings machinery. So if there are any dynamic mappings + * at all we just skip the assertion because we can't be sure + * it always must pass. 
+ */ + IndexRouting.ExtractFromSource indexRouting = (IndexRouting.ExtractFromSource) context.indexSettings().getIndexRouting(); + assert context.getDynamicMappers().isEmpty() == false + || context.getDynamicRuntimeFields().isEmpty() == false + || id.equals(indexRouting.createId(context.sourceToParse().getXContentType(), context.sourceToParse().source(), suffix)); + } else if (context.sourceToParse().routing() != null) { + int routingHash = TimeSeriesRoutingHashFieldMapper.decode(context.sourceToParse().routing()); + id = createId(routingHash, tsid, timestamp); + } else { + if (context.sourceToParse().id() == null) { + throw new IllegalArgumentException( + "_ts_routing_hash was null but must be set because index [" + + context.indexSettings().getIndexMetadata().getIndex().getName() + + "] is in time_series mode" + ); + } + // In Translog operations, the id has already been generated based on the routing hash while the latter is no longer available. + id = context.sourceToParse().id(); + } if (context.sourceToParse().id() != null && false == context.sourceToParse().id().equals(id)) { throw new IllegalArgumentException( String.format( @@ -85,6 +101,18 @@ public static void createField(DocumentParserContext context, IndexRouting.Extra context.doc().add(new StringField(NAME, uidEncoded, Field.Store.YES)); } + public static String createId(int routingHash, BytesRef tsid, long timestamp) { + Hash128 hash = new Hash128(); + MurmurHash3.hash128(tsid.bytes, tsid.offset, tsid.length, SEED, hash); + + byte[] bytes = new byte[20]; + ByteUtils.writeIntLE(routingHash, bytes, 0); + ByteUtils.writeLongLE(hash.h1, bytes, 4); + ByteUtils.writeLongBE(timestamp, bytes, 12); // Big Ending shrinks the inverted index by ~37% + + return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes); + } + public static String createId( boolean dynamicMappersExists, IndexRouting.ExtractFromSource.Builder routingBuilder, diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 3bafe139756fd..046483a6b074f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -144,7 +144,6 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.plugins.IndexStorePlugin; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.rest.RestStatus; @@ -1951,14 +1950,7 @@ private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation o index.getAutoGeneratedIdTimestamp(), true, origin, - new SourceToParse( - index.id(), - index.source(), - XContentHelper.xContentType(index.source()), - index.routing(), - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE - ) + new SourceToParse(index.id(), index.source(), XContentHelper.xContentType(index.source()), index.routing()) ); } case DELETE -> { diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index b4c0b200eb143..a30249e94177e 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -41,7 +41,6 @@ import 
org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.search.lookup.Source; import org.elasticsearch.xcontent.XContentType; @@ -305,14 +304,7 @@ private static Fields generateTermVectors( } private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException { - SourceToParse source = new SourceToParse( - "_id_for_tv_api", - request.doc(), - request.xContentType(), - request.routing(), - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE - ); + SourceToParse source = new SourceToParse("_id_for_tv_api", request.doc(), request.xContentType(), request.routing()); DocumentParser documentParser = indexShard.mapperService().documentParser(); MappingLookup mappingLookup = indexShard.mapperService().mappingLookup(); ParsedDocument parsedDocument = documentParser.parseDocument(source, mappingLookup); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 795ed2120b098..b94c95834f65a 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -60,6 +60,7 @@ import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; @@ -247,6 +248,7 @@ private static Map initBuiltInMetadataMa builtInMetadataMappers.put(IdFieldMapper.NAME, IdFieldMapper.PARSER); builtInMetadataMappers.put(RoutingFieldMapper.NAME, RoutingFieldMapper.PARSER); builtInMetadataMappers.put(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.PARSER); + builtInMetadataMappers.put(TimeSeriesRoutingHashFieldMapper.NAME, TimeSeriesRoutingHashFieldMapper.PARSER); builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 7f9e808db9560..0e6800b9c8d48 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -897,20 +898,24 @@ public SourceLoader newSourceLoader() { @Override public IdLoader newIdLoader() { if (indexService.getIndexSettings().getMode() == IndexMode.TIME_SERIES) { - var indexRouting = (IndexRouting.ExtractFromSource) indexService.getIndexSettings().getIndexRouting(); - List routingPaths = 
indexService.getMetadata().getRoutingPaths(); - for (String routingField : routingPaths) { - if (routingField.contains("*")) { - // In case the routing fields include path matches, find any matches and add them as distinct fields - // to the routing path. - Set matchingRoutingPaths = new TreeSet<>(routingPaths); - for (Mapper mapper : indexService.mapperService().mappingLookup().fieldMappers()) { - if (mapper instanceof KeywordFieldMapper && indexRouting.matchesField(mapper.name())) { - matchingRoutingPaths.add(mapper.name()); + IndexRouting.ExtractFromSource indexRouting = null; + List routingPaths = null; + if (indexService.getIndexSettings().getIndexVersionCreated().before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID)) { + indexRouting = (IndexRouting.ExtractFromSource) indexService.getIndexSettings().getIndexRouting(); + routingPaths = indexService.getMetadata().getRoutingPaths(); + for (String routingField : routingPaths) { + if (routingField.contains("*")) { + // In case the routing fields include path matches, find any matches and add them as distinct fields + // to the routing path. + Set matchingRoutingPaths = new TreeSet<>(routingPaths); + for (Mapper mapper : indexService.mapperService().mappingLookup().fieldMappers()) { + if (mapper instanceof KeywordFieldMapper && indexRouting.matchesField(mapper.name())) { + matchingRoutingPaths.add(mapper.name()); + } } + routingPaths = new ArrayList<>(matchingRoutingPaths); + break; } - routingPaths = new ArrayList<>(matchingRoutingPaths); - break; } } return IdLoader.createTsIdLoader(indexRouting, routingPaths); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java index 8af74e03f8605..d76e874c7061b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java @@ -457,7 +457,7 @@ public void testRequiredRouting() { */ private int shardIdFromSimple(IndexRouting indexRouting, String id, @Nullable String routing) { return switch (between(0, 3)) { - case 0 -> indexRouting.indexShard(id, routing, null, null); + case 0 -> indexRouting.indexShard(id, routing, null, null, null); case 1 -> indexRouting.updateShard(id, routing); case 2 -> indexRouting.deleteShard(id, routing); case 3 -> indexRouting.getShard(id, routing); @@ -490,7 +490,7 @@ public void testRoutingPathEmptySource() throws IOException { IndexRouting routing = indexRoutingForPath(between(1, 5), randomAlphaOfLength(5)); Exception e = expectThrows( IllegalArgumentException.class, - () -> routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, source(Map.of())) + () -> routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, source(Map.of()), null) ); assertThat(e.getMessage(), equalTo("Error extracting routing: source didn't contain any routing fields")); } @@ -499,7 +499,7 @@ public void testRoutingPathMismatchSource() throws IOException { IndexRouting routing = indexRoutingForPath(between(1, 5), "foo"); Exception e = expectThrows( IllegalArgumentException.class, - () -> routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, source(Map.of("bar", "dog"))) + () -> routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, source(Map.of("bar", "dog")), null) ); assertThat(e.getMessage(), equalTo("Error extracting routing: source didn't contain any routing fields")); } @@ -520,7 +520,7 @@ public void 
testRoutingIndexWithRouting() throws IOException { String docRouting = randomAlphaOfLength(5); Exception e = expectThrows( IllegalArgumentException.class, - () -> indexRouting.indexShard(randomAlphaOfLength(5), docRouting, XContentType.JSON, source) + () -> indexRouting.indexShard(randomAlphaOfLength(5), docRouting, XContentType.JSON, source, null) ); assertThat( e.getMessage(), @@ -649,7 +649,7 @@ private IndexRouting indexRoutingForPath(IndexVersion createdVersion, int shards private void assertIndexShard(IndexRouting routing, Map source, int expectedShard) throws IOException { byte[] suffix = randomSuffix(); BytesReference sourceBytes = source(source); - assertThat(routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, sourceBytes), equalTo(expectedShard)); + assertThat(routing.indexShard(randomAlphaOfLength(5), null, XContentType.JSON, sourceBytes, s -> {}), equalTo(expectedShard)); IndexRouting.ExtractFromSource r = (IndexRouting.ExtractFromSource) routing; String idFromSource = r.createId(XContentType.JSON, sourceBytes, suffix); assertThat(shardIdForReadFromSourceExtracting(routing, idFromSource), equalTo(expectedShard)); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index d6c5fe812140f..011a23ddb0512 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -19,12 +19,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; @@ -249,14 +245,6 @@ public void testTimeSeriesLoadDocIdAndVersion() throws Exception { } private static String createTSDBId(long timestamp) { - Settings.Builder b = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "field"); - IndexMetadata indexMetadata = IndexMetadata.builder("idx").settings(b).numberOfShards(1).numberOfReplicas(0).build(); - IndexRouting.ExtractFromSource.Builder routingBuilder = ((IndexRouting.ExtractFromSource) IndexRouting.fromIndexMetadata( - indexMetadata - )).builder(); - routingBuilder.addMatching("field", new BytesRef("value")); - return createId(false, routingBuilder, new BytesRef("tsid"), timestamp, new byte[16]); + return createId(randomInt(), new BytesRef("tsid"), timestamp); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java index 0f2380f6c72fb..2b8be2882c409 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java @@ -121,8 +121,9 @@ private static void assertFieldCaps(FieldCapabilitiesResponse fieldCapabilitiesR private static Set 
builtInMetadataFields() { Set builtInMetadataFields = new HashSet<>(IndicesModule.getBuiltInMetadataFields()); - // Index is not a time-series index, and it will not contain a _tsid field + // Index is not a time-series index, and it will not contain _tsid and _ts_routing_hash fields. builtInMetadataFields.remove(TimeSeriesIdFieldMapper.NAME); + builtInMetadataFields.remove(TimeSeriesRoutingHashFieldMapper.NAME); return builtInMetadataFields; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index 5945e5c81856f..e4ce40d4c7c29 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -46,10 +46,10 @@ public class IdLoaderTests extends ESTestCase { + private final int routingHash = randomInt(); + public void testSynthesizeIdSimple() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); - var idLoader = IdLoader.createTsIdLoader(routing, routingPaths); + var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); List docs = List.of( @@ -63,17 +63,17 @@ public void testSynthesizeIdSimple() throws Exception { assertThat(leafReader.numDocs(), equalTo(3)); var leaf = idLoader.leaf(null, leafReader, new int[] { 0, 1, 2 }); // NOTE: time series data is ordered by (tsid, timestamp) - assertThat(leaf.getId(0), equalTo(expectedId(routing, docs.get(2)))); - assertThat(leaf.getId(1), equalTo(expectedId(routing, docs.get(0)))); - assertThat(leaf.getId(2), equalTo(expectedId(routing, docs.get(1)))); + assertThat(leaf.getId(0), equalTo(expectedId(docs.get(2), routingHash))); + assertThat(leaf.getId(1), equalTo(expectedId(docs.get(0), routingHash))); + assertThat(leaf.getId(2), equalTo(expectedId(docs.get(1), routingHash))); }; - prepareIndexReader(indexAndForceMerge(routing, docs), verify, false); + prepareIndexReader(indexAndForceMerge(docs, routingHash), verify, false); } public void testSynthesizeIdMultipleSegments() throws Exception { var routingPaths = List.of("dim1"); var routing = createRouting(routingPaths); - var idLoader = IdLoader.createTsIdLoader(routing, routingPaths); + var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); List docs1 = List.of( @@ -96,15 +96,15 @@ public void testSynthesizeIdMultipleSegments() throws Exception { ); CheckedConsumer buildIndex = writer -> { for (Doc doc : docs1) { - indexDoc(routing, writer, doc); + indexDoc(writer, doc, routingHash); } writer.flush(); for (Doc doc : docs2) { - indexDoc(routing, writer, doc); + indexDoc(writer, doc, routingHash); } writer.flush(); for (Doc doc : docs3) { - indexDoc(routing, writer, doc); + indexDoc(writer, doc, routingHash); } writer.flush(); }; @@ -115,22 +115,22 @@ public void testSynthesizeIdMultipleSegments() throws Exception { assertThat(leafReader.numDocs(), equalTo(docs1.size())); var leaf = idLoader.leaf(null, leafReader, IntStream.range(0, docs1.size()).toArray()); for (int i = 0; i < docs1.size(); i++) { - assertThat(leaf.getId(i), equalTo(expectedId(routing, docs1.get(i)))); + assertThat(leaf.getId(i), equalTo(expectedId(docs1.get(i), routingHash))); } } { LeafReader leafReader = indexReader.leaves().get(1).reader(); assertThat(leafReader.numDocs(), 
equalTo(docs2.size())); var leaf = idLoader.leaf(null, leafReader, new int[] { 0, 3 }); - assertThat(leaf.getId(0), equalTo(expectedId(routing, docs2.get(0)))); - assertThat(leaf.getId(3), equalTo(expectedId(routing, docs2.get(3)))); + assertThat(leaf.getId(0), equalTo(expectedId(docs2.get(0), routingHash))); + assertThat(leaf.getId(3), equalTo(expectedId(docs2.get(3), routingHash))); } { LeafReader leafReader = indexReader.leaves().get(2).reader(); assertThat(leafReader.numDocs(), equalTo(docs3.size())); var leaf = idLoader.leaf(null, leafReader, new int[] { 1, 2 }); - assertThat(leaf.getId(1), equalTo(expectedId(routing, docs3.get(1)))); - assertThat(leaf.getId(2), equalTo(expectedId(routing, docs3.get(2)))); + assertThat(leaf.getId(1), equalTo(expectedId(docs3.get(1), routingHash))); + assertThat(leaf.getId(2), equalTo(expectedId(docs3.get(2), routingHash))); } { LeafReader leafReader = indexReader.leaves().get(2).reader(); @@ -145,13 +145,14 @@ public void testSynthesizeIdMultipleSegments() throws Exception { public void testSynthesizeIdRandom() throws Exception { var routingPaths = List.of("dim1"); var routing = createRouting(routingPaths); - var idLoader = IdLoader.createTsIdLoader(routing, routingPaths); + var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); Set expectedIDs = new HashSet<>(); List randomDocs = new ArrayList<>(); int numberOfTimeSeries = randomIntBetween(8, 64); for (int i = 0; i < numberOfTimeSeries; i++) { + long routingId = 0; int numberOfDimensions = randomIntBetween(1, 6); List dimensions = new ArrayList<>(numberOfDimensions); for (int j = 1; j <= numberOfDimensions; j++) { @@ -163,12 +164,13 @@ public void testSynthesizeIdRandom() throws Exception { value = randomAlphaOfLength(4); } dimensions.add(new Dimension(fieldName, value)); + routingId = value.hashCode(); } int numberOfSamples = randomIntBetween(1, 16); for (int j = 0; j < numberOfSamples; j++) { Doc doc = new Doc(startTime++, dimensions); randomDocs.add(doc); - expectedIDs.add(expectedId(routing, doc)); + expectedIDs.add(expectedId(doc, routingHash)); } } CheckedConsumer verify = indexReader -> { @@ -181,14 +183,14 @@ public void testSynthesizeIdRandom() throws Exception { assertTrue("docId=" + i + " id=" + actualId, expectedIDs.remove(actualId)); } }; - prepareIndexReader(indexAndForceMerge(routing, randomDocs), verify, false); + prepareIndexReader(indexAndForceMerge(randomDocs, routingHash), verify, false); assertThat(expectedIDs, empty()); } - private static CheckedConsumer indexAndForceMerge(IndexRouting.ExtractFromSource routing, List docs) { + private static CheckedConsumer indexAndForceMerge(List docs, int routingHash) { return writer -> { for (Doc doc : docs) { - indexDoc(routing, writer, doc); + indexDoc(writer, doc, routingHash); } writer.forceMerge(1); }; @@ -207,6 +209,7 @@ private void prepareIndexReader( } Sort sort = new Sort( new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), + new SortField(TimeSeriesRoutingHashFieldMapper.NAME, SortField.Type.STRING, false), new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) ); config.setIndexSort(sort); @@ -220,8 +223,8 @@ private void prepareIndexReader( } } - private static void indexDoc(IndexRouting.ExtractFromSource routing, IndexWriter iw, Doc doc) throws IOException { - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new 
TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); + private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IOException { + final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); @@ -237,12 +240,17 @@ private static void indexDoc(IndexRouting.ExtractFromSource routing, IndexWriter } BytesRef tsid = builder.buildTsidHash().toBytesRef(); fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, tsid)); + fields.add( + new SortedDocValuesField( + TimeSeriesRoutingHashFieldMapper.NAME, + Uid.encodeId(TimeSeriesRoutingHashFieldMapper.encode(routingHash)) + ) + ); iw.addDocument(fields); } - private static String expectedId(IndexRouting.ExtractFromSource routing, Doc doc) throws IOException { - var routingBuilder = routing.builder(); - var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routingBuilder); + private static String expectedId(Doc doc, int routingHash) throws IOException { + var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { timeSeriesIdBuilder.addLong(dimension.field, n.longValue()); @@ -250,13 +258,7 @@ private static String expectedId(IndexRouting.ExtractFromSource routing, Doc doc timeSeriesIdBuilder.addString(dimension.field, dimension.value.toString()); } } - return TsidExtractingIdFieldMapper.createId( - false, - routingBuilder, - timeSeriesIdBuilder.buildTsidHash().toBytesRef(), - doc.timestamp, - new byte[16] - ); + return TsidExtractingIdFieldMapper.createId(routingHash, timeSeriesIdBuilder.buildTsidHash().toBytesRef(), doc.timestamp); } private static IndexRouting.ExtractFromSource createRouting(List routingPaths) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java index 53fcd3d331745..e0c092bfd0bfd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; import org.elasticsearch.xcontent.XContentFactory; @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -53,9 +51,7 @@ public void testRoutingMapper() throws Exception { "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()), XContentType.JSON, - "routing_value", - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE + "routing_value" ) ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index 94a0f2296bbfb..50abb47e51125 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -666,16 +666,44 @@ public void testParseWithDynamicMapping() { .put(IndexSettings.MODE.getKey(), "time_series") .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim") .build(); - // without _id - { - MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); - SourceToParse source = new SourceToParse(null, new BytesArray(""" - { - "@timestamp": 1609459200000, - "dim": "6a841a21", - "value": 100 - }"""), XContentType.JSON); - Engine.Index index = IndexShard.prepareIndex( + MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); + SourceToParse source = new SourceToParse(null, new BytesArray(""" + { + "@timestamp": 1609459200000, + "dim": "6a841a21", + "value": 100 + }"""), XContentType.JSON, TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE); + Engine.Index index = IndexShard.prepareIndex( + mapper, + source, + UNASSIGNED_SEQ_NO, + randomNonNegativeLong(), + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + -1, + false, + UNASSIGNED_SEQ_NO, + 0, + System.nanoTime() + ); + assertNotNull(index.parsedDoc().dynamicMappingsUpdate()); + } + + public void testParseWithDynamicMappingInvalidRoutingHash() { + Settings indexSettings = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim") + .build(); + MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); + SourceToParse source = new SourceToParse(null, new BytesArray(""" + { + "@timestamp": 1609459200000, + "dim": "6a841a21", + "value": 100 + }"""), XContentType.JSON, "no such routing hash"); + var failure = expectThrows(DocumentParsingException.class, () -> { + IndexShard.prepareIndex( mapper, source, UNASSIGNED_SEQ_NO, @@ -689,40 +717,41 @@ public void testParseWithDynamicMapping() { 0, System.nanoTime() ); - assertNotNull(index.parsedDoc().dynamicMappingsUpdate()); - } - // with _id - { - MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); - SourceToParse source = new SourceToParse("no-such-tsid", new BytesArray(""" - { - "@timestamp": 1609459200000, - "dim": "6a841a21", - "value": 100 - }"""), XContentType.JSON); - var failure = expectThrows(DocumentParsingException.class, () -> { - IndexShard.prepareIndex( - mapper, - source, - UNASSIGNED_SEQ_NO, - randomNonNegativeLong(), - Versions.MATCH_ANY, - VersionType.INTERNAL, - Engine.Operation.Origin.PRIMARY, - -1, - false, - UNASSIGNED_SEQ_NO, - 0, - System.nanoTime() - ); - }); - assertThat( - failure.getMessage(), - equalTo( - "[5:1] failed to parse: _id must be unset or set to [AAAAAMpxfIC8Wpr0AAABdrs-cAA]" - + " but was [no-such-tsid] because [index] is in time_series mode" - ) + }); + assertThat(failure.getMessage(), equalTo("[5:1] failed to parse: Illegal base64 character 20")); + } + + public void testParseWithDynamicMappingNullId() { + Settings indexSettings = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim") + .build(); + MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); + SourceToParse source = new SourceToParse(null, new BytesArray(""" + { + "@timestamp": 1609459200000, + "dim": "6a841a21", + "value": 100 + }"""), 
XContentType.JSON); + var failure = expectThrows(DocumentParsingException.class, () -> { + IndexShard.prepareIndex( + mapper, + source, + UNASSIGNED_SEQ_NO, + randomNonNegativeLong(), + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + -1, + false, + UNASSIGNED_SEQ_NO, + 0, + System.nanoTime() ); - } + }); + assertThat( + failure.getMessage(), + equalTo("[5:1] failed to parse: _ts_routing_hash was null but must be set because index [index] is in time_series mode") + ); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java new file mode 100644 index 0000000000000..df5ff9a8fe7e5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class TimeSeriesRoutingHashFieldMapperTests extends MetadataMapperTestCase { + + @Override + protected String fieldName() { + return TimeSeriesRoutingHashFieldMapper.NAME; + } + + @Override + protected boolean isConfigurable() { + return false; + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException { + // There aren't any parameters + } + + private DocumentMapper createMapper(XContentBuilder mappings) throws IOException { + return createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name()) + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "routing path is required") + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2021-04-28T00:00:00Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2021-04-29T00:00:00Z") + .build(), + mappings + ).documentMapper(); + } + + private static ParsedDocument parseDocument(int hash, DocumentMapper docMapper, CheckedConsumer f) + throws IOException { + // Add the @timestamp field required by DataStreamTimestampFieldMapper for all time series indices + return docMapper.parse(source(null, b -> { + f.accept(b); + b.field("@timestamp", "2021-10-01"); + }, TimeSeriesRoutingHashFieldMapper.encode(hash))); + } + + private static int getRoutingHash(ParsedDocument document) { + BytesRef value = document.rootDoc().getBinaryValue(TimeSeriesRoutingHashFieldMapper.NAME); + return TimeSeriesRoutingHashFieldMapper.decode(Uid.decodeId(value.bytes)); + } + + @SuppressWarnings("unchecked") + public void testEnabledInTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createMapper(mapping(b -> { + b.startObject("a").field("type", 
"keyword").field("time_series_dimension", true).endObject(); + })); + + int hash = randomInt(); + ParsedDocument doc = parseDocument(hash, docMapper, b -> b.field("a", "value")); + assertThat(doc.rootDoc().getField("a").binaryValue(), equalTo(new BytesRef("value"))); + assertEquals(hash, getRoutingHash(doc)); + } + + public void testDisabledInStandardMode() throws Exception { + DocumentMapper docMapper = createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name()).build(), + mapping(b -> {}) + ).documentMapper(); + assertThat(docMapper.metadataMapper(TimeSeriesRoutingHashFieldMapper.class), is(nullValue())); + + ParsedDocument doc = docMapper.parse(source("id", b -> b.field("field", "value"), null)); + assertThat(doc.rootDoc().getBinaryValue("_ts_routing_hash"), is(nullValue())); + assertThat(doc.rootDoc().get("field"), equalTo("value")); + } + + public void testIncludeInDocumentNotAllowed() throws Exception { + DocumentMapper docMapper = createMapper(mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows( + DocumentParsingException.class, + () -> parseDocument(randomInt(), docMapper, b -> b.field("_ts_routing_hash", "foo")) + ); + + assertThat( + e.getCause().getMessage(), + containsString("Field [_ts_routing_hash] is a metadata field and cannot be added inside a document") + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapperTests.java index c19c21d54a569..0c176a0302620 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapperTests.java @@ -11,10 +11,10 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.name.Named; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; @@ -25,12 +25,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; public class TsidExtractingIdFieldMapperTests extends MetadataMapperTestCase { + private static class TestCase { private final String name; private final String expectedId; @@ -82,7 +84,7 @@ public static Iterable params() { items.add( new TestCase( "2022-01-01T01:00:00Z", - "XsFI2ajcFfi45iV3AAABfhMmioA", + "BwAAAKjcFfi45iV3AAABfhMmioA", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "2022-01-01T01:00:00.000Z", b -> { @@ -94,7 +96,7 @@ public static Iterable params() { items.add( new TestCase( "2022-01-01T01:00:01Z", - "XsFI2ajcFfi45iV3AAABfhMmjmg", + "BwAAAKjcFfi45iV3AAABfhMmjmg", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "2022-01-01T01:00:01.000Z", b -> { @@ -106,7 +108,7 @@ public static Iterable params() { items.add( new TestCase( "1970-01-01T00:00:00Z", - "XsFI2ajcFfi45iV3AAAAAAAAAAA", + "BwAAAKjcFfi45iV3AAAAAAAAAAA", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "1970-01-01T00:00:00.000Z", b -> { @@ -118,7 
+120,7 @@ public static Iterable params() { items.add( new TestCase( "-9998-01-01T00:00:00Z", - "XsFI2ajcFfi45iV3__6oggRgGAA", + "BwAAAKjcFfi45iV3__6oggRgGAA", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "-9998-01-01T00:00:00.000Z", b -> { @@ -130,7 +132,7 @@ public static Iterable params() { items.add( new TestCase( "9998-01-01T00:00:00Z", - "XsFI2ajcFfi45iV3AADmaSK9hAA", + "BwAAAKjcFfi45iV3AADmaSK9hAA", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "9998-01-01T00:00:00.000Z", b -> { @@ -144,7 +146,7 @@ public static Iterable params() { items.add( new TestCase( "r1", - "XsFI2ajcFfi45iV3AAABfhMmioA", + "BwAAAKjcFfi45iV3AAABfhMmioA", "JJSLNivCxv3hDTQtWd6qGUwGlT_5e6_NYGOZWULpmMG9IAlZlA", "2022-01-01T01:00:00.000Z", b -> { @@ -180,7 +182,7 @@ public static Iterable params() { items.add( new TestCase( "r2", - "1y-UzR0iuE1-sOQpAAABfhMmioA", + "BwAAAB0iuE1-sOQpAAABfhMmioA", "JNY_frTR9GmCbhXgK4Y8W44GlT_5e6_NYGOZWULpmMG9IAlZlA", "2022-01-01T01:00:00.000Z", b -> { @@ -192,7 +194,7 @@ public static Iterable params() { items.add( new TestCase( "o.r3", - "zh4dcS1h1gf2J5a8AAABfhMmioA", + "BwAAAC1h1gf2J5a8AAABfhMmioA", "JEyfZsJIp3UNyfWG-4SjKFIGlT_5e6_NYGOZWULpmMG9IAlZlA", "2022-01-01T01:00:00.000Z", b -> { @@ -209,7 +211,7 @@ public static Iterable params() { items.add( new TestCase( "k1=dog", - "XsFI2SrEiVgZlSsYAAABfhMmioA", + "BwAAACrEiVgZlSsYAAABfhMmioA", "KJQKpjU9U63jhh-eNJ1f8bipyU08BpU_-ZJxnTYtoe9Lsg-QvzL-qOY", "2022-01-01T01:00:00.000Z", b -> { @@ -222,7 +224,7 @@ public static Iterable params() { items.add( new TestCase( "k1=pumpkin", - "XsFI2W8GX8-0QcFxAAABfhMmioA", + "BwAAAG8GX8-0QcFxAAABfhMmioA", "KJQKpjU9U63jhh-eNJ1f8bibzw1JBpU_-VsHjSz5HC1yy_swPEM1iGo", "2022-01-01T01:00:00.000Z", b -> { @@ -235,7 +237,7 @@ public static Iterable params() { items.add( new TestCase( "k1=empty string", - "XsFI2cna58i6D-Q6AAABfhMmioA", + "BwAAAMna58i6D-Q6AAABfhMmioA", "KJQKpjU9U63jhh-eNJ1f8bhaCD7uBpU_-SWGG0Uv9tZ1mLO2gi9rC1I", "2022-01-01T01:00:00.000Z", b -> { @@ -248,7 +250,7 @@ public static Iterable params() { items.add( new TestCase( "k2", - "XsFI2VqlzAuv-06kAAABfhMmioA", + "BwAAAFqlzAuv-06kAAABfhMmioA", "KB9H-tGrL_UzqMcqXcgBtzypyU08BpU_-ZJxnTYtoe9Lsg-QvzL-qOY", "2022-01-01T01:00:00.000Z", b -> { @@ -261,7 +263,7 @@ public static Iterable params() { items.add( new TestCase( "o.k3", - "XsFI2S_VhridAKDUAAABfhMmioA", + "BwAAAC_VhridAKDUAAABfhMmioA", "KGXATwN7ISd1_EycFRJ9h6qpyU08BpU_-ZJxnTYtoe9Lsg-QvzL-qOY", "2022-01-01T01:00:00.000Z", b -> { @@ -274,7 +276,7 @@ public static Iterable params() { items.add( new TestCase( "o.r3", - "zh4dcUwfL7x__2oPAAABfhMmioA", + "BwAAAEwfL7x__2oPAAABfhMmioA", "KJaYZVZz8plfkEvvPBpi1EWpyU08BpU_-ZJxnTYtoe9Lsg-QvzL-qOY", "2022-01-01T01:00:00.000Z", b -> { @@ -305,7 +307,7 @@ public static Iterable params() { items.add( new TestCase( "L1=1", - "XsFI2fIe53BtV9PCAAABfhMmioA", + "BwAAAPIe53BtV9PCAAABfhMmioA", "KI4kVxcCLIMM2_VQGD575d-tm41vBpU_-TUExUU_bL3Puq_EBgIaLac", "2022-01-01T01:00:00.000Z", b -> { @@ -318,7 +320,7 @@ public static Iterable params() { items.add( new TestCase( "L1=min", - "XsFI2Qhu7hy1RoXRAAABfhMmioA", + "BwAAAAhu7hy1RoXRAAABfhMmioA", "KI4kVxcCLIMM2_VQGD575d8caJ3TBpU_-cLpg-VnCBnhYk33HZBle6E", "2022-01-01T01:00:00.000Z", b -> { @@ -331,7 +333,7 @@ public static Iterable params() { items.add( new TestCase( "L2=1234", - "XsFI2QTrNu7TTpc-AAABfhMmioA", + "BwAAAATrNu7TTpc-AAABfhMmioA", "KI_1WxF60L0IczG5ftUCWdndcGtgBpU_-QfM2BaR0DMagIfw3TDu_mA", "2022-01-01T01:00:00.000Z", b -> { @@ -344,7 +346,7 @@ public static Iterable params() { items.add( new TestCase( 
"o.L3=max", - "zh4dcWBQI6THHqxoAAABfhMmioA", + "BwAAAGBQI6THHqxoAAABfhMmioA", "KN4a6QzKhzc3nwzNLuZkV51xxTOVBpU_-erUU1qSW4eJ0kP0RmAB9TE", "2022-01-01T01:00:00.000Z", b -> { @@ -375,7 +377,7 @@ public static Iterable params() { items.add( new TestCase( "i1=1", - "XsFI2UMS_RWRoHYjAAABfhMmioA", + "BwAAAEMS_RWRoHYjAAABfhMmioA", "KLGFpvAV8QkWSmX54kXFMgitm41vBpU_-TUExUU_bL3Puq_EBgIaLac", "2022-01-01T01:00:00.000Z", b -> { @@ -388,7 +390,7 @@ public static Iterable params() { items.add( new TestCase( "i1=min", - "XsFI2adlQM5ILoA1AAABfhMmioA", + "BwAAAKdlQM5ILoA1AAABfhMmioA", "KLGFpvAV8QkWSmX54kXFMgjV8hFQBpU_-WG2MicRGWwJdBKWq2F4qy4", "2022-01-01T01:00:00.000Z", b -> { @@ -401,7 +403,7 @@ public static Iterable params() { items.add( new TestCase( "i2=1234", - "XsFI2bhxfB6J0kBFAAABfhMmioA", + "BwAAALhxfB6J0kBFAAABfhMmioA", "KJc4-5eN1uAlYuAknQQLUlxavn2sBpU_-UEXBjgaH1uYcbayrOhdgpc", "2022-01-01T01:00:00.000Z", b -> { @@ -414,7 +416,7 @@ public static Iterable params() { items.add( new TestCase( "o.i3=max", - "zh4dcelxKf19CbfdAAABfhMmioA", + "BwAAAOlxKf19CbfdAAABfhMmioA", "KKqnzPNBe8ObksSo8rNaIFPZPCcBBpU_-Rhd_U6Jn2pjQz2zpmBuJb4", "2022-01-01T01:00:00.000Z", b -> { @@ -445,7 +447,7 @@ public static Iterable params() { items.add( new TestCase( "s1=1", - "XsFI2Y_y-8kD_BFeAAABfhMmioA", + "BwAAAI_y-8kD_BFeAAABfhMmioA", "KFi_JDbvzWyAawmh8IEXedwGlT_5rZuNb-1ruHTTZhtsXRZpZRwWFoc", "2022-01-01T01:00:00.000Z", b -> { @@ -458,7 +460,7 @@ public static Iterable params() { items.add( new TestCase( "s1=min", - "XsFI2WV8VNVnmPVNAAABfhMmioA", + "BwAAAGV8VNVnmPVNAAABfhMmioA", "KFi_JDbvzWyAawmh8IEXedwGlT_5JgBZj9BSCms2_jgeFFhsmDlNFdM", "2022-01-01T01:00:00.000Z", b -> { @@ -471,7 +473,7 @@ public static Iterable params() { items.add( new TestCase( "s2=1234", - "XsFI2VO8mUr-J5CpAAABfhMmioA", + "BwAAAFO8mUr-J5CpAAABfhMmioA", "KKEQ2p3CkpMH61hNk_SuvI0GlT_53XBrYP5TPdmCR-vREPnt20e9f9w", "2022-01-01T01:00:00.000Z", b -> { @@ -484,7 +486,7 @@ public static Iterable params() { items.add( new TestCase( "o.s3=max", - "zh4dcQKh6K11zWeuAAABfhMmioA", + "BwAAAAKh6K11zWeuAAABfhMmioA", "KKVMoT_-GS95fvIBtR7XK9oGlT_5Dme9-H3sen0WZ7leJpCj7-vXau4", "2022-01-01T01:00:00.000Z", b -> { @@ -515,7 +517,7 @@ public static Iterable params() { items.add( new TestCase( "b1=1", - "XsFI2dKxqgT5JDQfAAABfhMmioA", + "BwAAANKxqgT5JDQfAAABfhMmioA", "KGPAUhTjWOsRfDmYp3SUELatm41vBpU_-TUExUU_bL3Puq_EBgIaLac", "2022-01-01T01:00:00.000Z", b -> { @@ -528,7 +530,7 @@ public static Iterable params() { items.add( new TestCase( "b1=min", - "XsFI2d_PD--DgUvoAAABfhMmioA", + "BwAAAN_PD--DgUvoAAABfhMmioA", "KGPAUhTjWOsRfDmYp3SUELYoK6qHBpU_-d8HkZFJ3aL2ZV1lgHAjT1g", "2022-01-01T01:00:00.000Z", b -> { @@ -541,7 +543,7 @@ public static Iterable params() { items.add( new TestCase( "b2=12", - "XsFI2aqX5QjiuhsEAAABfhMmioA", + "BwAAAKqX5QjiuhsEAAABfhMmioA", "KA58oUMzXeX1V5rh51Ste0K5K9vPBpU_-Wn8JQplO-x3CgoslYO5Vks", "2022-01-01T01:00:00.000Z", b -> { @@ -554,7 +556,7 @@ public static Iterable params() { items.add( new TestCase( "o.s3=max", - "zh4dccJ4YtN_21XHAAABfhMmioA", + "BwAAAMJ4YtN_21XHAAABfhMmioA", "KIwZH-StJBobjk9tCV-0OgjKmuwGBpU_-Sd-SdnoH3sbfKLgse-briE", "2022-01-01T01:00:00.000Z", b -> { @@ -585,7 +587,7 @@ public static Iterable params() { items.add( new TestCase( "ip1=192.168.0.1", - "XsFI2T5km9raIz_rAAABfhMmioA", + "BwAAAD5km9raIz_rAAABfhMmioA", "KNj6cLPRNEkqdjfOPIbg0wULrOlWBpU_-efWDsz6B6AnnwbZ7GeeocE", "2022-01-01T01:00:00.000Z", b -> { @@ -602,7 +604,7 @@ public static Iterable params() { items.add( new TestCase( "ip1=12.12.45.254", - "XsFI2QWfEH_e_6wIAAABfhMmioA", + 
"BwAAAAWfEH_e_6wIAAABfhMmioA", "KNj6cLPRNEkqdjfOPIbg0wVhJ08TBpU_-bANzLhvKPczlle7Pq0z8Qw", "2022-01-01T01:00:00.000Z", b -> { @@ -619,7 +621,7 @@ public static Iterable params() { items.add( new TestCase( "ip2=FE80:CD00:0000:0CDE:1257:0000:211E:729C", - "XsFI2WrrLHr1O4iQAAABfhMmioA", + "BwAAAGrrLHr1O4iQAAABfhMmioA", "KNDo3zGxO9HfN9XYJwKw2Z20h-WsBpU_-f4dSOLGSRlL1hoY2mgERuo", "2022-01-01T01:00:00.000Z", b -> { @@ -632,7 +634,7 @@ public static Iterable params() { items.add( new TestCase( "o.ip3=2001:db8:85a3:8d3:1319:8a2e:370:7348", - "zh4dca7d-9aKOS1MAAABfhMmioA", + "BwAAAK7d-9aKOS1MAAABfhMmioA", "KLXDcBBWJAjgJvjSdF_EJwraAQUzBpU_-ba6HZsIyKnGcbmc3KRLlmI", "2022-01-01T01:00:00.000Z", b -> { @@ -663,7 +665,7 @@ public static Iterable params() { items.add( new TestCase( "huge", - "WZKJR_dECvXBSl3xAAABfhMmioA", + "BwAAAPdECvXBSl3xAAABfhMmioA", "LIe18i0rRU_Bt9vB82F46LaS9mrUkvZq1K_2Gi7UEFMhFwNXrLA_H8TLpUr4", "2022-01-01T01:00:00.000Z", b -> { @@ -680,66 +682,50 @@ public static Iterable params() { private final TestCase testCase; + private static final int ROUTING_HASH = 7; + public TsidExtractingIdFieldMapperTests(@Named("testCase") TestCase testCase) { this.testCase = testCase; } public void testExpectedId() throws IOException { - assertThat(parse(null, mapperService(), testCase.source).id(), equalTo(testCase.expectedId)); + assertThat(parse(mapperService(), testCase.source).id(), equalTo(testCase.expectedId)); } public void testProvideExpectedId() throws IOException { assertThat(parse(testCase.expectedId, mapperService(), testCase.source).id(), equalTo(testCase.expectedId)); } - public void testProvideWrongId() { - String wrongId = testCase.expectedId + "wrong"; - Exception e = expectThrows(DocumentParsingException.class, () -> parse(wrongId, mapperService(), testCase.source)); - assertThat( - e.getCause().getMessage(), - equalTo( - "_id must be unset or set to [" - + testCase.expectedId - + "] but was [" - + testCase.expectedId - + "wrong] because [index] is in time_series mode" - ) - ); - } - public void testEquivalentSources() throws IOException { MapperService mapperService = mapperService(); for (CheckedConsumer equivalent : testCase.equivalentSources) { - assertThat(parse(null, mapperService, equivalent).id(), equalTo(testCase.expectedId)); + assertThat(parse(mapperService, equivalent).id(), equalTo(testCase.expectedId)); } } + private ParsedDocument parse(MapperService mapperService, CheckedConsumer source) throws IOException { + return parse(null, mapperService, source); + } + private ParsedDocument parse(@Nullable String id, MapperService mapperService, CheckedConsumer source) throws IOException { try (XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.values()).xContent())) { builder.startObject(); source.accept(builder); builder.endObject(); - SourceToParse sourceToParse = new SourceToParse(id, BytesReference.bytes(builder), builder.contentType()); + SourceToParse sourceToParse = new SourceToParse( + id, + BytesReference.bytes(builder), + builder.contentType(), + TimeSeriesRoutingHashFieldMapper.encode(ROUTING_HASH) + ); return mapperService.documentParser().parseDocument(sourceToParse, mapperService.mappingLookup()); } } public void testRoutingPathCompliant() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); - IndexRouting indexRouting = createIndexSettings(version, indexSettings(version)).getIndexRouting(); - int indexShard = indexShard(indexRouting); - assertThat(indexRouting.getShard(testCase.expectedId, 
null), equalTo(indexShard)); - assertThat(indexRouting.deleteShard(testCase.expectedId, null), equalTo(indexShard)); - } - - private int indexShard(IndexRouting indexRouting) throws IOException { - try (XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.values()).xContent())) { - builder.startObject(); - testCase.source.accept(builder); - builder.endObject(); - return indexRouting.indexShard(null, null, builder.contentType(), BytesReference.bytes(builder)); - } + byte[] bytes = Base64.getUrlDecoder().decode(testCase.expectedId); + assertEquals(ROUTING_HASH, ByteUtils.readIntLE(bytes, 0)); } private Settings indexSettings(IndexVersion version) { @@ -800,7 +786,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { public void testSourceDescription() throws IOException { assertThat(TsidExtractingIdFieldMapper.INSTANCE.documentDescription(documentParserContext()), equalTo("a time series document")); - ParsedDocument d = parse(null, mapperService(), testCase.randomSource()); + ParsedDocument d = parse(mapperService(), testCase.randomSource()); IndexableField timestamp = d.rootDoc().getField(DataStreamTimestampFieldMapper.DEFAULT_PATH); assertThat( TsidExtractingIdFieldMapper.INSTANCE.documentDescription(documentParserContext(timestamp)), @@ -830,7 +816,7 @@ private TestDocumentParserContext documentParserContext(IndexableField... fields public void testParsedDescription() throws IOException { assertThat( - TsidExtractingIdFieldMapper.INSTANCE.documentDescription(parse(null, mapperService(), testCase.randomSource())), + TsidExtractingIdFieldMapper.INSTANCE.documentDescription(parse(mapperService(), testCase.randomSource())), equalTo("[" + testCase.expectedId + "][" + testCase.expectedTsid + "@" + testCase.expectedTimestamp + "]") ); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 1648e38a3f0b9..cade1e66c7fc7 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; @@ -80,6 +81,7 @@ public Map getMetadataMappers() { IdFieldMapper.NAME, RoutingFieldMapper.NAME, TimeSeriesIdFieldMapper.NAME, + TimeSeriesRoutingHashFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, NestedPathFieldMapper.NAME, diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 82fb694db6c66..86c111d1c7145 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import 
org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeases; @@ -521,7 +522,7 @@ public Engine.Index createIndexOp(int docIdent) { { "@timestamp": %s, "dim": "dim" - }""", docIdent)), XContentType.JSON); + }""", docIdent)), XContentType.JSON, TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE); return IndexShard.prepareIndex( mapper, source, diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 423990999fabd..d6e33c43e94c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -22,12 +22,10 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.emptyList; @@ -89,14 +87,7 @@ public Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.O final Translog.Index index = (Translog.Index) operation; final Engine.Index engineIndex = IndexShard.prepareIndex( mapperService, - new SourceToParse( - index.id(), - index.source(), - XContentHelper.xContentType(index.source()), - index.routing(), - Map.of(), - DocumentSizeObserver.EMPTY_INSTANCE - ), + new SourceToParse(index.id(), index.source(), XContentHelper.xContentType(index.source()), index.routing()), index.seqNo(), index.primaryTerm(), index.version(), diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index c393042f07413..09c6eed08bf28 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -714,18 +714,7 @@ protected final String syntheticSource(DocumentMapper mapper, CheckedConsumer Date: Wed, 13 Mar 2024 08:40:16 -0700 Subject: [PATCH 179/248] [DOCS] `time_series_dimension` fields do not support `ignore_above` (#106203) * [DOCS] `time_series_dimension` fields do not support `ignore_above` There is existing validation for this combination of parameters but it was not documented. Closes #99044 * Remove maximum size constraint * Add reasoning for constraints --- docs/reference/mapping/types/keyword.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index 8efdffe75d4fa..a6f41a38f559c 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -165,8 +165,9 @@ Dimension fields have the following constraints: * The `doc_values` and `index` mapping parameters must be `true`. * Field values cannot be an <>. // end::dimension[] -* Field values cannot be larger than 1024 bytes. -* The field cannot use a <>. +* Dimension values are used to identify a document’s time series. 
If dimension values are altered in any way during indexing, the document will be stored as belonging to a different time series than intended. As a result there are additional constraints:
+** <> mapping parameter isn't supported.
+** The field cannot use a <>.
 --
 
 [[keyword-synthetic-source]]

From 25e31b91b4c1fb9ad5e649381f82984a47117ec7 Mon Sep 17 00:00:00 2001
From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com>
Date: Wed, 13 Mar 2024 16:51:04 +0100
Subject: [PATCH 180/248] muted test RestSQLIt >> testCompressCursor (#106295)

---
 .../org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
index ca9532d8dc7d0..c24a41ce9e2f0 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
@@ -1622,6 +1622,7 @@ public void testAsyncTextPaginated() throws IOException, InterruptedException {
         }
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-serverless/issues/1501")
     public void testCompressCursor() throws IOException {
         String doc = IntStream.range(0, 1000)
             .mapToObj(i -> String.format(Locale.ROOT, "\"field%d\": %d", i, i))

From ac4e2f43b7ff957b78b7faf598dd378f7073ed2b Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Wed, 13 Mar 2024 17:03:13 +0100
Subject: [PATCH 181/248] Small time series agg improvement (#106288)

After tsid hashing was introduced (#98023), the time series aggregator
generates the tsid (from all dimension fields) instead of using the value
from the _tsid field directly. This generation of the tsid happens for every
time series, parent bucket and segment combination. This change alters that
by only generating the tsid once per time series and segment. This is done
by just locally recording the current tsid.
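The change amounts to a small per-segment memoisation: the collector remembers the ordinal of the time series it saw last and only rebuilds the tsid when that ordinal changes. The sketch below illustrates that caching pattern in isolation; the class and method names (`TsidCache`, `buildTsid`) are illustrative placeholders rather than Elasticsearch APIs, and the actual change lives in `TimeSeriesAggregator`, shown in the diff that follows.

[source,java]
----
import java.util.function.LongFunction;

/**
 * Minimal, self-contained sketch of the caching idea (illustrative names only,
 * not Elasticsearch API): the expensive tsid is rebuilt only when the
 * time-series ordinal changes, instead of once per document/bucket combination.
 */
final class TsidCache {
    private long currentTsidOrd = -1; // ordinal of the last time series seen in this segment
    private String currentTsid;       // tsid cached for that ordinal

    /** Returns the tsid for {@code tsidOrd}, recomputing it only when the ordinal changes. */
    String tsidFor(long tsidOrd, LongFunction<String> buildTsid) {
        if (tsidOrd != currentTsidOrd) {
            currentTsid = buildTsid.apply(tsidOrd); // expensive: encodes all dimension fields
            currentTsidOrd = tsidOrd;
        }
        return currentTsid;
    }
}
----

Because documents in a time-series segment are sorted by tsid, documents of the same series are adjacent, so the rebuild happens once per time series and segment and every other document pays only a long comparison.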
--- docs/changelog/106288.yaml | 5 +++++ .../bucket/timeseries/TimeSeriesAggregator.java | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/106288.yaml diff --git a/docs/changelog/106288.yaml b/docs/changelog/106288.yaml new file mode 100644 index 0000000000000..0f14e53c237a1 --- /dev/null +++ b/docs/changelog/106288.yaml @@ -0,0 +1,5 @@ +pr: 106288 +summary: Small time series agg improvement +area: TSDB +type: enhancement +issues: [] diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index 9cd7f7a86e532..255a78408eb6d 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -144,6 +144,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt long currentTsidOrd = -1; long currentBucket = -1; long currentBucketOrdinal; + BytesRef currentTsid; @Override public void collect(int doc, long bucket) throws IOException { @@ -157,12 +158,16 @@ public void collect(int doc, long bucket) throws IOException { return; } - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); - for (TsidConsumer consumer : dimensionConsumers.values()) { - consumer.accept(doc, tsidBuilder); + BytesRef tsid; + if (currentTsidOrd == aggCtx.getTsidHashOrd()) { + tsid = currentTsid; + } else { + TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + for (TsidConsumer consumer : dimensionConsumers.values()) { + consumer.accept(doc, tsidBuilder); + } + currentTsid = tsid = tsidBuilder.buildLegacyTsid().toBytesRef(); } - - BytesRef tsid = tsidBuilder.buildLegacyTsid().toBytesRef(); long bucketOrdinal = bucketOrds.add(bucket, tsid); if (bucketOrdinal < 0) { // already seen bucketOrdinal = -1 - bucketOrdinal; From e6d1c905015a3691c408e80a8b31c2fe0181593c Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Wed, 13 Mar 2024 12:04:12 -0400 Subject: [PATCH 182/248] [ES|QL] Add mv_sort (#106095) * add mv_sort --- .../esql/functions/mv-functions.asciidoc | 2 + .../reference/esql/functions/mv_sort.asciidoc | 35 +++ .../esql/functions/signature/mv_sort.svg | 1 + .../esql/functions/types/mv_sort.asciidoc | 13 + .../operator/MultivalueDedupeBytesRef.java | 46 +++- .../operator/MultivalueDedupeDouble.java | 46 +++- .../compute/operator/MultivalueDedupeInt.java | 46 +++- .../operator/MultivalueDedupeLong.java | 46 +++- .../operator/MultivalueDedupeBoolean.java | 47 ++++ .../operator/X-MultivalueDedupe.java.st | 68 +++-- .../src/main/resources/boolean.csv-spec | 23 ++ .../src/main/resources/date.csv-spec | 10 + .../src/main/resources/floats.csv-spec | 27 ++ .../src/main/resources/ints.csv-spec | 53 ++++ .../src/main/resources/ip.csv-spec | 16 ++ .../src/main/resources/show.csv-spec | 4 +- .../src/main/resources/string.csv-spec | 23 ++ .../function/EsqlFunctionRegistry.java | 2 + .../function/scalar/multivalue/MvSort.java | 244 ++++++++++++++++++ .../xpack/esql/io/stream/PlanNamedTypes.java | 14 + .../function/AbstractFunctionTestCase.java | 4 +- .../scalar/multivalue/MvSortTests.java | 215 +++++++++++++++ 22 files changed, 938 
insertions(+), 47 deletions(-) create mode 100644 docs/reference/esql/functions/mv_sort.asciidoc create mode 100644 docs/reference/esql/functions/signature/mv_sort.svg create mode 100644 docs/reference/esql/functions/types/mv_sort.asciidoc create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index 07d89e7879e67..f5ffe9a0d757c 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -17,6 +17,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -31,6 +32,7 @@ include::mv_last.asciidoc[] include::mv_max.asciidoc[] include::mv_median.asciidoc[] include::mv_min.asciidoc[] +include::mv_sort.asciidoc[] include::mv_slice.asciidoc[] include::mv_sum.asciidoc[] include::mv_zip.asciidoc[] diff --git a/docs/reference/esql/functions/mv_sort.asciidoc b/docs/reference/esql/functions/mv_sort.asciidoc new file mode 100644 index 0000000000000..abe09989fbac5 --- /dev/null +++ b/docs/reference/esql/functions/mv_sort.asciidoc @@ -0,0 +1,35 @@ +[discrete] +[[esql-mv_sort]] +=== `MV_SORT` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_sort.svg[Embedded,opts=inline] + +*Parameters* + +`field`:: +Multivalue expression. If `null`, the function returns `null`. + +`order`:: +Sort order. The valid options are ASC and DESC, the default is ASC. + +*Description* + +Sorts a multivalue expression in lexicographical order. + +*Supported types* + +include::types/mv_sort.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/ints.csv-spec[tag=mv_sort] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ints.csv-spec[tag=mv_sort-result] +|=== diff --git a/docs/reference/esql/functions/signature/mv_sort.svg b/docs/reference/esql/functions/signature/mv_sort.svg new file mode 100644 index 0000000000000..1c877ca51612e --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_sort.svg @@ -0,0 +1 @@ +MV_SORT(field,order) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc new file mode 100644 index 0000000000000..01416cdd71ae6 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -0,0 +1,13 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | order | result +boolean | keyword | boolean +datetime | keyword | datetime +double | keyword | double +integer | keyword | integer +ip | keyword | ip +keyword | keyword | keyword +long | keyword | long +text | keyword | text +version | keyword | version +|=== diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java index 89388cd9cc109..422f0bd65a28e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java @@ -79,7 +79,7 @@ public BytesRefBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { 
writeUniquedWork(builder); } else { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -108,7 +108,7 @@ public BytesRefBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); default -> { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -146,6 +146,27 @@ public BytesRefBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } + /** + * Sort values from each position and write the results to a {@link Block}. + */ + public BytesRefBlock sortToBlock(BlockFactory blockFactory, boolean ascending) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int count = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (count) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); + default -> { + copyAndSort(first, count); + writeSortedWork(builder, ascending); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. @@ -300,11 +321,7 @@ private void writeUniquedWork(BytesRefBlock.Builder builder) { /** * Writes a sorted {@link #work} to a {@link BytesRefBlock.Builder}, skipping duplicates. */ - private void writeSortedWork(BytesRefBlock.Builder builder) { - if (w == 1) { - builder.appendBytesRef(work[0]); - return; - } + private void deduplicatedSortedWork(BytesRefBlock.Builder builder) { builder.beginPositionEntry(); BytesRef prev = work[0]; builder.appendBytesRef(prev); @@ -317,6 +334,21 @@ private void writeSortedWork(BytesRefBlock.Builder builder) { builder.endPositionEntry(); } + /** + * Writes a {@link #work} to a {@link BytesRefBlock.Builder}. + */ + private void writeSortedWork(BytesRefBlock.Builder builder, boolean ascending) { + builder.beginPositionEntry(); + for (int i = 0; i < w; i++) { + if (ascending) { + builder.appendBytesRef(work[i]); + } else { + builder.appendBytesRef(work[w - i - 1]); + } + } + builder.endPositionEntry(); + } + /** * Writes an already deduplicated {@link #work} to a hash. */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java index 6066dbe8a74e0..49e515c62f13a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java @@ -76,7 +76,7 @@ public DoubleBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { writeUniquedWork(builder); } else { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -105,7 +105,7 @@ public DoubleBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { case 1 -> builder.appendDouble(block.getDouble(first)); default -> { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -143,6 +143,27 @@ public DoubleBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } + /** + * Sort values from each position and write the results to a {@link Block}. 
+ */ + public DoubleBlock sortToBlock(BlockFactory blockFactory, boolean ascending) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int count = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (count) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendDouble(block.getDouble(first)); + default -> { + copyAndSort(first, count); + writeSortedWork(builder, ascending); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. @@ -289,11 +310,7 @@ private void writeUniquedWork(DoubleBlock.Builder builder) { /** * Writes a sorted {@link #work} to a {@link DoubleBlock.Builder}, skipping duplicates. */ - private void writeSortedWork(DoubleBlock.Builder builder) { - if (w == 1) { - builder.appendDouble(work[0]); - return; - } + private void deduplicatedSortedWork(DoubleBlock.Builder builder) { builder.beginPositionEntry(); double prev = work[0]; builder.appendDouble(prev); @@ -306,6 +323,21 @@ private void writeSortedWork(DoubleBlock.Builder builder) { builder.endPositionEntry(); } + /** + * Writes a {@link #work} to a {@link DoubleBlock.Builder}. + */ + private void writeSortedWork(DoubleBlock.Builder builder, boolean ascending) { + builder.beginPositionEntry(); + for (int i = 0; i < w; i++) { + if (ascending) { + builder.appendDouble(work[i]); + } else { + builder.appendDouble(work[w - i - 1]); + } + } + builder.endPositionEntry(); + } + /** * Writes an already deduplicated {@link #work} to a hash. */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java index 3961208d5e46f..086ec2b5ca4b7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java @@ -76,7 +76,7 @@ public IntBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { writeUniquedWork(builder); } else { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -105,7 +105,7 @@ public IntBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { case 1 -> builder.appendInt(block.getInt(first)); default -> { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -143,6 +143,27 @@ public IntBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } + /** + * Sort values from each position and write the results to a {@link Block}. + */ + public IntBlock sortToBlock(BlockFactory blockFactory, boolean ascending) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int count = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (count) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendInt(block.getInt(first)); + default -> { + copyAndSort(first, count); + writeSortedWork(builder, ascending); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. 
@@ -289,11 +310,7 @@ private void writeUniquedWork(IntBlock.Builder builder) { /** * Writes a sorted {@link #work} to a {@link IntBlock.Builder}, skipping duplicates. */ - private void writeSortedWork(IntBlock.Builder builder) { - if (w == 1) { - builder.appendInt(work[0]); - return; - } + private void deduplicatedSortedWork(IntBlock.Builder builder) { builder.beginPositionEntry(); int prev = work[0]; builder.appendInt(prev); @@ -306,6 +323,21 @@ private void writeSortedWork(IntBlock.Builder builder) { builder.endPositionEntry(); } + /** + * Writes a {@link #work} to a {@link IntBlock.Builder}. + */ + private void writeSortedWork(IntBlock.Builder builder, boolean ascending) { + builder.beginPositionEntry(); + for (int i = 0; i < w; i++) { + if (ascending) { + builder.appendInt(work[i]); + } else { + builder.appendInt(work[w - i - 1]); + } + } + builder.endPositionEntry(); + } + /** * Writes an already deduplicated {@link #work} to a hash. */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java index a3012ffa551b2..0eefb1b7e96a2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java @@ -77,7 +77,7 @@ public LongBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { writeUniquedWork(builder); } else { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -106,7 +106,7 @@ public LongBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { case 1 -> builder.appendLong(block.getLong(first)); default -> { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -144,6 +144,27 @@ public LongBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } + /** + * Sort values from each position and write the results to a {@link Block}. + */ + public LongBlock sortToBlock(BlockFactory blockFactory, boolean ascending) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int count = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (count) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendLong(block.getLong(first)); + default -> { + copyAndSort(first, count); + writeSortedWork(builder, ascending); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. @@ -290,11 +311,7 @@ private void writeUniquedWork(LongBlock.Builder builder) { /** * Writes a sorted {@link #work} to a {@link LongBlock.Builder}, skipping duplicates. */ - private void writeSortedWork(LongBlock.Builder builder) { - if (w == 1) { - builder.appendLong(work[0]); - return; - } + private void deduplicatedSortedWork(LongBlock.Builder builder) { builder.beginPositionEntry(); long prev = work[0]; builder.appendLong(prev); @@ -307,6 +324,21 @@ private void writeSortedWork(LongBlock.Builder builder) { builder.endPositionEntry(); } + /** + * Writes a {@link #work} to a {@link LongBlock.Builder}. 
+ */ + private void writeSortedWork(LongBlock.Builder builder, boolean ascending) { + builder.beginPositionEntry(); + for (int i = 0; i < w; i++) { + if (ascending) { + builder.appendLong(work[i]); + } else { + builder.appendLong(work[w - i - 1]); + } + } + builder.endPositionEntry(); + } + /** * Writes an already deduplicated {@link #work} to a hash. */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java index d6a908306e2f4..8922b03328cb0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntBlock; @@ -63,6 +64,35 @@ public BooleanBlock dedupeToBlock(BlockFactory blockFactory) { } } + /** + * Sort values from each position and write the results to a {@link Block}. + */ + public BooleanBlock sortToBlock(BlockFactory blockFactory, boolean ascending) { + try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int totalCount = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (totalCount) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBoolean(block.getBoolean(first)); + default -> { + int trueCount = countTrue(first, totalCount); + builder.beginPositionEntry(); + if (ascending) { + writeValues(builder, false, 1, totalCount - trueCount); + writeValues(builder, true, totalCount - trueCount + 1, totalCount); + } else { + writeValues(builder, true, 1, trueCount); + writeValues(builder, false, trueCount + 1, totalCount); + } + builder.endPositionEntry(); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link LongBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. 
@@ -198,4 +228,21 @@ public static int hashOrd(boolean[] everSeen, boolean b) { everSeen[FALSE_ORD] = true; return FALSE_ORD; } + + private int countTrue(int first, int count) { + int trueCount = 0; + int end = first + count; + for (int i = first; i < end; i++) { + if (block.getBoolean(i)) { + trueCount++; + } + } + return trueCount; + } + + private void writeValues(BooleanBlock.Builder builder, boolean value, int startIndex, int endIndex) { + for (int i = startIndex; i <= endIndex; i++) { + builder.appendBoolean(value); + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st index d55f1c4cb43ec..f1086489cc07a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st @@ -77,11 +77,11 @@ $endif$ int first = block.getFirstValueIndex(p); switch (count) { case 0 -> builder.appendNull(); - $if(BytesRef)$ +$if(BytesRef)$ case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); - $else$ +$else$ case 1 -> builder.append$Type$(block.get$Type$(first)); - $endif$ +$endif$ default -> { /* * It's better to copyMissing when there are few unique values @@ -105,7 +105,7 @@ $endif$ writeUniquedWork(builder); } else { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -131,14 +131,14 @@ $endif$ int first = block.getFirstValueIndex(p); switch (count) { case 0 -> builder.appendNull(); - $if(BytesRef)$ +$if(BytesRef)$ case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); - $else$ +$else$ case 1 -> builder.append$Type$(block.get$Type$(first)); - $endif$ +$endif$ default -> { copyAndSort(first, count); - writeSortedWork(builder); + deduplicatedSortedWork(builder); } } } @@ -165,11 +165,11 @@ $endif$ int first = block.getFirstValueIndex(p); switch (count) { case 0 -> builder.appendNull(); - $if(BytesRef)$ +$if(BytesRef)$ case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); - $else$ +$else$ case 1 -> builder.append$Type$(block.get$Type$(first)); - $endif$ +$endif$ default -> { copyMissing(first, count); writeUniquedWork(builder); @@ -180,6 +180,31 @@ $endif$ } } + /** + * Sort values from each position and write the results to a {@link Block}. + */ + public $Type$Block sortToBlock(BlockFactory blockFactory, boolean ascending) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int count = block.getValueCount(p); + int first = block.getFirstValueIndex(p); + switch (count) { + case 0 -> builder.appendNull(); +$if(BytesRef)$ + case 1 -> builder.appendBytesRef(block.getBytesRef(first, work[0])); +$else$ + case 1 -> builder.append$Type$(block.get$Type$(first)); +$endif$ + default -> { + copyAndSort(first, count); + writeSortedWork(builder, ascending); + } + } + } + return builder.build(); + } + } + /** * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. @@ -373,11 +398,7 @@ $endif$ /** * Writes a sorted {@link #work} to a {@link $Type$Block.Builder}, skipping duplicates. 
*/ - private void writeSortedWork($Type$Block.Builder builder) { - if (w == 1) { - builder.append$Type$(work[0]); - return; - } + private void deduplicatedSortedWork($Type$Block.Builder builder) { builder.beginPositionEntry(); $type$ prev = work[0]; builder.append$Type$(prev); @@ -394,6 +415,21 @@ $endif$ builder.endPositionEntry(); } + /** + * Writes a {@link #work} to a {@link $Type$Block.Builder}. + */ + private void writeSortedWork($Type$Block.Builder builder, boolean ascending) { + builder.beginPositionEntry(); + for (int i = 0; i < w; i++) { + if (ascending) { + builder.append$Type$(work[i]); + } else { + builder.append$Type$(work[w - i - 1]); + } + } + builder.endPositionEntry(); + } + /** * Writes an already deduplicated {@link #work} to a hash. */ diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index bda103080adc0..a7a110b5778ef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -233,6 +233,29 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10030 |3 |true |true ; +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +row a = [true, false, true, false] | eval sa = mv_sort(a), sb = mv_sort(a, "DESC"); + +a:boolean | sa:boolean | sb:boolean +[true, false, true, false] | [false, false, true, true] | [true, true, false, false] +; + +mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +FROM employees +| eval sd = mv_sort(is_rehired, "DESC"), sa = mv_sort(is_rehired) +| sort emp_no +| keep emp_no, is_rehired, sa, sd +| limit 5 +; + +emp_no:integer | is_rehired:boolean | sa:boolean | sd:boolean +10001 | [false, true] | [false, true] | [true, false] +10002 | [false, false] | [false, false] | [false, false] +10003 | null | null | null +10004 | true | true | true +10005 | [false,false,false,true] | [false,false,false,true] | [true,false,false,false] +; + mvSlice#[skip:-8.13.99, reason:newly added in 8.14] row a = [true, false, false, true] | eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index ba1dd8418bbb6..17960fde31074 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -1151,3 +1151,13 @@ FROM sample_data @timestamp:date | client_ip:ip | event_duration:long | message:keyword // end::docsNowWhere-result[] ; + +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +row a = ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"] +| eval datetime = TO_DATETIME(a) +| eval sa = mv_sort(datetime), sd = mv_sort(datetime, "DESC") +| keep datetime, sa, sd; + +datetime:date | sa:date | sd:date +["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"]| ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"] | ["1987-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1985-01-01T00:00:00.000Z"] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 0138ec1a70989..d62b7fb3d6681 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -438,3 +438,30 @@ ROW deg = [90.0, 180.0, 270.0] [90.0, 180.0, 270.0] | [1.5707963267948966, 3.141592653589793, 4.71238898038469] // end::to_radians-result[] ; + +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +row a = [4.0, 2.0, -3.0, 2.0] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC"); + +a:double | sa:double | sd:double +[4.0, 2.0, -3.0, 2.0] | [-3.0, 2.0, 2.0, 4.0] | [4.0, 2.0, 2.0, -3.0] +; + +mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +FROM employees +| eval sd = mv_sort(salary_change, "DESC"), sa = mv_sort(salary_change) +| sort emp_no +| keep emp_no, salary_change, sa, sd +| limit 9 +; + +emp_no:integer | salary_change:double | sa:double | sd:double +10001 | 1.19 | 1.19 | 1.19 +10002 | [-7.23,11.17] | [-7.23,11.17] | [11.17, -7.23] +10003 | [12.82, 14.68] | [12.82, 14.68] | [14.68,12.82] +10004 | [-0.35,1.13,3.65,13.48] | [-0.35,1.13,3.65,13.48] | [13.48,3.65,1.13,-0.35] +10005 | [-2.14,13.07] | [-2.14,13.07] | [13.07,-2.14] +10006 | -3.90 | -3.90 | -3.90 +10007 | [-7.06,0.57,1.99] | [-7.06,0.57,1.99] | [1.99,0.57,-7.06] +10008 | [-2.92,0.75,3.54,12.68] | [-2.92,0.75,3.54,12.68] | [12.68,3.54,0.75,-2.92] +10009 | null | null | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 63bc452bf5bd5..77ace3bceb721 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -384,6 +384,59 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a); [1, 2, 2, 3] | [1, 2, 3] ; +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +// tag::mv_sort[] +ROW a = [4, 2, -3, 2] +| EVAL sa = mv_sort(a), sd = mv_sort(a, "DESC") +// end::mv_sort[] +; + +// tag::mv_sort-result[] +a:integer | sa:integer | sd:integer +[4, 2, -3, 2] | [-3, 2, 2, 4] | [4, 2, 2, -3] +// end::mv_sort-result[] +; + +mvSortEmpInt#[skip:-8.13.99, reason:newly added in 8.14] +FROM employees +| eval sd = mv_sort(salary_change.int, "DESC"), sa = mv_sort(salary_change.int) +| sort emp_no +| keep emp_no, salary_change.int, sa, sd +| limit 9 +; + +emp_no:integer | salary_change.int:integer | sa:integer | sd:integer +10001 | 1 | 1 | 1 +10002 | [-7, 11] | [-7, 11] | [11, -7] +10003 | [12, 14] | [12, 14] | [14, 12] +10004 | [0, 1, 3, 13] | [0, 1, 3, 13] | [13, 3, 1, 0] +10005 | [-2,13] | [-2,13] | [13, -2] +10006 | -3 | -3 | -3 +10007 | [-7,0, 1] | [-7, 0, 1] | [1,0, -7] +10008 | [-2,0,3,12] | [-2,0,3,12] | [12,3,0,-2] +10009 | null | null | null +; + +mvSortEmpLong#[skip:-8.13.99, reason:newly added in 8.14] +FROM employees +| eval sd = mv_sort(salary_change.long, "DESC"), sa = mv_sort(salary_change.long) +| sort emp_no +| keep emp_no, salary_change.long, sa, sd +| limit 9 +; + +emp_no:integer | salary_change.long:long | sa:long | sd:long +10001 | 1 | 1 | 1 +10002 | [-7, 11] | [-7, 11] | [11, -7] +10003 | [12, 14] | [12, 14] | [14, 12] +10004 | [0, 1, 3, 13] | [0, 1, 3, 13] | [13, 3, 1, 0] +10005 | [-2, 13] | [-2, 13] | [13, -2] +10006 | -3 | -3 | -3 +10007 | [-7, 0, 1] | [-7, 0, 1] | [1, 0, -7] +10008 | [-2, 0, 3, 12] | [-2, 0, 3, 12] | [12, 3, 0, -2] +10009 | null | null | null +; + mvSlice#[skip:-8.13.99, reason:newly added in 8.14] // tag::mv_slice_positive[] row a = [1, 2, 2, 3] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 
54256b3420c82..d79a19aeb3962 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -278,6 +278,22 @@ eth0 |127.0.0.3 eth0 |fe80::cae2:65ff:fece:fec1 ; +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +FROM hosts +| eval sd = mv_sort(ip1, "DESC"), sa = mv_sort(ip1) +| sort host desc, ip1 +| keep host, ip1, sa, sd +| limit 5 +; + +host:keyword | ip1:ip | sa:ip | sd:ip +gamma | 127.0.0.3 | 127.0.0.3 | 127.0.0.3 +gamma | fe81::cae2:65ff:fece:feb9 | fe81::cae2:65ff:fece:feb9 | fe81::cae2:65ff:fece:feb9 +epsilon | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.1, 127.0.0.2, 127.0.0.3] | [127.0.0.3, 127.0.0.2, 127.0.0.1] +epsilon | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 | fe80::cae2:65ff:fece:fec1 +epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe82::cae2:65ff:fece:fec0, fe81::cae2:65ff:fece:feb9] +; + mvSlice#[skip:-8.13.99, reason:newly added in 8.14] from hosts | where host == "epsilon" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index d38dce49020c4..933c106c1a85b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -55,6 +55,7 @@ mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsi mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" |[v, start, end] | "[boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, integer, integer]" | "[A multivalued field, start index, end index (included)]" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" | "Returns a subset of the multivalued field using the start and end index values." | [false, false, true] | false | false +mv_sort |"boolean|date|double|integer|ip|keyword|long|text|version mv_sort(field:boolean|date|double|integer|ip|keyword|long|text|version, ?order:keyword)" | [field, order] | ["boolean|date|double|integer|ip|keyword|long|text|version", "keyword"] | ["A multivalued field", "sort order"] |"boolean|date|double|integer|ip|keyword|long|text|version" | "Sorts a multivalued field in lexicographical order." 
| [false, true] | false | false mv_sum |"double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the sum of all of the values." | false | false | false mv_zip |"keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" |[mvLeft, mvRight, delim] | ["keyword|text", "keyword|text", "keyword|text"] | [A multivalued field, A multivalued field, delimiter] | "keyword" | "Combines the values from two multivalued fields with a delimiter that joins them together." | [false, false, true] | false | false now |date now() | null |null | null |date | "Returns current date and time." | null | false | false @@ -156,6 +157,7 @@ double e() "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" +"boolean|date|double|integer|ip|keyword|long|text|version mv_sort(field:boolean|date|double|integer|ip|keyword|long|text|version, ?order:keyword)" "double|integer|long|unsigned_long mv_sum(v:double|integer|long|unsigned_long)" "keyword mv_zip(mvLeft:keyword|text, mvRight:keyword|text, ?delim:keyword|text)" date now() @@ -228,5 +230,5 @@ countFunctions#[skip:-8.13.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -94 | 94 | 94 +95 | 95 | 95 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 73c508aad03f5..2dbaf4c695e69 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -698,6 +698,29 @@ ROW a=[10, 9, 8] // end::mv_concat-to_string-result[] ; +mvSort#[skip:-8.13.99, reason:newly added in 8.14] +row a = ["Mon", "Tues", "Wed", "Thu", "Fri"] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC"); + +a:keyword | sa:keyword | sd:keyword +["Mon", "Tues", "Wed", "Thu", "Fri"] | [Fri, Mon, Thu, Tues, Wed] | [Wed, Tues, Thu, Mon, Fri] +; + +mvSortEmp#[skip:-8.13.99, reason:newly added in 8.14] +FROM employees +| eval sd = mv_sort(job_positions, "DESC"), sa = mv_sort(job_positions) +| sort emp_no +| keep emp_no, job_positions, sa, sd +| limit 5 +; + +emp_no:integer | job_positions:keyword | sa:keyword | sd:keyword +10001 | [Accountant, Senior Python Developer] | [Accountant, Senior Python Developer] | [Senior Python Developer, Accountant] +10002 | Senior Team Lead | Senior Team Lead | Senior Team Lead +10003 | null | null | null +10004 | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] | [Tech Lead, Support Engineer, Reporting Analyst, Head Human Resources] +10005 | null | null | null +; + mvSliceEmp#[skip:-8.13.99, reason:newly added in 8.14] from employees | eval a1 = mv_slice(salary_change.keyword, 0, 1) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index b577b8a68cd54..fac0121bd05b4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -74,6 +74,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSort; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; @@ -214,6 +215,7 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvSort.class, MvSort::new, "mv_sort"), def(MvSlice.class, MvSlice::new, "mv_slice"), def(MvZip.class, MvZip::new, "mv_zip"), def(MvSum.class, MvSum::new, "mv_sum"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java new file mode 100644 index 0000000000000..c1b615364b0fe --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.TriFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.MultivalueDedupeBoolean; +import org.elasticsearch.compute.operator.MultivalueDedupeBytesRef; +import org.elasticsearch.compute.operator.MultivalueDedupeDouble; +import org.elasticsearch.compute.operator.MultivalueDedupeInt; +import org.elasticsearch.compute.operator.MultivalueDedupeLong; +import org.elasticsearch.xpack.esql.capabilities.Validatable; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.common.Failures; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.expression.Validations.isFoldable; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; + +/** + * Sorts a multivalued field in lexicographical order. + */ +public class MvSort extends EsqlScalarFunction implements OptionalArgument, Validatable { + private final Expression field, order; + + private static final Literal ASC = new Literal(Source.EMPTY, "ASC", DataTypes.KEYWORD); + + @FunctionInfo( + returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + description = "Sorts a multivalued field in lexicographical order." + ) + public MvSort( + Source source, + @Param( + name = "field", + type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + description = "A multivalued field" + ) Expression field, + @Param(name = "order", type = { "keyword" }, description = "sort order", optional = true) Expression order + ) { + super(source, order == null ? Arrays.asList(field, ASC) : Arrays.asList(field, order)); + this.field = field; + this.order = order == null ? 
ASC : order; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isType(field, EsqlDataTypes::isRepresentable, sourceText(), FIRST, "representable"); + + if (resolution.unresolved()) { + return resolution; + } + + return isString(order, sourceText(), SECOND); + } + + @Override + public boolean foldable() { + return field.foldable() && (order == null || order.foldable()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + boolean ordering = order.foldable() && ((BytesRef) order.fold()).utf8ToString().equalsIgnoreCase("DESC") ? false : true; + return switch (PlannerUtils.toElementType(field.dataType())) { + case BOOLEAN -> new MvSort.EvaluatorFactory( + toEvaluator.apply(field), + ordering, + (blockFactory, fieldBlock, sortOrder) -> new MultivalueDedupeBoolean((BooleanBlock) fieldBlock).sortToBlock( + blockFactory, + sortOrder + ), + ElementType.BOOLEAN + ); + case BYTES_REF -> new MvSort.EvaluatorFactory( + toEvaluator.apply(field), + ordering, + (blockFactory, fieldBlock, sortOrder) -> new MultivalueDedupeBytesRef((BytesRefBlock) fieldBlock).sortToBlock( + blockFactory, + sortOrder + ), + ElementType.BYTES_REF + ); + case INT -> new MvSort.EvaluatorFactory( + toEvaluator.apply(field), + ordering, + (blockFactory, fieldBlock, sortOrder) -> new MultivalueDedupeInt((IntBlock) fieldBlock).sortToBlock( + blockFactory, + sortOrder + ), + ElementType.INT + ); + case LONG -> new MvSort.EvaluatorFactory( + toEvaluator.apply(field), + ordering, + (blockFactory, fieldBlock, sortOrder) -> new MultivalueDedupeLong((LongBlock) fieldBlock).sortToBlock( + blockFactory, + sortOrder + ), + ElementType.LONG + ); + case DOUBLE -> new MvSort.EvaluatorFactory( + toEvaluator.apply(field), + ordering, + (blockFactory, fieldBlock, sortOrder) -> new MultivalueDedupeDouble((DoubleBlock) fieldBlock).sortToBlock( + blockFactory, + sortOrder + ), + ElementType.DOUBLE + ); + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; + default -> throw new IllegalArgumentException("unsupported type [" + field.dataType() + "]"); + }; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvSort(source(), newChildren.get(0), newChildren.size() > 1 ? 
newChildren.get(1) : null); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvSort::new, field, order); + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + public int hashCode() { + return Objects.hash(field, order); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MvSort other = (MvSort) obj; + return Objects.equals(other.field, field) && Objects.equals(other.order, order); + } + + @Override + public void validate(Failures failures) { + String operation = sourceText(); + failures.add(isFoldable(order, operation, SECOND)); + } + + private record EvaluatorFactory( + EvalOperator.ExpressionEvaluator.Factory field, + boolean order, + TriFunction sort, + ElementType dataType + ) implements EvalOperator.ExpressionEvaluator.Factory { + @Override + public EvalOperator.ExpressionEvaluator get(DriverContext context) { + return new MvSort.Evaluator(context.blockFactory(), field.get(context), order, sort, dataType); + } + + @Override + public String toString() { + return "MvSort" + dataType + "[field=" + field + ", order=" + order + "]"; + } + } + + private static class Evaluator implements EvalOperator.ExpressionEvaluator { + private final BlockFactory blockFactory; + private final EvalOperator.ExpressionEvaluator field; + private final boolean order; + private final TriFunction sort; + private final ElementType dataType; + + protected Evaluator( + BlockFactory blockFactory, + EvalOperator.ExpressionEvaluator field, + boolean order, + TriFunction sort, + ElementType dataType + ) { + this.blockFactory = blockFactory; + this.field = field; + this.order = order; + this.sort = sort; + this.dataType = dataType; + } + + @Override + public Block eval(Page page) { + try (Block fieldBlock = field.eval(page)) { + return sort.apply(blockFactory, fieldBlock, order); + } + } + + @Override + public String toString() { + return "MvSort" + dataType + "[field=" + field + ", order=" + order + "]"; + } + + @Override + public void close() {} + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 515d6cb5c92b3..96a1ce9ed715e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -98,6 +98,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSort; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvZip; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; @@ -421,6 +422,7 @@ public static List namedTypeEntries() { of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvSort.class, 
PlanNamedTypes::writeMvSort, PlanNamedTypes::readMvSort), of(ScalarFunction.class, MvSlice.class, PlanNamedTypes::writeMvSlice, PlanNamedTypes::readMvSlice), of(ScalarFunction.class, MvSum.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvZip.class, PlanNamedTypes::writeMvZip, PlanNamedTypes::readMvZip), @@ -1852,6 +1854,18 @@ static void writeLog(PlanStreamOutput out, Log log) throws IOException { out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); } + static MvSort readMvSort(PlanStreamInput in) throws IOException { + return new MvSort(in.readSource(), in.readExpression(), in.readOptionalNamed(Expression.class)); + } + + static void writeMvSort(PlanStreamOutput out, MvSort mvSort) throws IOException { + out.writeSource(mvSort.source()); + List fields = mvSort.children(); + assert fields.size() == 1 || fields.size() == 2; + out.writeExpression(fields.get(0)); + out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); + } + static MvSlice readMvSlice(PlanStreamInput in) throws IOException { return new MvSlice(in.readSource(), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 9daf043714efc..40526d35031d4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -181,11 +181,11 @@ public static Expression deepCopyOfField(String name, DataType type) { */ protected abstract Expression build(Source source, List args); - protected final Expression buildFieldExpression(TestCaseSupplier.TestCase testCase) { + protected Expression buildFieldExpression(TestCaseSupplier.TestCase testCase) { return build(testCase.getSource(), testCase.getDataAsFields()); } - protected final Expression buildDeepCopyOfFieldExpression(TestCaseSupplier.TestCase testCase) { + protected Expression buildDeepCopyOfFieldExpression(TestCaseSupplier.TestCase testCase) { return build(testCase.getSource(), testCase.getDataAsDeepCopiedFields()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java new file mode 100644 index 0000000000000..97b8a95289c7d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class MvSortTests extends AbstractFunctionTestCase { + public MvSortTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + booleans(suppliers); + ints(suppliers); + longs(suppliers); + doubles(suppliers); + bytesRefs(suppliers); + return parameterSuppliersFromTypedData(suppliers); + } + + @Override + protected Expression build(Source source, List args) { + return new MvSort(source, args.get(0), args.size() > 1 ? args.get(1) : null); + } + + /** + * Override to create the second argument as a Literal instead of a FieldAttribute. + */ + @Override + protected Expression buildFieldExpression(TestCaseSupplier.TestCase testCase) { + List args = new ArrayList<>(2); + List data = testCase.getData(); + args.add(AbstractFunctionTestCase.field(data.get(0).name(), data.get(0).type())); + args.add(new Literal(Source.synthetic(data.get(1).name()), data.get(1).data(), data.get(1).type())); + return build(testCase.getSource(), args); + } + + /** + * Override to create the second argument as a Literal instead of a FieldAttribute. + */ + @Override + protected Expression buildDeepCopyOfFieldExpression(TestCaseSupplier.TestCase testCase) { + List args = new ArrayList<>(2); + List data = testCase.getData(); + args.add(AbstractFunctionTestCase.deepCopyOfField(data.get(0).name(), data.get(0).type())); + args.add(new Literal(Source.synthetic(data.get(1).name()), data.get(1).data(), data.get(1).type())); + return build(testCase.getSource(), args); + } + + private static void booleans(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.BOOLEAN, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomBoolean()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.BOOLEAN, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.BOOLEAN + "[field=Attribute[channel=0], order=true]", + DataTypes.BOOLEAN, + equalTo(field.size() == 1 ? 
field.iterator().next() : field.stream().sorted().toList()) + ); + })); + + } + + private static void ints(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.INTEGER, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomInt()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.INTEGER, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.INT + "[field=Attribute[channel=0], order=false]", + DataTypes.INTEGER, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); + } + + private static void longs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLong()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.LONG, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.LONG + "[field=Attribute[channel=0], order=true]", + DataTypes.LONG, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted().toList()) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DATETIME, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLong()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DATETIME, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.LONG + "[field=Attribute[channel=0], order=false]", + DataTypes.DATETIME, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); + } + + private static void doubles(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.DOUBLE, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomDouble()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.DOUBLE, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.DOUBLE + "[field=Attribute[channel=0], order=true]", + DataTypes.DOUBLE, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted().toList()) + ); + })); + } + + private static void bytesRefs(List suppliers) { + suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.KEYWORD).value()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.BYTES_REF + "[field=Attribute[channel=0], order=false]", + DataTypes.KEYWORD, + equalTo(field.size() == 1 ? 
field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.TEXT, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.BYTES_REF + "[field=Attribute[channel=0], order=true]", + DataTypes.TEXT, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted().toList()) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.IP, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.IP).value()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.IP, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.BYTES_REF + "[field=Attribute[channel=0], order=false]", + DataTypes.IP, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); + + suppliers.add(new TestCaseSupplier(List.of(DataTypes.VERSION, DataTypes.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLiteral(DataTypes.VERSION).value()); + BytesRef order = new BytesRef("ASC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataTypes.VERSION, "field"), + new TestCaseSupplier.TypedData(order, DataTypes.KEYWORD, "order") + ), + "MvSort" + ElementType.BYTES_REF + "[field=Attribute[channel=0], order=true]", + DataTypes.VERSION, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted().toList()) + ); + })); + } + + @Override + public void testSimpleWithNulls() { + assumeFalse("test case is invalid", false); + } +} From 35f4d4cf64ee2cb81bc36cf0e576781cc3562f36 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 13 Mar 2024 16:24:26 +0000 Subject: [PATCH 183/248] Improve the javadocs around features infrastructure (#106292) --- .../org/elasticsearch/features/FeatureService.java | 8 +++++++- .../features/FeatureSpecification.java | 14 +++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 1d60627656b9e..89632601933a1 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -20,7 +20,8 @@ import java.util.Set; /** - * Manages information on the features supported by nodes in the cluster + * Manages information on the features supported by nodes in the cluster. + * For more information, see {@link FeatureSpecification}. */ public class FeatureService { @@ -36,6 +37,10 @@ public class FeatureService { private final NavigableMap> historicalFeatures; private final Map nodeFeatures; + /** + * Creates a new {@code FeatureService}, reporting all the features declared in {@code specs} + * as the local node's supported feature set + */ public FeatureService(List specs) { var featureData = FeatureData.createFromSpecifications(specs); @@ -47,6 +52,7 @@ public FeatureService(List specs) { /** * The non-historical features supported by this node. 
+ * @return Map of {@code feature-id} to its declaring {@code NodeFeature} object. */ public Map getNodeFeatures() { return nodeFeatures; diff --git a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java index 7df9ac7c4c203..817ccde4bad2e 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java @@ -15,6 +15,17 @@ /** * Specifies one or more features that are supported by this node. + *
<p>
        + * Features are published as part of node information in cluster state. + * Code can check if all nodes in a cluster support a feature using {@link FeatureService#clusterHasFeature}. + * Once all nodes in a cluster support a feature, other nodes are blocked from joining that cluster + * unless they also support that feature (this is known as the 'feature ratchet'). + * So once a feature is supported by a cluster, it will always be supported by that cluster in the future. + *
<p>
        + * The feature information in cluster state should not normally be directly accessed. + * All feature checks should be done through {@code FeatureService} to ensure that Elasticsearch's + * guarantees on the introduction of new functionality are followed; + * that is, new functionality is not enabled until all nodes in the cluster support it. */ public interface FeatureSpecification { /** @@ -25,7 +36,8 @@ default Set getFeatures() { } /** - * Returns information on historical features that should be added to all nodes at or above the {@link Version} specified. + * Returns information on historical features that should be deemed to be present on all nodes + * on or above the {@link Version} specified. */ default Map getHistoricalFeatures() { return Map.of(); From 6a917c7e46ff72adf75f467a9ceb776cc0e03643 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 13 Mar 2024 12:34:43 -0400 Subject: [PATCH 184/248] ESQL: Fix test name for duplicate tests (#106297) When we run the csv-spec tests for ESQL against a real http endpoint we actually run them twice - once async and once sync. But the names of the tests didn't reflect that - they just looked like they were accidentally duplicated. This updates the format. So this: ``` test {string.Trim} test {string.Trim #2} ``` becomes: ``` test {string.Trim ASYNC} test {string.Trim SYNC} ``` --- .../org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index a05a6a284011d..07f480ce9950e 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -67,7 +67,7 @@ public enum Mode { ASYNC } - @ParametersFactory(argumentFormatting = "%2$s.%3$s") + @ParametersFactory(argumentFormatting = "%2$s.%3$s %6$s") public static List readScriptSpec() throws Exception { List urls = classpathResources("/*.csv-spec"); assertTrue("Not enough specs found " + urls, urls.size() > 0); From 8ff083be1b4485ea6269f9611ec5242bc35e7813 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Wed, 13 Mar 2024 17:43:02 +0100 Subject: [PATCH 185/248] =?UTF-8?q?mute=20GetStackTracesActionIT=20=C2=BB?= =?UTF-8?q?=20testGetStackTracesFromAPMWithMatchAndDownsampling=20(#106309?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Muting a test that failed multiple times --- .../elasticsearch/xpack/profiling/GetStackTracesActionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 62b8242e7df86..501d564bbda0d 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -42,6 +42,7 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals("vmlinux", response.getExecutables().get("lHp5_WAgpLy2alrUVab6HA")); } + 
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106308") public void testGetStackTracesFromAPMWithMatchNoDownsampling() throws Exception { BoolQueryBuilder query = QueryBuilders.boolQuery(); query.must().add(QueryBuilders.termQuery("transaction.name", "encodeSha1")); From 405b88b882f6279fbd46dcec4413e403eec77a8c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 13 Mar 2024 09:45:12 -0700 Subject: [PATCH 186/248] Add zstd to native access (#105715) This commit makes zstd compression available to Elasticsearch. The library is pulled in through maven in jar files for each platform, then bundled in a new platform directory under lib. Access to the zstd compression/decompression is through NativeAccess. --- build-tools-internal/build.gradle | 4 + .../internal/JdkDownloadPluginFuncTest.groovy | 6 +- .../src/main/groovy/elasticsearch.ide.gradle | 9 +- .../internal/ElasticsearchJavaBasePlugin.java | 28 ++++ .../gradle/internal/MrjarPlugin.java | 2 +- .../gradle/internal/test/TestUtil.java | 25 ++++ .../fixtures/AbstractGradleFuncTest.groovy | 7 + distribution/archives/build.gradle | 2 +- distribution/build.gradle | 12 +- distribution/packages/build.gradle | 3 +- .../server/cli/SystemJvmOptions.java | 42 ++++++ .../server/cli/JvmOptionsParserTests.java | 57 ++++++- libs/native/build.gradle | 5 - .../jna/JnaCloseableByteBuffer.java | 35 +++++ .../nativeaccess/jna/JnaJavaLibrary.java | 19 +++ .../jna/JnaNativeLibraryProvider.java | 17 ++- .../nativeaccess/jna/JnaZstdLibrary.java | 62 ++++++++ libs/native/libraries/build.gradle | 64 ++++++++ libs/native/src/main/java/module-info.java | 2 +- .../nativeaccess/AbstractNativeAccess.java | 20 ++- .../nativeaccess/CloseableByteBuffer.java | 18 +++ .../nativeaccess/NativeAccess.java | 8 + .../nativeaccess/NativeAccessHolder.java | 4 +- .../nativeaccess/NoopNativeAccess.java | 23 ++- .../nativeaccess/PosixNativeAccess.java | 2 +- .../nativeaccess/WindowsNativeAccess.java | 2 +- .../org/elasticsearch/nativeaccess/Zstd.java | 81 ++++++++++ .../nativeaccess/lib/JavaLibrary.java | 15 ++ .../nativeaccess/lib/NativeLibrary.java | 2 +- .../nativeaccess/lib/ZstdLibrary.java | 24 +++ .../jdk/JdkCloseableByteBuffer.java | 34 +++++ .../nativeaccess/jdk/JdkJavaLibrary.java | 19 +++ .../jdk/JdkNativeLibraryProvider.java | 16 +- .../nativeaccess/jdk/JdkSystemdLibrary.java | 2 + .../nativeaccess/jdk/JdkZstdLibrary.java | 91 +++++++++++ .../elasticsearch/nativeaccess/ZstdTests.java | 141 ++++++++++++++++++ settings.gradle | 1 + 37 files changed, 870 insertions(+), 34 deletions(-) create mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java create mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java create mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaJavaLibrary.java create mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java create mode 100644 libs/native/libraries/build.gradle create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/JavaLibrary.java create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java create mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java create mode 100644 
libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkJavaLibrary.java create mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java create mode 100644 libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 758cdf687e6b6..24647c366c459 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -119,6 +119,10 @@ gradlePlugin { id = 'elasticsearch.java-doc' implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchJavadocPlugin' } + javaBase { + id = 'elasticsearch.java-base' + implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin' + } java { id = 'elasticsearch.java' implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin' diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 67a04ebc5b7a0..24131c633e9d7 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal -import spock.lang.TempDir + import spock.lang.Unroll import com.github.tomakehurst.wiremock.WireMockServer @@ -103,10 +103,6 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.jdk-download' apply false } - - subprojects { - - } """ 3.times { subProject(':sub-' + it) << """ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index ce068d4ca6490..ccbe9cd2f4a2b 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -9,6 +9,7 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.TestUtil import org.jetbrains.gradle.ext.JUnit import java.nio.file.Files @@ -128,9 +129,13 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { ':x-pack:plugin:esql:compute:gen:jar', ':server:generateModulesList', ':server:generatePluginsList', - ':generateProviderImpls'].collect { elasticsearchProject.right()?.task(it) ?: it }) + ':generateProviderImpls', + ':libs:elasticsearch-native:elasticsearch-native-libraries:extractLibs'].collect { elasticsearchProject.right()?.task(it) ?: it }) } + // this path is produced by the extractLibs task above + String testLibraryPath = TestUtil.getTestLibraryPath("${elasticsearchProject.left()}/libs/native/libraries/build/platform") + idea { project { vcs = 'Git' @@ -162,6 +167,8 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { '-ea', '-Djava.security.manager=allow', '-Djava.locale.providers=SPI,COMPAT', + '-Djava.library.path=' + testLibraryPath, + '-Djna.library.path=' + testLibraryPath, // TODO: only open these for mockito when it is modularized '--add-opens=java.base/java.security.cert=ALL-UNNAMED', '--add-opens=java.base/java.nio.channels=ALL-UNNAMED', diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index e224b16bf588e..dbdb065858f43 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -12,11 +12,15 @@ import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; +import org.elasticsearch.gradle.internal.test.TestUtil; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ResolutionStrategy; +import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.Provider; @@ -26,10 +30,13 @@ import org.gradle.api.tasks.compile.CompileOptions; import org.gradle.api.tasks.compile.GroovyCompile; import org.gradle.api.tasks.compile.JavaCompile; +import org.gradle.api.tasks.testing.Test; import org.gradle.jvm.toolchain.JavaLanguageVersion; import org.gradle.jvm.toolchain.JavaToolchainService; import java.util.List; +import java.util.Map; +import java.util.function.Supplier; import javax.inject.Inject; @@ -59,6 +66,7 @@ public void apply(Project project) { configureConfigurations(project); configureCompile(project); configureInputNormalization(project); + configureNativeLibraryPath(project); // convenience access to common versions used in dependencies project.getExtensions().getExtraProperties().set("versions", VersionProperties.getVersions()); @@ -165,6 +173,26 @@ public static void configureInputNormalization(Project project) { project.getNormalization().getRuntimeClasspath().ignore("IMPL-JARS/**/META-INF/MANIFEST.MF"); } + private static void configureNativeLibraryPath(Project project) { + String nativeProject = ":libs:elasticsearch-native:elasticsearch-native-libraries"; + Configuration nativeConfig = project.getConfigurations().create("nativeLibs"); + nativeConfig.defaultDependencies(deps -> { + deps.add(project.getDependencies().project(Map.of("path", nativeProject, "configuration", "default"))); + }); + // This input to the following lambda needs to be serializable. Configuration is not serializable, but FileCollection is. 
+ FileCollection nativeConfigFiles = nativeConfig; + + project.getTasks().withType(Test.class).configureEach(test -> { + var systemProperties = test.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class); + var libraryPath = (Supplier) () -> TestUtil.getTestLibraryPath(nativeConfigFiles.getAsPath()); + + test.dependsOn(nativeConfigFiles); + // we may use JNA or the JDK's foreign function api to load libraries, so we set both sysprops + systemProperties.systemProperty("java.library.path", libraryPath); + systemProperties.systemProperty("jna.library.path", libraryPath); + }); + } + private static Provider releaseVersionProviderFromCompileTask(Project project, AbstractCompile compileTask) { return project.provider(() -> { JavaVersion javaVersion = JavaVersion.toVersion(compileTask.getTargetCompatibility()); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 46fa38a44f564..a9b332c3cfd3c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -105,7 +105,7 @@ private void addMrjarSourceset( testTask.dependsOn(jarTask); SourceSetContainer sourceSets = GradleUtils.getJavaSourceSets(project); - FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getRuntimeClasspath(); + FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getOutput(); FileCollection testRuntime = sourceSets.getByName(SourceSet.TEST_SOURCE_SET_NAME).getRuntimeClasspath(); testTask.setClasspath(testRuntime.minus(mainRuntime).plus(project.files(jarTask))); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java new file mode 100644 index 0000000000000..53742b78accb3 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.Architecture; +import org.elasticsearch.gradle.ElasticsearchDistribution; + +import java.util.Locale; + +public class TestUtil { + + public static String getTestLibraryPath(String nativeLibsDir) { + String arch = Architecture.current().toString().toLowerCase(Locale.ROOT); + String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch); + String existingLibraryPath = System.getProperty("java.library.path"); + + return String.format(Locale.ROOT, "%s/%s:%s", nativeLibsDir, platform, existingLibraryPath); + } +} diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 8218829fe017b..49e9427462195 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -50,6 +50,13 @@ abstract class AbstractGradleFuncTest extends Specification { propertiesFile = testProjectDir.newFile('gradle.properties') propertiesFile << "org.gradle.java.installations.fromEnv=JAVA_HOME,RUNTIME_JAVA_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME" + + def nativeLibsProject = subProject(":libs:elasticsearch-native:elasticsearch-native-libraries") + nativeLibsProject << """ + plugins { + id 'base' + } + """ } def cleanup() { diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 0508f29ef595a..4d7850477dbf5 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -15,7 +15,7 @@ CopySpec archiveFiles(String distributionType, String os, String architecture, b return copySpec { into("elasticsearch-${version}") { into('lib') { - with libFiles + with libFiles(os, architecture) } into('config') { dirMode 0750 diff --git a/distribution/build.gradle b/distribution/build.gradle index c8cc60b6facf6..c3f9192ecee05 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -261,7 +261,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { * Properties to expand when copying packaging files * *****************************************************************************/ configurations { - ['libs', 'libsVersionChecker', 'libsCliLauncher', 'libsServerCli', 'libsWindowsServiceCli', 'libsPluginCli', 'libsKeystoreCli', 'libsSecurityCli', 'libsGeoIpCli', 'libsAnsiConsole'].each { + ['libs', 'libsVersionChecker', 'libsCliLauncher', 'libsServerCli', 'libsWindowsServiceCli', 'libsPluginCli', 'libsKeystoreCli', 'libsSecurityCli', 'libsGeoIpCli', 'libsAnsiConsole', 'libsNative'].each { create(it) { canBeConsumed = false canBeResolved = true @@ -292,6 +292,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { libsKeystoreCli project(path: ':distribution:tools:keystore-cli') libsSecurityCli project(':x-pack:plugin:security:cli') libsGeoIpCli project(':distribution:tools:geoip-cli') + libsNative project(':libs:elasticsearch-native:elasticsearch-native-libraries') } project.ext { @@ -299,7 +300,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { /***************************************************************************** * Common files in all distributions * 
*****************************************************************************/ - libFiles = + libFiles = { os, architecture -> copySpec { // Delay by using closures, since they have not yet been configured, so no jar task exists yet. from(configurations.libs) @@ -330,7 +331,14 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { into('tools/ansi-console') { from(configurations.libsAnsiConsole) } + into('platform') { + from(configurations.libsNative) + if (os != null) { + include (os + '-' + architecture + '/*') + } + } } + } modulesFiles = { os, architecture -> copySpec { diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 1983736e4ee9e..6b57f32310c93 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -131,6 +131,7 @@ def commonPackageConfig(String type, String architecture) { // top level "into" directive is not inherited from ospackage for some reason, so we must // specify it again explicitly for copying common files + String platform = 'linux-' + ((architecture == 'x64') ? 'x86_64' : architecture) into('/usr/share/elasticsearch') { into('bin') { with binFiles(type, false) @@ -140,7 +141,7 @@ def commonPackageConfig(String type, String architecture) { fileMode 0644 } into('lib') { - with libFiles + with libFiles('linux', architecture) } into('modules') { with modulesFiles('linux', architecture) diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 850ee3fc71a22..0e95021a3af7e 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -10,7 +10,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.SuppressForbidden; +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -21,6 +25,8 @@ final class SystemJvmOptions { static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { String distroType = sysprops.get("es.distribution.type"); boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); + String libraryPath = findLibraryPath(sysprops); + return Stream.of( /* * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl; @@ -71,6 +77,8 @@ static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { + // working dir is ES installation, so we use relative path here + Path platformDir = Paths.get("lib", "platform"); + String existingPath = sysprops.get("java.library.path"); + assert existingPath != null; + + String osname = sysprops.get("os.name"); + String os; + if (osname.startsWith("Windows")) { + os = "windows"; + } else if (osname.startsWith("Linux")) { + os = "linux"; + } else if (osname.startsWith("Mac OS")) { + os = "darwin"; + } else { + os = "unsupported_os[" + osname + "]"; + } + String archname = sysprops.get("os.arch"); + String arch; + if (archname.equals("amd64")) { + arch = "x64"; + } else if (archname.equals("aarch64")) { + arch = archname; + } else { + arch = "unsupported_arch[" + archname + "]"; + } + return platformDir.resolve(os + "-" 
+ arch).toAbsolutePath() + getPathSeparator() + existingPath; + } + + @SuppressForbidden(reason = "no way to get path separator with nio") + private static String getPathSeparator() { + return File.pathSeparator; + } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java index 101be4301b522..c24623c75b5c2 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java @@ -17,6 +17,7 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -29,10 +30,12 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; @@ -41,6 +44,15 @@ @WithoutSecurityManager public class JvmOptionsParserTests extends ESTestCase { + private static final Map TEST_SYSPROPS = Map.of( + "os.name", + "Linux", + "os.arch", + "aarch64", + "java.library.path", + "/usr/lib" + ); + public void testSubstitution() { final List jvmOptions = JvmOptionsParser.substitutePlaceholders( List.of("-Djava.io.tmpdir=${ES_TMPDIR}"), @@ -350,30 +362,65 @@ public void accept(final int lineNumber, final String line) { public void testNodeProcessorsActiveCount() { { - final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, Map.of()); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, TEST_SYSPROPS); assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount=")))); } { Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build(); - final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of()); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, TEST_SYSPROPS); assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); } { // check rounding Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build(); - final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of()); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, TEST_SYSPROPS); assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); } { // check validation Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build(); - var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of())); + var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings, TEST_SYSPROPS)); assertThat(e.getMessage(), containsString("setting [node.processors] must be <=")); } } public void testCommandLineDistributionType() { - final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, Map.of("es.distribution.type", 
"testdistro")); + var sysprops = new HashMap<>(TEST_SYSPROPS); + sysprops.put("es.distribution.type", "testdistro"); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro")); } + + public void testLibraryPath() { + assertLibraryPath("Mac OS", "aarch64", "darwin-aarch64"); + assertLibraryPath("Mac OS", "amd64", "darwin-x64"); + assertLibraryPath("Linux", "aarch64", "linux-aarch64"); + assertLibraryPath("Linux", "amd64", "linux-x64"); + assertLibraryPath("Windows", "amd64", "windows-x64"); + assertLibraryPath("Unknown", "aarch64", "unsupported_os[Unknown]-aarch64"); + assertLibraryPath("Mac OS", "Unknown", "darwin-unsupported_arch[Unknown]"); + } + + private void assertLibraryPath(String os, String arch, String expected) { + String existingPath = "/usr/lib"; + var sysprops = Map.of("os.name", os, "os.arch", arch, "java.library.path", existingPath); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); + Map options = new HashMap<>(); + for (var jvmOption : jvmOptions) { + if (jvmOption.startsWith("-D")) { + String[] parts = jvmOption.substring(2).split("="); + assert parts.length == 2; + options.put(parts[0], parts[1]); + } + } + String separator = FileSystems.getDefault().getSeparator(); + assertThat( + options, + hasEntry(equalTo("java.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) + ); + assertThat( + options, + hasEntry(equalTo("jna.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) + ); + } } diff --git a/libs/native/build.gradle b/libs/native/build.gradle index 83a169ce7c2d1..dbe546619c7aa 100644 --- a/libs/native/build.gradle +++ b/libs/native/build.gradle @@ -6,12 +6,7 @@ * Side Public License, v 1. */ -import org.elasticsearch.gradle.transform.UnzipTransform -import org.elasticsearch.gradle.internal.GenerateProviderManifest import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask -import org.gradle.api.internal.artifacts.ArtifactAttributes - -import java.util.stream.Collectors apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.build' diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java new file mode 100644 index 0000000000000..e47b17e234705 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaCloseableByteBuffer.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Memory; + +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + +import java.nio.ByteBuffer; + +class JnaCloseableByteBuffer implements CloseableByteBuffer { + private final Memory memory; + private final ByteBuffer bufferView; + + JnaCloseableByteBuffer(int len) { + this.memory = new Memory(len); + this.bufferView = memory.getByteBuffer(0, len); + } + + @Override + public ByteBuffer buffer() { + return bufferView; + } + + @Override + public void close() { + memory.close(); + } +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaJavaLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaJavaLibrary.java new file mode 100644 index 0000000000000..8526968863688 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaJavaLibrary.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import org.elasticsearch.nativeaccess.CloseableByteBuffer; +import org.elasticsearch.nativeaccess.lib.JavaLibrary; + +class JnaJavaLibrary implements JavaLibrary { + @Override + public CloseableByteBuffer newBuffer(int len) { + return new JnaCloseableByteBuffer(len); + } +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 7d43cb2e3d4bb..8ffa3121f3e54 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -8,14 +8,29 @@ package org.elasticsearch.nativeaccess.jna; +import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.util.Map; public class JnaNativeLibraryProvider extends NativeLibraryProvider { + public JnaNativeLibraryProvider() { - super("jna", Map.of(PosixCLibrary.class, JnaPosixCLibrary::new, SystemdLibrary.class, JnaSystemdLibrary::new)); + super( + "jna", + Map.of( + JavaLibrary.class, + JnaJavaLibrary::new, + PosixCLibrary.class, + JnaPosixCLibrary::new, + SystemdLibrary.class, + JnaSystemdLibrary::new, + ZstdLibrary.class, + JnaZstdLibrary::new + ) + ); } } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java new file mode 100644 index 0000000000000..f0581633ea969 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaZstdLibrary.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Native; + +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; + +import java.nio.ByteBuffer; + +class JnaZstdLibrary implements ZstdLibrary { + + private interface NativeFunctions extends Library { + long ZSTD_compressBound(int scrLen); + + long ZSTD_compress(ByteBuffer dst, int dstLen, ByteBuffer src, int srcLen, int compressionLevel); + + boolean ZSTD_isError(long code); + + String ZSTD_getErrorName(long code); + + long ZSTD_decompress(ByteBuffer dst, int dstLen, ByteBuffer src, int srcLen); + } + + private final NativeFunctions functions; + + JnaZstdLibrary() { + this.functions = Native.load("zstd", NativeFunctions.class); + } + + @Override + public long compressBound(int scrLen) { + return functions.ZSTD_compressBound(scrLen); + } + + @Override + public long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel) { + return functions.ZSTD_compress(dst, dst.remaining(), src, src.remaining(), compressionLevel); + } + + @Override + public boolean isError(long code) { + return functions.ZSTD_isError(code); + } + + @Override + public String getErrorName(long code) { + return functions.ZSTD_getErrorName(code); + } + + @Override + public long decompress(ByteBuffer dst, ByteBuffer src) { + return functions.ZSTD_decompress(dst, dst.remaining(), src, src.remaining()); + } +} diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle new file mode 100644 index 0000000000000..23d2b6e2219d9 --- /dev/null +++ b/libs/native/libraries/build.gradle @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.gradle.transform.UnzipTransform + +apply plugin: 'base' + +configurations { + libs { + attributes.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) + canBeConsumed = false + } +} + +var zstdVersion = "1.5.5" + +repositories { + exclusiveContent { + forRepository { + maven { + url "https://artifactory.elastic.dev/artifactory/elasticsearch-zstd" + metadataSources { + artifact() + } + } + } + filter { + includeModule("org.elasticsearch", "zstd") + } + } +} + +dependencies { + registerTransform(UnzipTransform, transformSpec -> { + transformSpec.getFrom().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.JAR_TYPE); + transformSpec.getTo().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); + }); + libs "org.elasticsearch:zstd:${zstdVersion}:darwin-aarch64" + libs "org.elasticsearch:zstd:${zstdVersion}:darwin-x86-64" + libs "org.elasticsearch:zstd:${zstdVersion}:linux-aarch64" + libs "org.elasticsearch:zstd:${zstdVersion}:linux-x86-64" + libs "org.elasticsearch:zstd:${zstdVersion}:windows-x86-64" +} + +def extractLibs = tasks.register('extractLibs', Copy) { + from configurations.libs + into layout.buildDirectory.dir('platform') + // TODO: fix architecture in uploaded libs + filesMatching("*-x86-64/*") { + it.path = it.path.replace("x86-64", "x64") + } + filesMatching("win32*/*") { + it.path = it.path.replace("win32", "windows") + } +} + +artifacts { + 'default' extractLibs +} diff --git a/libs/native/src/main/java/module-info.java b/libs/native/src/main/java/module-info.java index ea049ff888cb3..46f6d8244359d 100644 --- a/libs/native/src/main/java/module-info.java +++ b/libs/native/src/main/java/module-info.java @@ -14,7 +14,7 @@ requires org.elasticsearch.base; requires org.elasticsearch.logging; - exports org.elasticsearch.nativeaccess to org.elasticsearch.server, org.elasticsearch.systemd; + exports org.elasticsearch.nativeaccess to org.elasticsearch.nativeaccess.jna, org.elasticsearch.server, org.elasticsearch.systemd; // allows jna to implement a library provider, and ProviderLocator to load it exports org.elasticsearch.nativeaccess.lib to org.elasticsearch.nativeaccess.jna, org.elasticsearch.base; diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java index fa23966dbeb79..764dc7c67c9e5 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java @@ -10,15 +10,22 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.lib.JavaLibrary; +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; abstract class AbstractNativeAccess implements NativeAccess { protected static final Logger logger = LogManager.getLogger(NativeAccess.class); private final String name; + private final JavaLibrary javaLib; + private final Zstd zstd; - protected AbstractNativeAccess(String name) { + protected AbstractNativeAccess(String name, NativeLibraryProvider libraryProvider) { this.name = name; + this.javaLib = libraryProvider.getLibrary(JavaLibrary.class); + this.zstd = new Zstd(libraryProvider.getLibrary(ZstdLibrary.class)); } String getName() { @@ -29,4 +36,15 @@ String 
getName() { public Systemd systemd() { return null; } + + @Override + public Zstd getZstd() { + return zstd; + } + + @Override + public CloseableByteBuffer newBuffer(int len) { + assert len > 0; + return javaLib.newBuffer(len); + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java new file mode 100644 index 0000000000000..aa5d94080afa9 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/CloseableByteBuffer.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import java.nio.ByteBuffer; + +public interface CloseableByteBuffer extends AutoCloseable { + ByteBuffer buffer(); + + @Override + void close(); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 77b638690d1b9..5b2be93dadc1f 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -28,4 +28,12 @@ static NativeAccess instance() { boolean definitelyRunningAsRoot(); Systemd systemd(); + + /** + * Returns an accessor to zstd compression functions. + * @return an object used to compress and decompress bytes using zstd + */ + Zstd getZstd(); + + CloseableByteBuffer newBuffer(int len); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java index 6abbe02c47865..562e7163cd098 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java @@ -37,10 +37,10 @@ class NativeAccessHolder { logger.warn("Unable to load native provider. 
Native methods will be disabled.", e); } if (inst == null) { - inst = new NoopNativeAccess(); + INSTANCE = new NoopNativeAccess(); } else { logger.info("Using [" + libProvider.getName() + "] native provider and native methods for [" + inst.getName() + "]"); + INSTANCE = inst; } - INSTANCE = inst; } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java index 6eb6145699fe7..c13fc97324ea7 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -8,11 +8,14 @@ package org.elasticsearch.nativeaccess; -class NoopNativeAccess extends AbstractNativeAccess { +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; - NoopNativeAccess() { - super("noop"); - } +class NoopNativeAccess implements NativeAccess { + + private static final Logger logger = LogManager.getLogger(NativeAccess.class); + + NoopNativeAccess() {} @Override public boolean definitelyRunningAsRoot() { @@ -25,4 +28,16 @@ public Systemd systemd() { logger.warn("Cannot get systemd access because native access is not available"); return null; } + + @Override + public Zstd getZstd() { + logger.warn("cannot compress with zstd because native access is not available"); + return null; + } + + @Override + public CloseableByteBuffer newBuffer(int len) { + logger.warn("cannot allocate buffer because native access is not available"); + return null; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index 050f9e89a0678..99dde99c67af4 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -16,7 +16,7 @@ abstract class PosixNativeAccess extends AbstractNativeAccess { protected final PosixCLibrary libc; PosixNativeAccess(String name, NativeLibraryProvider libraryProvider) { - super(name); + super(name, libraryProvider); this.libc = libraryProvider.getLibrary(PosixCLibrary.class); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java index 86d3952e1504c..7ea3bb65130b8 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java @@ -13,7 +13,7 @@ class WindowsNativeAccess extends AbstractNativeAccess { WindowsNativeAccess(NativeLibraryProvider libraryProvider) { - super("Windows"); + super("Windows", libraryProvider); } @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java new file mode 100644 index 0000000000000..6a0d348d5251b --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Zstd.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; + +import java.nio.ByteBuffer; +import java.util.Objects; + +public final class Zstd { + + private final ZstdLibrary zstdLib; + + Zstd(ZstdLibrary zstdLib) { + this.zstdLib = zstdLib; + } + + /** + * Compress the content of {@code src} into {@code dst} at compression level {@code level}, and return the number of compressed bytes. + * {@link ByteBuffer#position()} and {@link ByteBuffer#limit()} of both {@link ByteBuffer}s are left unmodified. + */ + public int compress(ByteBuffer dst, ByteBuffer src, int level) { + Objects.requireNonNull(dst, "Null destination buffer"); + Objects.requireNonNull(src, "Null source buffer"); + assert dst.isDirect(); + assert dst.isReadOnly() == false; + assert src.isDirect(); + assert src.isReadOnly() == false; + long ret = zstdLib.compress(dst, src, level); + if (zstdLib.isError(ret)) { + throw new IllegalArgumentException(zstdLib.getErrorName(ret)); + } else if (ret < 0 || ret > Integer.MAX_VALUE) { + throw new IllegalStateException("Integer overflow? ret=" + ret); + } + return (int) ret; + } + + /** + * Compress the content of {@code src} into {@code dst}, and return the number of decompressed bytes. {@link ByteBuffer#position()} and + * {@link ByteBuffer#limit()} of both {@link ByteBuffer}s are left unmodified. + */ + public int decompress(ByteBuffer dst, ByteBuffer src) { + Objects.requireNonNull(dst, "Null destination buffer"); + Objects.requireNonNull(src, "Null source buffer"); + assert dst.isDirect(); + assert dst.isReadOnly() == false; + assert src.isDirect(); + assert src.isReadOnly() == false; + long ret = zstdLib.decompress(dst, src); + if (zstdLib.isError(ret)) { + throw new IllegalArgumentException(zstdLib.getErrorName(ret)); + } else if (ret < 0 || ret > Integer.MAX_VALUE) { + throw new IllegalStateException("Integer overflow? ret=" + ret); + } + return (int) ret; + } + + /** + * Return the maximum number of compressed bytes given an input length. + */ + public int compressBound(int srcLen) { + long ret = zstdLib.compressBound(srcLen); + if (zstdLib.isError(ret)) { + throw new IllegalArgumentException(zstdLib.getErrorName(ret)); + } else if (ret < 0 || ret > Integer.MAX_VALUE) { + throw new IllegalArgumentException( + srcLen + + " bytes may require up to " + + Long.toUnsignedString(ret) + + " bytes, which overflows the maximum capacity of a ByteBuffer" + ); + } + return (int) ret; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/JavaLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/JavaLibrary.java new file mode 100644 index 0000000000000..50a3022fa77c2 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/JavaLibrary.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.lib; + +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + +public non-sealed interface JavaLibrary extends NativeLibrary { + CloseableByteBuffer newBuffer(int len); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index cf2116440a8bc..1fb868e1c3892 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,4 +9,4 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits PosixCLibrary, SystemdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, SystemdLibrary, ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java new file mode 100644 index 0000000000000..feb1dbe8e3d61 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/ZstdLibrary.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +import java.nio.ByteBuffer; + +public non-sealed interface ZstdLibrary extends NativeLibrary { + + long compressBound(int scrLen); + + long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel); + + boolean isError(long code); + + String getErrorName(long code); + + long decompress(ByteBuffer dst, ByteBuffer src); +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java new file mode 100644 index 0000000000000..d802fd8be7a67 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkCloseableByteBuffer.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + +import java.lang.foreign.Arena; +import java.nio.ByteBuffer; + +class JdkCloseableByteBuffer implements CloseableByteBuffer { + private final Arena arena; + private final ByteBuffer bufferView; + + JdkCloseableByteBuffer(int len) { + this.arena = Arena.ofShared(); + this.bufferView = this.arena.allocate(len).asByteBuffer(); + } + + @Override + public ByteBuffer buffer() { + return bufferView; + } + + @Override + public void close() { + arena.close(); + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkJavaLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkJavaLibrary.java new file mode 100644 index 0000000000000..60a3966463a77 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkJavaLibrary.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.CloseableByteBuffer; +import org.elasticsearch.nativeaccess.lib.JavaLibrary; + +class JdkJavaLibrary implements JavaLibrary { + @Override + public CloseableByteBuffer newBuffer(int len) { + return new JdkCloseableByteBuffer(len); + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index b808dc3151058..35cc16653de3a 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -8,15 +8,29 @@ package org.elasticsearch.nativeaccess.jdk; +import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.util.Map; public class JdkNativeLibraryProvider extends NativeLibraryProvider { public JdkNativeLibraryProvider() { - super("jdk", Map.of(PosixCLibrary.class, JdkPosixCLibrary::new, SystemdLibrary.class, JdkSystemdLibrary::new)); + super( + "jdk", + Map.of( + JavaLibrary.class, + JdkJavaLibrary::new, + PosixCLibrary.class, + JdkPosixCLibrary::new, + SystemdLibrary.class, + JdkSystemdLibrary::new, + ZstdLibrary.class, + JdkZstdLibrary::new + ) + ); } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java index 682b94b6f4f74..745b93ac918dd 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java @@ -24,6 +24,7 @@ import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; class JdkSystemdLibrary implements SystemdLibrary { + static { System.load(findLibSystemd()); } @@ -39,6 +40,7 @@ static String findLibSystemd() { continue; } 
try (var stream = Files.walk(basepath)) { + var foundpath = stream.filter(Files::isDirectory).map(p -> p.resolve(libsystemd)).filter(Files::exists).findAny(); if (foundpath.isPresent()) { return foundpath.get().toAbsolutePath().toString(); diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java new file mode 100644 index 0000000000000..632240a844255 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.ZstdLibrary; + +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.MemorySegment; +import java.lang.invoke.MethodHandle; +import java.nio.ByteBuffer; + +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BOOLEAN; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkZstdLibrary implements ZstdLibrary { + + static { + System.loadLibrary("zstd"); + } + + private static final MethodHandle compressBound$mh = downcallHandle("ZSTD_compressBound", FunctionDescriptor.of(JAVA_LONG, JAVA_INT)); + private static final MethodHandle compress$mh = downcallHandle( + "ZSTD_compress", + FunctionDescriptor.of(JAVA_LONG, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT, JAVA_INT) + ); + private static final MethodHandle isError$mh = downcallHandle("ZSTD_isError", FunctionDescriptor.of(JAVA_BOOLEAN, JAVA_LONG)); + private static final MethodHandle getErrorName$mh = downcallHandle("ZSTD_getErrorName", FunctionDescriptor.of(ADDRESS, JAVA_LONG)); + private static final MethodHandle decompress$mh = downcallHandle( + "ZSTD_decompress", + FunctionDescriptor.of(JAVA_LONG, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT) + ); + + @Override + public long compressBound(int srcLen) { + try { + return (long) compressBound$mh.invokeExact(srcLen); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long compress(ByteBuffer dst, ByteBuffer src, int compressionLevel) { + var nativeDst = MemorySegment.ofBuffer(dst); + var nativeSrc = MemorySegment.ofBuffer(src); + try { + return (long) compress$mh.invokeExact(nativeDst, dst.remaining(), nativeSrc, src.remaining(), compressionLevel); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public boolean isError(long code) { + try { + return (boolean) isError$mh.invokeExact(code); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public String getErrorName(long code) { + try { + MemorySegment str = (MemorySegment) getErrorName$mh.invokeExact(code); + return str.reinterpret(Long.MAX_VALUE).getUtf8String(0); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long decompress(ByteBuffer dst, ByteBuffer src) { + var nativeDst = MemorySegment.ofBuffer(dst); + var nativeSrc = MemorySegment.ofBuffer(src); + try { + return (long) 
decompress$mh.invokeExact(nativeDst, dst.remaining(), nativeSrc, src.remaining()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java new file mode 100644 index 0000000000000..d051961b06c5f --- /dev/null +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; + +public class ZstdTests extends ESTestCase { + + static NativeAccess nativeAccess; + static Zstd zstd; + + @BeforeClass + public static void getZstd() { + nativeAccess = NativeAccess.instance(); + zstd = nativeAccess.getZstd(); + } + + public void testCompressBound() { + assertThat(zstd.compressBound(0), Matchers.greaterThanOrEqualTo(1)); + assertThat(zstd.compressBound(100), Matchers.greaterThanOrEqualTo(100)); + expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(Integer.MAX_VALUE)); + expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(-1)); + expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(-100)); + expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(Integer.MIN_VALUE)); + } + + public void testCompressValidation() { + try (var src = nativeAccess.newBuffer(1000); var dst = nativeAccess.newBuffer(500)) { + var srcBuf = src.buffer(); + var dstBuf = dst.buffer(); + + var npe1 = expectThrows(NullPointerException.class, () -> zstd.compress(null, srcBuf, 0)); + assertThat(npe1.getMessage(), equalTo("Null destination buffer")); + var npe2 = expectThrows(NullPointerException.class, () -> zstd.compress(dstBuf, null, 0)); + assertThat(npe2.getMessage(), equalTo("Null source buffer")); + + // dst capacity too low + for (int i = 0; i < srcBuf.remaining(); ++i) { + srcBuf.put(i, randomByte()); + } + var e = expectThrows(IllegalArgumentException.class, () -> zstd.compress(dstBuf, srcBuf, 0)); + assertThat(e.getMessage(), equalTo("Destination buffer is too small")); + } + } + + public void testDecompressValidation() { + try ( + var original = nativeAccess.newBuffer(1000); + var compressed = nativeAccess.newBuffer(500); + var restored = nativeAccess.newBuffer(500) + ) { + var originalBuf = original.buffer(); + var compressedBuf = compressed.buffer(); + + var npe1 = expectThrows(NullPointerException.class, () -> zstd.decompress(null, originalBuf)); + assertThat(npe1.getMessage(), equalTo("Null destination buffer")); + var npe2 = expectThrows(NullPointerException.class, () -> zstd.decompress(compressedBuf, null)); + assertThat(npe2.getMessage(), equalTo("Null source buffer")); + + // Invalid compressed format + for (int i = 0; i < originalBuf.remaining(); ++i) { + originalBuf.put(i, (byte) i); + } + var e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(compressedBuf, originalBuf)); + assertThat(e.getMessage(), equalTo("Unknown frame descriptor")); + + int 
compressedLength = zstd.compress(compressedBuf, originalBuf, 0); + compressedBuf.limit(compressedLength); + e = expectThrows(IllegalArgumentException.class, () -> zstd.decompress(restored.buffer(), compressedBuf)); + assertThat(e.getMessage(), equalTo("Destination buffer is too small")); + + } + } + + public void testOneByte() { + doTestRoundtrip(new byte[] { 'z' }); + } + + public void testConstant() { + byte[] b = new byte[randomIntBetween(100, 1000)]; + Arrays.fill(b, randomByte()); + doTestRoundtrip(b); + } + + public void testCycle() { + byte[] b = new byte[randomIntBetween(100, 1000)]; + for (int i = 0; i < b.length; ++i) { + b[i] = (byte) (i & 0x0F); + } + doTestRoundtrip(b); + } + + private void doTestRoundtrip(byte[] data) { + try ( + var original = nativeAccess.newBuffer(data.length); + var compressed = nativeAccess.newBuffer(zstd.compressBound(data.length)); + var restored = nativeAccess.newBuffer(data.length) + ) { + original.buffer().put(0, data); + int compressedLength = zstd.compress(compressed.buffer(), original.buffer(), randomIntBetween(-3, 9)); + compressed.buffer().limit(compressedLength); + int decompressedLength = zstd.decompress(restored.buffer(), compressed.buffer()); + assertThat(restored.buffer(), equalTo(original.buffer())); + assertThat(decompressedLength, equalTo(data.length)); + } + + // Now with non-zero offsets + final int compressedOffset = randomIntBetween(1, 1000); + final int decompressedOffset = randomIntBetween(1, 1000); + try ( + var original = nativeAccess.newBuffer(decompressedOffset + data.length); + var compressed = nativeAccess.newBuffer(compressedOffset + zstd.compressBound(data.length)); + var restored = nativeAccess.newBuffer(decompressedOffset + data.length) + ) { + original.buffer().put(decompressedOffset, data); + original.buffer().position(decompressedOffset); + compressed.buffer().position(compressedOffset); + int compressedLength = zstd.compress(compressed.buffer(), original.buffer(), randomIntBetween(-3, 9)); + compressed.buffer().limit(compressedOffset + compressedLength); + restored.buffer().position(decompressedOffset); + int decompressedLength = zstd.decompress(restored.buffer(), compressed.buffer()); + assertThat( + restored.buffer().slice(decompressedOffset, data.length), + equalTo(original.buffer().slice(decompressedOffset, data.length)) + ); + assertThat(decompressedLength, equalTo(data.length)); + } + } +} diff --git a/settings.gradle b/settings.gradle index c183971bc12ca..97cce0a476d99 100644 --- a/settings.gradle +++ b/settings.gradle @@ -154,6 +154,7 @@ project(":libs").children.each { libsProject -> lp.name = lp.name // for :libs:elasticsearch-x-content:impl } } +project(":libs:elasticsearch-native:libraries").name = "elasticsearch-native-libraries" project(":qa:stable-api").children.each { libsProject -> libsProject.name = "elasticsearch-${libsProject.name}" From 1a0de35cb170df71c5026a35caaed733ccb41de5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 10:29:43 -0700 Subject: [PATCH 187/248] Support filling DoubleArray with stream input (#106305) Similar to #106217, we can't use DoubleArray#readFrom(StreamInput in) in ES|QL because the returned big array is not tracked with the circuit breaker. This PR adds an alternative method, where we create a big array first, then fill it with bytes from a stream input. 
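As a usage sketch (illustrative only; the helper below is not part of this change and assumes the caller already has a breaker-tracked BigArrays instance), the new method mirrors what the new testSerializeDoubleArray test exercises: allocate through BigArrays so the circuit breaker accounts for the memory, then fill the existing array from the stream instead of calling DoubleArray.readFrom:

    static DoubleArray copyViaStream(BigArrays bigArrays, DoubleArray source) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        source.writeTo(out);
        // Pre-allocate a breaker-tracked array of the same size, then consume the
        // bytes written by writeTo instead of letting readFrom allocate untracked.
        DoubleArray copy = bigArrays.newDoubleArray(source.size(), false);
        copy.fillWith(out.bytes().streamInput());
        return copy;
    }
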
Relates #106217 --- .../elasticsearch/common/util/BigArrays.java | 6 +++++ .../common/util/BigDoubleArray.java | 7 ++++++ .../common/util/DoubleArray.java | 5 ++++ .../common/util/ReleasableDoubleArray.java | 5 ++++ .../common/util/BigArraysTests.java | 25 +++++++++++++++++++ .../common/util/MockBigArrays.java | 5 ++++ 6 files changed, 53 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index 36451932edc1a..c63b7cac75634 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -350,6 +350,12 @@ public void fill(long fromIndex, long toIndex, double value) { BigDoubleArray.fill(array, (int) fromIndex, (int) toIndex, value); } + @Override + public void fillWith(StreamInput in) throws IOException { + int numBytes = in.readVInt(); + in.readBytes(array, 0, numBytes); + } + @Override public void set(long index, byte[] buf, int offset, int len) { assert index >= 0 && index < size(); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 8fe6fd80ccd01..041852cf08560 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -18,6 +19,7 @@ import java.nio.ByteOrder; import java.util.Arrays; +import static org.elasticsearch.common.util.BigLongArray.readPages; import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.DOUBLE_PAGE_SIZE; @@ -117,6 +119,11 @@ public static void fill(byte[] page, int from, int to, double value) { } } + @Override + public void fillWith(StreamInput in) throws IOException { + readPages(in, pages); + } + /** Estimates the number of bytes that would be consumed by an array of the given size. */ public static long estimateRamBytes(final long size) { return ESTIMATOR.ramBytesEstimated(size); diff --git a/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java index 932db91bcf481..dde1157c905c7 100644 --- a/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/DoubleArray.java @@ -42,6 +42,11 @@ static DoubleArray readFrom(StreamInput in) throws IOException { */ void fill(long fromIndex, long toIndex, double value); + /** + * Alternative of {@link DoubleArray#readFrom(StreamInput)} where the written bytes are loaded into an existing {@link DoubleArray} + */ + void fillWith(StreamInput in) throws IOException; + /** * Bulk set. 
*/ diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java index ecee36189950b..61b2f52ee384e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableDoubleArray.java @@ -58,6 +58,11 @@ public void fill(long fromIndex, long toIndex, double value) { throw new UnsupportedOperationException(); } + @Override + public void fillWith(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + @Override public void set(long index, byte[] buf, int offset, int len) { throw new UnsupportedOperationException(); diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 3434d8993aecb..13bbd50ce1409 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -246,6 +246,31 @@ public void testLongArrayFill() { array2.close(); } + public void testSerializeDoubleArray() throws Exception { + int len = randomIntBetween(1, 100_000); + DoubleArray array1 = bigArrays.newDoubleArray(len, randomBoolean()); + for (int i = 0; i < len; ++i) { + array1.set(i, randomDouble()); + } + if (randomBoolean()) { + len = randomIntBetween(len, len * 3 / 2); + array1 = bigArrays.resize(array1, len); + } + BytesStreamOutput out = new BytesStreamOutput(); + array1.writeTo(out); + final DoubleArray array2 = bigArrays.newDoubleArray(len, randomBoolean()); + array2.fillWith(out.bytes().streamInput()); + for (int i = 0; i < len; i++) { + assertThat(array2.get(i), equalTo(array1.get(i))); + } + final DoubleArray array3 = DoubleArray.readFrom(out.bytes().streamInput()); + assertThat(array3.size(), equalTo((long) len)); + for (int i = 0; i < len; i++) { + assertThat(array3.get(i), equalTo(array1.get(i))); + } + Releasables.close(array1, array2, array3); + } + public void testSerializeLongArray() throws Exception { int len = randomIntBetween(1, 100_000); LongArray array1 = bigArrays.newLongArray(len, randomBoolean()); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 8ed9073d45625..10339ba6170c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -630,6 +630,11 @@ public void fill(long fromIndex, long toIndex, double value) { in.fill(fromIndex, toIndex, value); } + @Override + public void fillWith(StreamInput streamInput) throws IOException { + in.fillWith(streamInput); + } + @Override public void set(long index, byte[] buf, int offset, int len) { in.set(index, buf, offset, len); From d806d5a65c25f655f4edf18168673f8c714f1ac8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 10:30:21 -0700 Subject: [PATCH 188/248] Support filling IntArray with stream input (#106302) Similar to #106217, we can't use IntArray#readFrom(StreamInput in) in ES|QL because the returned big array is not tracked with the circuit breaker. This PR adds an alternative method, where we create a big array first, then fill it with bytes from a stream input. 
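For reference, the intended deserialization pattern looks like the sketch below (an illustrative helper, not part of this change; it assumes the caller knows the serialized array's length up front, as the new testSerializeIntArray test does):

    static IntArray readTracked(BigArrays bigArrays, StreamInput in, long size) throws IOException {
        // IntArray.readFrom(in) would allocate outside the circuit breaker; instead,
        // allocate through the breaker-tracked BigArrays and fill it from the stream.
        IntArray array = bigArrays.newIntArray(size, false);
        array.fillWith(in);
        return array;
    }
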
Relates #106217 --- .../elasticsearch/common/util/BigArrays.java | 6 +++++ .../common/util/BigIntArray.java | 7 ++++++ .../elasticsearch/common/util/IntArray.java | 5 ++++ .../common/util/ReleasableIntArray.java | 5 ++++ .../common/util/BigArraysTests.java | 25 +++++++++++++++++++ .../common/util/MockBigArrays.java | 5 ++++ 6 files changed, 53 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index c63b7cac75634..feb5109422f5a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -235,6 +235,12 @@ public void fill(long fromIndex, long toIndex, int value) { BigIntArray.fill(array, (int) fromIndex, (int) toIndex, value); } + @Override + public void fillWith(StreamInput in) throws IOException { + final int numBytes = in.readVInt(); + in.readBytes(array, 0, numBytes); + } + @Override public void set(long index, byte[] buf, int offset, int len) { assert index >= 0 && index < size(); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index b40574ccb9af8..f12293ab41ae8 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -18,6 +19,7 @@ import java.nio.ByteOrder; import java.util.Arrays; +import static org.elasticsearch.common.util.BigLongArray.readPages; import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.INT_PAGE_SIZE; @@ -92,6 +94,11 @@ public void fill(long fromIndex, long toIndex, int value) { } } + @Override + public void fillWith(StreamInput in) throws IOException { + readPages(in, pages); + } + public static void fill(byte[] page, int from, int to, int value) { if (from < to) { VH_PLATFORM_NATIVE_INT.set(page, from << 2, value); diff --git a/server/src/main/java/org/elasticsearch/common/util/IntArray.java b/server/src/main/java/org/elasticsearch/common/util/IntArray.java index 89c6a12fad030..06975ffba46da 100644 --- a/server/src/main/java/org/elasticsearch/common/util/IntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/IntArray.java @@ -41,6 +41,11 @@ static IntArray readFrom(StreamInput in) throws IOException { */ void fill(long fromIndex, long toIndex, int value); + /** + * Alternative of {@link IntArray#readFrom(StreamInput)} where the written bytes are loaded into an existing {@link IntArray} + */ + void fillWith(StreamInput in) throws IOException; + /** * Bulk set. 
*/ diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java index 9dbe6bc0771ef..2b433f6812a87 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableIntArray.java @@ -58,6 +58,11 @@ public void fill(long fromIndex, long toIndex, int value) { throw new UnsupportedOperationException(); } + @Override + public void fillWith(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + @Override public void set(long index, byte[] buf, int offset, int len) { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 13bbd50ce1409..7b759975cfaaa 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -296,6 +296,31 @@ public void testSerializeLongArray() throws Exception { Releasables.close(array1, array2, array3); } + public void testSerializeIntArray() throws Exception { + int len = randomIntBetween(1, 100_000); + IntArray array1 = bigArrays.newIntArray(len, randomBoolean()); + for (int i = 0; i < len; ++i) { + array1.set(i, randomInt()); + } + if (randomBoolean()) { + len = randomIntBetween(len, len * 3 / 2); + array1 = bigArrays.resize(array1, len); + } + BytesStreamOutput out = new BytesStreamOutput(); + array1.writeTo(out); + final IntArray array2 = bigArrays.newIntArray(len, randomBoolean()); + array2.fillWith(out.bytes().streamInput()); + for (int i = 0; i < len; i++) { + assertThat(array2.get(i), equalTo(array1.get(i))); + } + final IntArray array3 = IntArray.readFrom(out.bytes().streamInput()); + assertThat(array3.size(), equalTo((long) len)); + for (int i = 0; i < len; i++) { + assertThat(array3.get(i), equalTo(array1.get(i))); + } + Releasables.close(array1, array2, array3); + } + public void testByteArrayBulkGet() { final byte[] array1 = new byte[randomIntBetween(1, 4000000)]; random().nextBytes(array1); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 10339ba6170c8..481ca207a71cf 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -475,6 +475,11 @@ public void fill(long fromIndex, long toIndex, int value) { in.fill(fromIndex, toIndex, value); } + @Override + public void fillWith(StreamInput streamInput) throws IOException { + in.fillWith(streamInput); + } + @Override public void set(long index, byte[] buf, int offset, int len) { in.set(index, buf, offset, len); From fffc39467c242e528392f42253cde05ffe6ff32f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 11:24:33 -0700 Subject: [PATCH 189/248] Fix potential leak in ProjectOperator (#106320) While writing a timeseries test, I found that we leaked pages in ProjectOperator. The root cause was an incorrect projection map, but ProjectOperator should not leak pages even when the projection is invalid.
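The leak was easiest to hit with a projection entry that points past the end of the incoming page: the old code had already retained some blocks with incRef() when the bounds check threw, and neither those blocks nor the input page were ever released. A distilled view of the new control flow (simplified from the diff that follows; all calls appear there) builds the output under try/finally, releases the retained blocks if no output page was produced, and releases the input page on every path:

    int b = 0;
    Page output = null;
    try {
        for (int source : projection) {
            if (source >= blockCount) {
                throw new IllegalArgumentException("Cannot project block with index [" + source + "]");
            }
            var block = page.getBlock(source);
            blocks[b++] = block;
            block.incRef();              // retained for the output page
        }
        output = new Page(page.getPositionCount(), blocks);
        return output;
    } finally {
        if (output == null) {
            Releasables.close(blocks);   // failure: release whatever was already retained
        }
        Arrays.fill(blocks, null);
        page.releaseBlocks();            // the input page is released on every path
    }
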
--- .../compute/operator/ProjectOperator.java | 40 +++++++++++-------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java index d318639625034..9b4d9d8f11a31 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java @@ -9,6 +9,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; import java.util.Arrays; import java.util.List; @@ -50,24 +51,31 @@ protected Page process(Page page) { if (blockCount == 0) { return page; } - int b = 0; - for (int source : projection) { - if (source >= blockCount) { - throw new IllegalArgumentException( - "Cannot project block with index [" + source + "] from a page with size [" + blockCount + "]" - ); + Page output = null; + try { + int b = 0; + for (int source : projection) { + if (source >= blockCount) { + throw new IllegalArgumentException( + "Cannot project block with index [" + source + "] from a page with size [" + blockCount + "]" + ); + } + var block = page.getBlock(source); + blocks[b++] = block; + block.incRef(); } - var block = page.getBlock(source); - blocks[b++] = block; - block.incRef(); + int positionCount = page.getPositionCount(); + // Use positionCount explicitly to avoid re-computing - also, if the projection is empty, there may be + // no more blocks left to determine the positionCount from. + output = new Page(positionCount, blocks); + return output; + } finally { + if (output == null) { + Releasables.close(blocks); + } + Arrays.fill(blocks, null); + page.releaseBlocks(); } - int positionCount = page.getPositionCount(); - page.releaseBlocks(); - // Use positionCount explicitly to avoid re-computing - also, if the projection is empty, there may be - // no more blocks left to determine the positionCount from. 
- Page output = new Page(positionCount, blocks); - Arrays.fill(blocks, null); - return output; } @Override From de33a57f55297d5d898f4a459b593331226a1849 Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:27:49 -0400 Subject: [PATCH 190/248] Allowing byte and int8 (#106299) --- ...xt.java => ConfigurationParseContext.java} | 6 +- .../services/cohere/CohereService.java | 17 +++-- .../cohere/CohereServiceSettings.java | 5 +- .../embeddings/CohereEmbeddingsModel.java | 5 +- .../CohereEmbeddingsServiceSettings.java | 30 ++++++++- .../services/openai/OpenAiService.java | 7 ++- .../embeddings/OpenAiEmbeddingsModel.java | 4 +- .../OpenAiEmbeddingsServiceSettings.java | 4 +- .../OpenAiEmbeddingsTaskSettings.java | 4 +- .../cohere/CohereServiceSettingsTests.java | 18 +++--- .../CohereEmbeddingsServiceSettingsTests.java | 63 ++++++++++++++++--- .../OpenAiEmbeddingsServiceSettingsTests.java | 24 +++---- .../OpenAiEmbeddingsTaskSettingsTests.java | 12 ++-- 13 files changed, 145 insertions(+), 54 deletions(-) rename x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/{openai/OpenAiParseContext.java => ConfigurationParseContext.java} (65%) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiParseContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ConfigurationParseContext.java similarity index 65% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiParseContext.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ConfigurationParseContext.java index 5730db30ba66e..e774c137c9e04 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiParseContext.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ConfigurationParseContext.java @@ -5,13 +5,13 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.inference.services.openai; +package org.elasticsearch.xpack.inference.services; -public enum OpenAiParseContext { +public enum ConfigurationParseContext { REQUEST, PERSISTENT; - public static boolean isRequestContext(OpenAiParseContext context) { + public static boolean isRequestContext(ConfigurationParseContext context) { return context == REQUEST; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index 4f476e60ee2db..2bd88f6f01eb4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -24,6 +24,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -71,7 +72,7 @@ public void parseRequestConfig( taskSettingsMap, serviceSettingsMap, TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), - true + ConfigurationParseContext.REQUEST ); throwIfNotEmptyMap(config, NAME); @@ -92,7 +93,15 @@ private static CohereModel createModelWithoutLoggingDeprecations( @Nullable Map secretSettings, String failureMessage ) { - return createModel(inferenceEntityId, taskType, serviceSettings, taskSettings, secretSettings, failureMessage, false); + return createModel( + inferenceEntityId, + taskType, + serviceSettings, + taskSettings, + secretSettings, + failureMessage, + ConfigurationParseContext.PERSISTENT + ); } private static CohereModel createModel( @@ -102,7 +111,7 @@ private static CohereModel createModel( Map taskSettings, @Nullable Map secretSettings, String failureMessage, - boolean logDeprecations + ConfigurationParseContext context ) { return switch (taskType) { case TEXT_EMBEDDING -> new CohereEmbeddingsModel( @@ -112,7 +121,7 @@ private static CohereModel createModel( serviceSettings, taskSettings, secretSettings, - logDeprecations + context ); default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); }; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java index 97ad1b575caa9..8ea271cdb64a5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java @@ -20,6 +20,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import java.io.IOException; import java.net.URI; @@ -43,7 +44,7 @@ public class CohereServiceSettings implements ServiceSettings { public static final String OLD_MODEL_ID_FIELD = 
"model"; public static final String MODEL_ID = "model_id"; - public static CohereServiceSettings fromMap(Map map, boolean logDeprecations) { + public static CohereServiceSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); @@ -56,7 +57,7 @@ public static CohereServiceSettings fromMap(Map map, boolean log String modelId = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); - if (logDeprecations && oldModelId != null) { + if (context == ConfigurationParseContext.REQUEST && oldModelId != null) { logger.info("The cohere [service_settings.model] field is deprecated. Please use [service_settings.model_id] instead."); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java index ddd3b71ef4538..be25361724c1b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModel.java @@ -14,6 +14,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.cohere.CohereModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; @@ -32,13 +33,13 @@ public CohereEmbeddingsModel( Map serviceSettings, Map taskSettings, @Nullable Map secrets, - boolean logDeprecations + ConfigurationParseContext context ) { this( modelId, taskType, service, - CohereEmbeddingsServiceSettings.fromMap(serviceSettings, logDeprecations), + CohereEmbeddingsServiceSettings.fromMap(serviceSettings, context), CohereEmbeddingsTaskSettings.fromMap(taskSettings), DefaultSecretSettings.fromMap(secrets) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettings.java index b31802f4a7ccb..a8ae8aa8d7fdd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettings.java @@ -17,23 +17,29 @@ import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; import java.io.IOException; import java.util.EnumSet; +import java.util.Locale; import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; public class CohereEmbeddingsServiceSettings implements ServiceSettings { public static final String NAME = "cohere_embeddings_service_settings"; static final String EMBEDDING_TYPE = "embedding_type"; + static final String EMBEDDING_TYPE_BYTE = "byte"; - public static CohereEmbeddingsServiceSettings fromMap(Map map, boolean logDeprecations) { + public static CohereEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); - var commonServiceSettings = CohereServiceSettings.fromMap(map, logDeprecations); + var commonServiceSettings = CohereServiceSettings.fromMap(map, context); + translateEmbeddingType(map, context); + CohereEmbeddingType embeddingTypes = extractOptionalEnum( map, EMBEDDING_TYPE, @@ -50,6 +56,26 @@ public static CohereEmbeddingsServiceSettings fromMap(Map map, b return new CohereEmbeddingsServiceSettings(commonServiceSettings, embeddingTypes); } + private static void translateEmbeddingType(Map map, ConfigurationParseContext context) { + if (ConfigurationParseContext.isRequestContext(context) == false || map.containsKey(EMBEDDING_TYPE) == false) { + return; + } + + ValidationException validationException = new ValidationException(); + + String embeddingType = extractRequiredString(map, EMBEDDING_TYPE, ModelConfigurations.SERVICE_SETTINGS, validationException); + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + assert embeddingType != null; + if (embeddingType.toLowerCase(Locale.ROOT).equals(EMBEDDING_TYPE_BYTE)) { + map.put(EMBEDDING_TYPE, CohereEmbeddingType.INT8.toString()); + } else { + map.put(EMBEDDING_TYPE, embeddingType); + } + } + private final CohereServiceSettings commonSettings; private final CohereEmbeddingType embeddingType; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 234328de67efe..130928b17ff8d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -25,6 +25,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -75,7 +76,7 @@ public void parseRequestConfig( taskSettingsMap, serviceSettingsMap, TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ); throwIfNotEmptyMap(config, NAME); @@ -103,7 +104,7 @@ private static OpenAiModel createModelFromPersistent( taskSettings, secretSettings, failureMessage, - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ); } @@ -114,7 +115,7 @@ private static OpenAiModel createModel( Map taskSettings, @Nullable Map secretSettings, String failureMessage, - OpenAiParseContext context + ConfigurationParseContext context ) { return switch 
(taskType) { case TEXT_EMBEDDING -> new OpenAiEmbeddingsModel( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java index fd87f5112533f..e822fa069598f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java @@ -13,8 +13,8 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; -import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.util.Map; @@ -37,7 +37,7 @@ public OpenAiEmbeddingsModel( Map serviceSettings, Map taskSettings, @Nullable Map secrets, - OpenAiParseContext context + ConfigurationParseContext context ) { this( inferenceEntityId, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 468e82d4f0866..34713ff2b7208 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -18,8 +18,8 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceUtils; -import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; import java.io.IOException; import java.net.URI; @@ -48,7 +48,7 @@ public class OpenAiEmbeddingsServiceSettings implements ServiceSettings { static final String ORGANIZATION = "organization_id"; static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; - public static OpenAiEmbeddingsServiceSettings fromMap(Map map, OpenAiParseContext context) { + public static OpenAiEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { return switch (context) { case REQUEST -> fromRequestMap(map); case PERSISTENT -> fromPersistentMap(map); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java index 1a202e8ca8249..a7b11487ca72f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -16,7 +16,7 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import java.io.IOException; import java.util.Map; @@ -35,7 +35,7 @@ public class OpenAiEmbeddingsTaskSettings implements TaskSettings { public static final String NAME = "openai_embeddings_task_settings"; public static final String USER = "user"; - public static OpenAiEmbeddingsTaskSettings fromMap(Map map, OpenAiParseContext context) { + public static OpenAiEmbeddingsTaskSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java index bbee8aa1de577..b5ea720490b5a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.hamcrest.CoreMatchers; @@ -77,7 +78,7 @@ public void testFromMap() { model ) ), - true + ConfigurationParseContext.REQUEST ); MatcherAssert.assertThat( @@ -107,7 +108,7 @@ public void testFromMap_WhenUsingModelId() { model ) ), - false + ConfigurationParseContext.PERSISTENT ); MatcherAssert.assertThat( @@ -139,7 +140,7 @@ public void testFromMap_PrefersModelId_OverModel() { model ) ), - false + ConfigurationParseContext.PERSISTENT ); MatcherAssert.assertThat( @@ -149,14 +150,14 @@ public void testFromMap_PrefersModelId_OverModel() { } public void testFromMap_MissingUrl_DoesNotThrowException() { - var serviceSettings = CohereServiceSettings.fromMap(new HashMap<>(Map.of()), false); + var serviceSettings = CohereServiceSettings.fromMap(new HashMap<>(Map.of()), ConfigurationParseContext.PERSISTENT); assertNull(serviceSettings.getUri()); } public void testFromMap_EmptyUrl_ThrowsError() { var thrownException = expectThrows( ValidationException.class, - () -> CohereServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.URL, "")), false) + () -> CohereServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.URL, "")), ConfigurationParseContext.PERSISTENT) ); MatcherAssert.assertThat( @@ -174,7 +175,7 @@ public void testFromMap_InvalidUrl_ThrowsError() { var url = "https://www.abc^.com"; var thrownException = expectThrows( ValidationException.class, - () -> CohereServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.URL, url)), false) + () -> CohereServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.URL, 
url)), ConfigurationParseContext.PERSISTENT) ); MatcherAssert.assertThat( @@ -187,7 +188,10 @@ public void testFromMap_InvalidSimilarity_ThrowsError() { var similarity = "by_size"; var thrownException = expectThrows( ValidationException.class, - () -> CohereServiceSettings.fromMap(new HashMap<>(Map.of(ServiceFields.SIMILARITY, similarity)), false) + () -> CohereServiceSettings.fromMap( + new HashMap<>(Map.of(ServiceFields.SIMILARITY, similarity)), + ConfigurationParseContext.PERSISTENT + ) ); MatcherAssert.assertThat( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java index 2f5eba676a314..41906cca15fe9 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; import org.elasticsearch.xpack.inference.InferenceNamedWriteablesProvider; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; @@ -24,6 +25,7 @@ import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.net.URI; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -63,7 +65,7 @@ public void testFromMap() { CohereEmbeddingType.INT8.toString() ) ), - false + ConfigurationParseContext.PERSISTENT ); MatcherAssert.assertThat( @@ -100,7 +102,7 @@ public void testFromMap_WithModelId() { CohereEmbeddingType.INT8.toString() ) ), - false + ConfigurationParseContext.PERSISTENT ); MatcherAssert.assertThat( @@ -139,7 +141,7 @@ public void testFromMap_PrefersModelId_OverModel() { CohereEmbeddingType.INT8.toString() ) ), - false + ConfigurationParseContext.PERSISTENT ); MatcherAssert.assertThat( @@ -154,14 +156,17 @@ public void testFromMap_PrefersModelId_OverModel() { } public void testFromMap_MissingEmbeddingType_DoesNotThrowException() { - var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(new HashMap<>(Map.of()), false); + var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(new HashMap<>(Map.of()), ConfigurationParseContext.PERSISTENT); assertNull(serviceSettings.getEmbeddingType()); } public void testFromMap_EmptyEmbeddingType_ThrowsError() { var thrownException = expectThrows( ValidationException.class, - () -> CohereEmbeddingsServiceSettings.fromMap(new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "")), true) + () -> CohereEmbeddingsServiceSettings.fromMap( + new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "")), + ConfigurationParseContext.REQUEST + ) ); MatcherAssert.assertThat( @@ -180,7 +185,7 @@ public void testFromMap_InvalidEmbeddingType_ThrowsError() { ValidationException.class, () -> CohereEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "abc")), - false + ConfigurationParseContext.PERSISTENT ) ); @@ 
-194,12 +199,31 @@ public void testFromMap_InvalidEmbeddingType_ThrowsError() { ); } + public void testFromMap_InvalidEmbeddingType_ThrowsError_WhenByteFromPersistedConfig() { + var thrownException = expectThrows( + ValidationException.class, + () -> CohereEmbeddingsServiceSettings.fromMap( + new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, CohereEmbeddingsServiceSettings.EMBEDDING_TYPE_BYTE)), + ConfigurationParseContext.PERSISTENT + ) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [byte] received. [embedding_type] must be one of [float, int8];" + ) + ) + ); + } + public void testFromMap_ReturnsFailure_WhenEmbeddingTypesAreNotValid() { var exception = expectThrows( ElasticsearchStatusException.class, () -> CohereEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, List.of("abc"))), - false + ConfigurationParseContext.PERSISTENT ) ); @@ -209,6 +233,31 @@ public void testFromMap_ReturnsFailure_WhenEmbeddingTypesAreNotValid() { ); } + public void testFromMap_ConvertsCohereEmbeddingType_FromByteToInt8() { + assertThat( + CohereEmbeddingsServiceSettings.fromMap( + new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, CohereEmbeddingsServiceSettings.EMBEDDING_TYPE_BYTE)), + ConfigurationParseContext.REQUEST + ), + is(new CohereEmbeddingsServiceSettings(new CohereServiceSettings((URI) null, null, null, null, null), CohereEmbeddingType.INT8)) + ); + } + + public void testFromMap_PreservesEmbeddingTypeFloat() { + assertThat( + CohereEmbeddingsServiceSettings.fromMap( + new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, CohereEmbeddingType.FLOAT.toString())), + ConfigurationParseContext.REQUEST + ), + is( + new CohereEmbeddingsServiceSettings( + new CohereServiceSettings((URI) null, null, null, null, null), + CohereEmbeddingType.FLOAT + ) + ) + ); + } + @Override protected Writeable.Reader instanceReader() { return CohereEmbeddingsServiceSettings::new; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java index 18b5ab44f59ca..00cea6dc6ed21 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java @@ -16,9 +16,9 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; -import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; import org.hamcrest.CoreMatchers; import java.io.IOException; @@ -89,7 +89,7 @@ public void testFromMap_Request_CreatesSettingsCorrectly() { maxInputTokens ) ), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ); assertThat( @@ -129,7 +129,7 @@ public void testFromMap_Request_DimensionsSetByUser_IsFalse_WhenDimensionsAreNot maxInputTokens ) ), - 
OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ); assertThat( @@ -174,7 +174,7 @@ public void testFromMap_Persistent_CreatesSettingsCorrectly() { false ) ), - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ); assertThat( @@ -196,7 +196,7 @@ public void testFromMap_Persistent_CreatesSettingsCorrectly() { public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIsNull() { var settings = OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, true, ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ); assertThat(settings, is(new OpenAiEmbeddingsServiceSettings("m", (URI) null, null, null, null, null, true))); @@ -207,7 +207,7 @@ public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUse ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.DIMENSIONS, 1, ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ) ); @@ -220,7 +220,7 @@ public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUse public void testFromMap_MissingUrl_DoesNotThrowException() { var serviceSettings = OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m", OpenAiEmbeddingsServiceSettings.ORGANIZATION, "org")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ); assertNull(serviceSettings.uri()); assertThat(serviceSettings.modelId(), is("m")); @@ -232,7 +232,7 @@ public void testFromMap_EmptyUrl_ThrowsError() { ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.URL, "", ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); @@ -250,7 +250,7 @@ public void testFromMap_EmptyUrl_ThrowsError() { public void testFromMap_MissingOrganization_DoesNotThrowException() { var serviceSettings = OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ); assertNull(serviceSettings.uri()); assertNull(serviceSettings.organizationId()); @@ -261,7 +261,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); @@ -282,7 +282,7 @@ public void testFromMap_InvalidUrl_ThrowsError() { ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.URL, url, ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); @@ -298,7 +298,7 @@ public void testFromMap_InvalidSimilarity_ThrowsError() { ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.SIMILARITY, similarity, ServiceFields.MODEL_ID, "m")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java index 3c91b68a545fd..6448b66d11cf3 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -40,7 +40,7 @@ public void testFromMap_WithUser() { new OpenAiEmbeddingsTaskSettings("user"), OpenAiEmbeddingsTaskSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); } @@ -50,7 +50,7 @@ public void testFromMap_UserIsEmptyString() { ValidationException.class, () -> OpenAiEmbeddingsTaskSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "")), - OpenAiParseContext.REQUEST + ConfigurationParseContext.REQUEST ) ); @@ -61,14 +61,14 @@ public void testFromMap_UserIsEmptyString() { } public void testFromMap_MissingUser_DoesNotThrowException() { - var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of()), OpenAiParseContext.PERSISTENT); + var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of()), ConfigurationParseContext.PERSISTENT); assertNull(taskSettings.user()); } public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ); var overriddenTaskSettings = OpenAiEmbeddingsTaskSettings.of(taskSettings, OpenAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS); @@ -78,7 +78,7 @@ public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { public void testOverrideWith_UsesOverriddenSettings() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), - OpenAiParseContext.PERSISTENT + ConfigurationParseContext.PERSISTENT ); var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap( From 3a87d260674c966a75a52bb10f470e6cedf9263c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 11:47:03 -0700 Subject: [PATCH 191/248] Fix time series source operator for empty index (#106322) Currently, we can return a LuceneSlice without leaves, which breaks TimeSeriesSortedSourceOperator. 
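The fix takes the approach of never creating such a slice in the first place: LuceneSliceQueue.create skips leaf groups that are empty, and the corresponding numLeaves() == 0 check in LuceneOperator goes away. A rough, self-contained sketch of that pattern follows; the class and method names here are illustrative only, the real change is in the diff below.

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only (not the actual Elasticsearch code): keep only groups that
    // contain at least one element, so no empty slice is ever handed to a consumer.
    final class NonEmptyGroups {
        static <T> List<List<T>> filter(List<List<T>> groups) {
            List<List<T>> kept = new ArrayList<>();
            for (List<T> group : groups) {
                if (group.isEmpty() == false) {
                    kept.add(group);
                }
            }
            return kept;
        }
    }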
--- .../compute/lucene/LuceneOperator.java | 3 -- .../compute/lucene/LuceneSliceQueue.java | 4 +- .../xpack/esql/action/TimeSeriesIT.java | 42 +++++++++++++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index d43eb8c280695..f49111a3275d6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -88,9 +88,6 @@ LuceneScorer getCurrentOrLoadNextScorer() { doneCollecting = true; return null; } - if (currentSlice.numLeaves() == 0) { - continue; - } processedSlices++; processedShards.add(currentSlice.shardContext().shardIdentifier()); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java index d0329174f2839..f367499b12902 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java @@ -71,7 +71,9 @@ public static LuceneSliceQueue create( weight.get(); // eagerly build Weight once } for (List group : groups) { - slices.add(new LuceneSlice(ctx, group, weight)); + if (group.isEmpty() == false) { + slices.add(new LuceneSlice(ctx, group, weight)); + } } } return new LuceneSliceQueue(slices); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java new file mode 100644 index 0000000000000..9aeeb10da2ad9 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; + +import java.util.List; + +public class TimeSeriesIT extends AbstractEsqlIntegTestCase { + + @Override + protected EsqlQueryResponse run(EsqlQueryRequest request) { + assertTrue("time series requires pragmas", canUseQueryPragmas()); + var settings = Settings.builder().put(request.pragmas().getSettings()).put(QueryPragmas.TIME_SERIES_MODE.getKey(), "true").build(); + request.pragmas(new QueryPragmas(settings)); + return super.run(request); + } + + public void testEmpty() { + Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("pod")).build(); + client().admin() + .indices() + .prepareCreate("pods") + .setSettings(settings) + .setMapping( + "@timestamp", + "type=date", + "pod", + "type=keyword,time_series_dimension=true", + "cpu", + "type=long,time_series_metric=gauge" + ) + .get(); + run("FROM pods | LIMIT 1").close(); + } +} From 8b14c1b7f038442b6e180dd6acc470e7a7709ddb Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 13 Mar 2024 11:47:33 -0700 Subject: [PATCH 192/248] Writeable BitArray (#106300) Make the BitArray writable so that it can be serialized directly in the BooleanBigArrayVector/BooleanBigArrayBlock. --- .../elasticsearch/common/util/BitArray.java | 31 +++++++++++++- .../common/util/BitArrayTests.java | 40 +++++++++++++++++-- 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 96c00538f07d4..53244a0f2888a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -10,16 +10,21 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.io.IOException; + /** * A bit array that is implemented using a growing {@link LongArray} * created from {@link BigArrays}. * The underlying long array grows lazily based on the biggest index * that needs to be set. */ -public final class BitArray implements Accountable, Releasable { +public final class BitArray implements Accountable, Releasable, Writeable { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BitArray.class); @@ -35,6 +40,30 @@ public BitArray(long initialSize, BigArrays bigArrays) { this.bits = bigArrays.newLongArray(wordNum(initialSize) + 1, true); } + /** + * Create a {@link BitArray} using {@link BigArrays} with bytes written by {@link BitArray#writeTo} + */ + public BitArray(BigArrays bigArrays, boolean readOnly, StreamInput in) throws IOException { + this.bigArrays = bigArrays; + final long numBits = in.readVLong(); + this.bits = bigArrays.newLongArray(wordNum(numBits), readOnly == false); + boolean success = false; + try { + this.bits.fillWith(in); + success = true; + } finally { + if (success == false) { + this.bits.close(); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(size()); + bits.writeTo(out); + } + /** * Set the {@code index}th bit.
*/ diff --git a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index 7d7539f8dd28c..f81a4bd2f4a18 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -8,15 +8,19 @@ package org.elasticsearch.common.util; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.junit.Assume.assumeThat; @@ -59,15 +63,22 @@ public void testVeryLarge() { } } - public void testTooBigIsNotSet() { - try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + public void testTooBigIsNotSet() throws IOException { + try (BitArray bits1 = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { for (int i = 0; i < 1000; i++) { /* * The first few times this is called we check within the * array. But we quickly go beyond it and those all return * false as well. */ - assertFalse(bitArray.get(i)); + assertFalse(bits1.get(i)); + } + BytesStreamOutput out = new BytesStreamOutput(); + bits1.writeTo(out); + try (BitArray bits2 = new BitArray(BigArrays.NON_RECYCLING_INSTANCE, randomBoolean(), out.bytes().streamInput())) { + for (int i = 0; i < 1000; i++) { + assertFalse(bits2.get(i)); + } } } } @@ -171,4 +182,27 @@ public void testGetAndSet() { assertFalse(bitArray.get(1001)); } } + + public void testSerialize() throws Exception { + int initial = randomIntBetween(1, 100_000); + BitArray bits1 = new BitArray(initial, BigArrays.NON_RECYCLING_INSTANCE); + int numBits = randomIntBetween(1, 1000_000); + for (int i = 0; i < numBits; i++) { + if (randomBoolean()) { + bits1.set(i); + } + if (rarely()) { + bits1.clear(i); + } + } + BytesStreamOutput out = new BytesStreamOutput(); + bits1.writeTo(out); + BitArray bits2 = new BitArray(BigArrays.NON_RECYCLING_INSTANCE, randomBoolean(), out.bytes().streamInput()); + assertThat(bits2.size(), equalTo(bits1.size())); + for (long i = 0; i < bits1.size(); i++) { + assertThat(bits2.get(i), equalTo(bits1.get(i))); + } + Releasables.close(bits1, bits2); + } + } From 26219564ef43331686d07688f7d23e9a894ae36d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 13 Mar 2024 14:48:40 -0400 Subject: [PATCH 193/248] ESQL: Replace many version tests with features (#106293) This replaces many of the feature comparisons in esql's csv-spec tests with feature checks.
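In practice, a csv-spec test that was previously gated on a version range (the #[skip:-8.11.99, ...] annotations removed below) now declares a line such as required_feature: esql.mv_warn, and the runner only executes it when the cluster publishes that node feature. Below is a hedged sketch of what declaring such features on the plugin side might look like, assuming the FeatureSpecification/NodeFeature API; the real constants live in EsqlFeatures.java in the diff that follows and may be named differently.

    import java.util.Set;

    import org.elasticsearch.features.FeatureSpecification;
    import org.elasticsearch.features.NodeFeature;

    // Sketch only: the feature ids below are taken from the required_feature
    // lines in the csv-spec hunks that follow; field names are illustrative.
    public class EsqlFeaturesSketch implements FeatureSpecification {
        static final NodeFeature MV_WARN = new NodeFeature("esql.mv_warn");
        static final NodeFeature POW_DOUBLE = new NodeFeature("esql.pow_double");
        static final NodeFeature CONVERT_WARN = new NodeFeature("esql.convert_warn");

        @Override
        public Set<NodeFeature> getFeatures() {
            return Set.of(MV_WARN, POW_DOUBLE, CONVERT_WARN);
        }
    }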
--- .../src/main/resources/boolean.csv-spec | 4 +- .../src/main/resources/date.csv-spec | 4 +- .../src/main/resources/floats.csv-spec | 32 +++++++--- .../src/main/resources/ints.csv-spec | 64 ++++++++++++++----- .../src/main/resources/ip.csv-spec | 48 ++++++++++---- .../src/main/resources/math.csv-spec | 40 +++++++++--- .../src/main/resources/spatial.csv-spec | 4 +- .../src/main/resources/string.csv-spec | 50 +++++++++++---- .../src/main/resources/unsigned_long.csv-spec | 32 +++++++--- .../xpack/esql/plugin/EsqlFeatures.java | 34 +++++++++- 10 files changed, 240 insertions(+), 72 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index a7a110b5778ef..7641bd3305b1d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -62,7 +62,9 @@ avg(salary):double | always_false:boolean ; -in#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +in +required_feature: esql.mv_warn + from employees | keep emp_no, is_rehired, still_hired | where is_rehired in (still_hired, true) | where is_rehired != still_hired; ignoreOrder:true warning:Line 1:63: evaluation of [is_rehired in (still_hired, true)] failed, treating result as null. Only first 20 failures recorded. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 17960fde31074..f56cba7031def 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -182,7 +182,9 @@ string:keyword |datetime:date // end::to_datetime-str-result[] ; -convertFromUnsignedLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertFromUnsignedLong +required_feature:esql.convert_warn + row ul = [9223372036854775808, 520128000000] | eval dt = to_datetime(ul); warning:Line 1:58: evaluation of [to_datetime(ul)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:58: org.elasticsearch.xpack.ql.InvalidArgumentException: [9223372036854775808] out of [long] range diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index d62b7fb3d6681..44e437b9683ce 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -91,7 +91,9 @@ int:integer |dbl:double 520128 |520128 ; -lessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +lessThanMultivalue +required_feature: esql.mv_warn + from employees | where salary_change < 1 | keep emp_no, salary_change | sort emp_no | limit 5; warning:Line 1:24: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -105,7 +107,9 @@ emp_no:integer |salary_change:double 10030 |-0.4 ; -greaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +greaterThanMultivalue +required_feature: esql.mv_warn + from employees | where salary_change > 1 | keep emp_no, salary_change | sort emp_no | limit 5; warning:Line 1:24: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -119,7 +123,9 @@ emp_no:integer |salary_change:double 10079 |7.58 ; -equalToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToMultivalue +required_feature: esql.mv_warn + from employees | where salary_change == 1.19 | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -129,7 +135,9 @@ emp_no:integer |salary_change:double 10001 |1.19 ; -equalToOrEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToOrEqualToMultivalue +required_feature: esql.mv_warn + from employees | where salary_change == 1.19 or salary_change == 7.58 | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -140,7 +148,9 @@ emp_no:integer |salary_change:double 10079 |7.58 ; -inMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +inMultivalue +required_feature: esql.mv_warn + from employees | where salary_change in (1.19, 7.58) | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change in (1.19, 7.58)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -151,7 +161,9 @@ emp_no:integer |salary_change:double 10079 |7.58 ; -notLessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notLessThanMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change < 1) | keep emp_no, salary_change | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -165,7 +177,9 @@ emp_no:integer |salary_change:double 10079 | 7.58 ; -notGreaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notGreaterThanMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change > 1) | keep emp_no, salary_change | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change > 1)] failed, treating result as null. 
Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -179,7 +193,9 @@ emp_no:integer |salary_change:double 10030 | -0.4 ; -notEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notEqualToMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change == 1.19) | keep emp_no, salary_change | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change == 1.19)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 77ace3bceb721..deb398661aa80 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -1,6 +1,8 @@ // Integral types-specific tests -inLongAndInt#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +inLongAndInt +required_feature: esql.mv_warn + from employees | where avg_worked_seconds in (372957040, salary_change.long, 236703986) | where emp_no in (10017, emp_no - 1) | keep emp_no, avg_worked_seconds; warning:Line 1:24: evaluation of [avg_worked_seconds in (372957040, salary_change.long, 236703986)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -65,7 +67,9 @@ long:long |ul:ul [501379200000, 520128000000] |[501379200000, 520128000000] ; -convertDoubleToUL#[skip:-8.11.99, reason:ql exceptions updated in 8.12] +convertDoubleToUL +required_feature:esql.convert_warn + row d = 123.4 | eval ul = to_ul(d), overflow = to_ul(1e20); warning:Line 1:48: evaluation of [to_ul(1e20)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:48: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E20] out of [unsigned_long] range @@ -122,7 +126,9 @@ int:integer |long:long [5013792, 520128] |[5013792, 520128] ; -convertULToLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertULToLong +required_feature:esql.convert_warn + row ul = [9223372036854775807, 9223372036854775808] | eval long = to_long(ul); warning:Line 1:67: evaluation of [to_long(ul)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:67: org.elasticsearch.xpack.ql.InvalidArgumentException: [9223372036854775808] out of [long] range @@ -163,7 +169,9 @@ str1:keyword |str2:keyword |str3:keyword |long1:long |long2:long |long3:long // end::to_long-str-result[] ; -convertDoubleToLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertDoubleToLong +required_feature:esql.convert_warn + row d = 123.4 | eval d2l = to_long(d), overflow = to_long(1e19); warning:Line 1:51: evaluation of [to_long(1e19)] failed, treating result as null. 
Only first 20 failures recorded. warning:Line 1:51: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E19] out of [long] range @@ -181,7 +189,9 @@ int:integer |ii:integer [5013792, 520128] |[5013792, 520128] ; -convertLongToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertLongToInt +required_feature:esql.convert_warn + // tag::to_int-long[] ROW long = [5013792, 2147483647, 501379200000] | EVAL int = TO_INTEGER(long) @@ -196,7 +206,9 @@ long:long |int:integer // end::to_int-long-result[] ; -convertULToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertULToInt +required_feature:esql.convert_warn + row ul = [2147483647, 9223372036854775808] | eval int = to_int(ul); warning:Line 1:57: evaluation of [to_int(ul)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:57: org.elasticsearch.xpack.ql.InvalidArgumentException: [9223372036854775808] out of [integer] range @@ -226,7 +238,9 @@ int_str:keyword |int_dbl_str:keyword |is2i:integer|ids2i:integer 2147483647 |2147483646.2 |2147483647 |2147483646 ; -convertStringToIntFail#[skip:-8.11.99, reason:double rounding in conversion updated in 8.12] +convertStringToIntFail +required_feature: esql.mv_warn + row str1 = "2147483647.2", str2 = "2147483648", non = "no number" | eval i1 = to_integer(str1), i2 = to_integer(str2), noi = to_integer(non); warning:Line 1:79: evaluation of [to_integer(str1)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:79: java.lang.NumberFormatException: For input string: \"2147483647.2\" @@ -239,7 +253,9 @@ str1:keyword |str2:keyword |non:keyword |i1:integer |i2:integer | 2147483647.2 |2147483648 |no number |null |null |null ; -convertDoubleToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] +convertDoubleToInt +required_feature:esql.convert_warn + row d = 123.4 | eval d2i = to_integer(d), overflow = to_integer(1e19); warning:Line 1:54: evaluation of [to_integer(1e19)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:54: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E19] out of [integer] range @@ -248,7 +264,9 @@ d:double |d2i:integer |overflow:integer 123.4 |123 |null ; -lessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +lessThanMultivalue +required_feature: esql.mv_warn + from employees | where salary_change.int < 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; warning:Line 1:24: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -262,7 +280,9 @@ emp_no:integer |salary_change.int:integer 10030 | 0 ; -greaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +greaterThanMultivalue +required_feature: esql.mv_warn + from employees | where salary_change.int > 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; warning:Line 1:24: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -276,7 +296,9 @@ emp_no:integer |salary_change.int:integer 10086 |13 ; -equalToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToMultivalue +required_feature: esql.mv_warn + from employees | where salary_change.int == 0 | keep emp_no, salary_change.int | sort emp_no; warning:Line 1:24: evaluation of [salary_change.int == 0] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -289,7 +311,9 @@ emp_no:integer |salary_change.int:integer 10093 | 0 ; -equalToOrEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToOrEqualToMultivalue +required_feature: esql.mv_warn + from employees | where salary_change.int == 1 or salary_change.int == 8 | keep emp_no, salary_change.int | sort emp_no; warning:Line 1:24: evaluation of [salary_change.int] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -300,7 +324,9 @@ emp_no:integer |salary_change.int:integer 10044 |8 ; -inMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +inMultivalue +required_feature: esql.mv_warn + from employees | where salary_change.int in (1, 7) | keep emp_no, salary_change.int | sort emp_no; warning:Line 1:24: evaluation of [salary_change.int in (1, 7)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -311,7 +337,9 @@ emp_no:integer |salary_change.int:integer 10079 |7 ; -notLessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notLessThanMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change.int < 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change.int < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -325,7 +353,9 @@ emp_no:integer |salary_change.int:integer 10079 | 7 ; -notGreaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notGreaterThanMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change.int > 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change.int > 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded.] 
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -339,7 +369,9 @@ emp_no:integer |salary_change.int:integer 10020 | -5 ; -notEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notEqualToMultivalue +required_feature: esql.mv_warn + from employees | where not(salary_change.int == 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warning:Line 1:24: evaluation of [not(salary_change.int == 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int == 1] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index d79a19aeb3962..c77c0e6747e87 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -15,7 +15,9 @@ eth1 |epsilon |null eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -equals#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equals +required_feature: esql.mv_warn + from hosts | sort host, card | where ip0 == ip1 | keep card, host, ip0, ip1; warning:Line 1:38: evaluation of [ip0 == ip1] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:38: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -57,7 +59,9 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9 eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0]|[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -lessThan#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +lessThan +required_feature: esql.mv_warn + from hosts | sort host, card | where ip0 < ip1 | keep card, host, ip0, ip1; warning:Line 1:38: evaluation of [ip0 < ip1] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:38: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -68,7 +72,9 @@ eth1 |beta |127.0.0.1 |128.0.0.1 lo0 |gamma |fe80::cae2:65ff:fece:feb9|fe81::cae2:65ff:fece:feb9 ; -notEquals#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notEquals +required_feature: esql.mv_warn + from hosts | sort host, card, ip1 | where ip0 != ip1 | keep card, host, ip0, ip1; warning:Line 1:43: evaluation of [ip0 != ip1] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:43: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -118,7 +124,9 @@ ip0:ip |ip1:ip null |[127.0.0.1, 127.0.0.2, 127.0.0.3] ; -conditional#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +conditional +required_feature: esql.mv_warn + from hosts | eval eq=case(ip0==ip1, ip0, ip1) | keep eq, ip0, ip1; ignoreOrder:true warning:Line 1:27: evaluation of [ip0==ip1] failed, treating result as null. Only first 20 failures recorded. 
@@ -137,7 +145,9 @@ fe80::cae2:65ff:fece:fec1 |[fe80::cae2:65ff:fece:feb [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0]|[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -in#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +in +required_feature: esql.mv_warn + from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true warning:Line 1:27: evaluation of [ip0==ip1] failed, treating result as null. Only first 20 failures recorded. @@ -156,7 +166,9 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9 eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1]|fe80::cae2:65ff:fece:fec1|fe80::cae2:65ff:fece:fec1 ; -cidrMatchSimple#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +cidrMatchSimple +required_feature: esql.mv_warn + from hosts | where cidr_match(ip1, "127.0.0.2/32") | keep card, host, ip0, ip1; warning:Line 1:20: evaluation of [cidr_match(ip1, \"127.0.0.2/32\")] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -165,7 +177,9 @@ card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 ; -cidrMatchNullField#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +cidrMatchNullField +required_feature: esql.mv_warn + from hosts | where cidr_match(ip0, "127.0.0.2/32") is null | keep card, host, ip0, ip1; ignoreOrder:true warning:Line 1:20: evaluation of [cidr_match(ip0, \"127.0.0.2/32\")] failed, treating result as null. Only first 20 failures recorded. @@ -177,7 +191,9 @@ eth1 |epsilon |null eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; -cdirMatchMultipleArgs#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +cdirMatchMultipleArgs +required_feature: esql.mv_warn + //tag::cdirMatchMultipleArgs[] FROM hosts | WHERE CIDR_MATCH(ip1, "127.0.0.2/32", "127.0.0.3/32") @@ -195,7 +211,9 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 //end::cdirMatchMultipleArgs-result[] ; -cidrMatchFunctionArg#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +cidrMatchFunctionArg +required_feature: esql.mv_warn + from hosts | where cidr_match(ip1, concat("127.0.0.2", "/32"), "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true warning:Line 1:20: evaluation of [cidr_match(ip1, concat(\"127.0.0.2\", \"/32\"), \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. @@ -206,7 +224,9 @@ eth1 |beta |127.0.0.1 |127.0.0.2 eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 ; -cidrMatchFieldArg#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +cidrMatchFieldArg +required_feature: esql.mv_warn + from hosts | eval cidr="127.0.0.2" | where cidr_match(ip1, cidr, "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true warning:Line 1:44: evaluation of [cidr_match(ip1, cidr, \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. 
@@ -252,7 +272,9 @@ eth1 |alpha |::1 |::1 eth0 |beta |127.0.0.1 |::1 ; -pushDownIPWithIn#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +pushDownIPWithIn +required_feature: esql.mv_warn + from hosts | where ip1 in (to_ip("::1"), to_ip("127.0.0.1")) | keep card, host, ip0, ip1; ignoreOrder:true warning:Line 1:20: evaluation of [ip1 in (to_ip(\"::1\"), to_ip(\"127.0.0.1\"))] failed, treating result as null. Only first 20 failures recorded. @@ -264,7 +286,9 @@ eth1 |alpha |::1 |::1 eth0 |beta |127.0.0.1 |::1 ; -pushDownIPWithComparision#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +pushDownIPWithComparision +required_feature: esql.mv_warn + from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; ignoreOrder:true warning:Line 1:20: evaluation of [ip1 > to_ip(\"127.0.0.1\")] failed, treating result as null. Only first 20 failures recorded. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index ff61ee39d9f99..d94d39f0a0c81 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -200,7 +200,9 @@ height:double | s:double 1.53 | 0.34 ; -powSalarySquared#[skip:-8.11.99,reason:return type changed in 8.12] +powSalarySquared +required_feature: esql.pow_double + from employees | eval s = pow(to_long(salary) - 75000, 2) + 10000 | keep salary, s | sort salary desc | limit 4; salary:integer | s:double @@ -615,7 +617,9 @@ base:double | exponent:integer | result:double // end::powDI-result[] ; -powIntInt#[skip:-8.11.99,reason:return type changed in 8.12] +powIntInt +required_feature: esql.pow_double + ROW base = 2, exponent = 2 | EVAL s = POW(base, exponent) ; @@ -624,7 +628,9 @@ base:integer | exponent:integer | s:double 2 | 2 | 4.0 ; -powIntIntPlusInt#[skip:-8.11.99,reason:return type changed in 8.12] +powIntIntPlusInt +required_feature: esql.pow_double + row s = 1 + pow(2, 2); s:double @@ -638,21 +644,27 @@ s:double 5 ; -powIntUL#[skip:-8.11.99,reason:return type changed in 8.12] +powIntUL +required_feature: esql.pow_double + row x = pow(1, 9223372036854775808); x:double 1 ; -powLongUL#[skip:-8.11.99,reason:return type changed in 8.12] +powLongUL +required_feature: esql.pow_double + row x = to_long(1) | eval x = pow(x, 9223372036854775808); x:double 1 ; -powUnsignedLongUL#[skip:-8.11.99,reason:return type changed in 8.12] +powUnsignedLongUL +required_feature: esql.pow_double + row x = to_ul(1) | eval x = pow(x, 9223372036854775808); x:double @@ -675,14 +687,18 @@ x:double null ; -powULInt#[skip:-8.11.99,reason:return type changed in 8.12] +powULInt +required_feature: esql.pow_double + row x = pow(to_unsigned_long(9223372036854775807), 1); x:double 9223372036854775807 ; -powULIntOverrun#[skip:-8.11.99,reason:return type changed in 8.12] +powULIntOverrun +required_feature: esql.pow_double + ROW x = POW(9223372036854775808, 2) ; @@ -702,14 +718,18 @@ x:double // end::pow2d-result[] ; -powULLong#[skip:-8.11.99,reason:return type changed in 8.12] +powULLong +required_feature: esql.pow_double + row x = to_long(10) | eval x = pow(to_unsigned_long(10), x); x:double 10000000000 ; -powULLongOverrun#[skip:-8.11.99,reason:return type changed in 8.12] +powULLongOverrun +required_feature: esql.pow_double + row x = to_long(100) | eval x = pow(to_unsigned_long(10), x); x:double diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 5c789cee0492f..02da586c6f357 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -2,7 +2,9 @@ # Tests for GEO_POINT type # -convertFromStringQuantize#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +convertFromStringQuantize +required_feature: esql.geo_point + row wkt = "POINT(42.97109629958868 14.7552534006536)" | eval pt = to_geopoint(wkt); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 2dbaf4c695e69..5a44f16dd60af 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -300,7 +300,7 @@ emp_no:integer | name:keyword // Note: no matches in MV returned in -required_feature: esql.mv_load +required_feature: esql.mv_warn from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; ignoreOrder:true @@ -491,7 +491,9 @@ emp_no:integer |positions:keyword 10005 |null |null ; -lessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +lessThanMultivalue +required_feature: esql.mv_warn + from employees | where job_positions < "C" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -502,7 +504,9 @@ emp_no:integer |job_positions:keyword 10068 |Architect ; -greaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +greaterThanMultivalue +required_feature: esql.mv_warn + from employees | where job_positions > "C" | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -517,7 +521,9 @@ emp_no:integer |job_positions:keyword 10021 |Support Engineer ; -equalToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToMultivalue +required_feature: esql.mv_warn + from employees | where job_positions == "Accountant" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -527,7 +533,9 @@ emp_no:integer |job_positions:keyword 10025 |Accountant ; -equalToOrEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +equalToOrEqualToMultivalue +required_feature: esql.mv_warn + from employees | where job_positions == "Accountant" or job_positions == "Tech Lead" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -538,7 +546,9 @@ emp_no:integer |job_positions:keyword 10025 |Accountant ; -inMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +inMultivalue +required_feature: esql.mv_warn + from employees | where job_positions in ("Accountant", "Tech Lead") | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions in (\"Accountant\", \"Tech Lead\")] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -549,7 +559,9 @@ emp_no:integer |job_positions:keyword 10025 |Accountant ; -notLessThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notLessThanMultivalue +required_feature: esql.mv_warn + from employees | where not(job_positions < "C") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions < \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -564,7 +576,9 @@ emp_no:integer |job_positions:keyword 10021 |Support Engineer ; -notGreaterThanMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notGreaterThanMultivalue +required_feature: esql.mv_warn + from employees | where not(job_positions > "C") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions > \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded.] warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -575,7 +589,9 @@ emp_no:integer |job_positions:keyword 10068 |Architect ; -notEqualToMultivalue#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +notEqualToMultivalue +required_feature: esql.mv_warn + from employees | where not(job_positions == "Accountant") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions == \"Accountant\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded.] 
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] @@ -775,7 +791,9 @@ beta | Kubernetes cluster | beta k8s server beta | Kubernetes cluster | [beta k8s server, beta k8s server2] ; -lengthOfText#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +lengthOfText +required_feature: esql.mv_warn + from hosts | where host=="epsilon" | eval l1 = length(host_group), l2 = length(description) | keep l1, l2; ignoreOrder:true warning:Line 1:73: evaluation of [length(description)] failed, treating result as null. Only first 20 failures recorded. @@ -787,7 +805,9 @@ null | 19 17 | null ; -startsWithText#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +startsWithText +required_feature: esql.mv_warn + from hosts | where host=="epsilon" | eval l1 = starts_with(host_group, host), l2 = starts_with(description, host) | keep l1, l2; ignoreOrder:true warning:Line 1:84: evaluation of [starts_with(description, host)] failed, treating result as null. Only first 20 failures recorded. @@ -799,7 +819,9 @@ false | null false | null ; -substringOfText#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +substringOfText +required_feature: esql.mv_warn + from hosts | where host=="epsilon" | eval l1 = substring(host_group, 0, 5), l2 = substring(description, 0, 5) | keep l1, l2; ignoreOrder:true warning:Line 1:82: evaluation of [substring(description, 0, 5)] failed, treating result as null. Only first 20 failures recorded. @@ -811,7 +833,9 @@ Gatew | null Gatew | null ; -concatOfText#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +concatOfText +required_feature: esql.mv_warn + from hosts | where host == "epsilon" | eval l1 = concat(host, "/", host_group), l2 = concat(host_group, "/", description) | sort l1 | keep l1, l2; warning:Line 1:86: evaluation of [concat(host_group, \"/\", description)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:86: java.lang.IllegalArgumentException: single-value function encountered multi-value diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec index 523a0ef7c9eed..c6f24d876240f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec @@ -45,7 +45,9 @@ from ul_logs | sort bytes_in desc nulls last, id | limit 12; 2017-11-10T20:26:21.000Z|17067060651018256448|1722789377000665830 |67 |OK ; -filterPushDownGT#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +filterPushDownGT +required_feature: esql.mv_warn + from ul_logs | where bytes_in >= to_ul(74330435873664882) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -65,7 +67,9 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc 2703254959364209157|2703 |18 ; -filterPushDownRange#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +filterPushDownRange +required_feature: esql.mv_warn + from ul_logs | where bytes_in >= to_ul(74330435873664882) | where bytes_in <= to_ul(316080452389500167) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -79,7 +83,9 @@ warning:#[Emulated:Line 1:67: java.lang.IllegalArgumentException: single-value f 316080452389500167 |316 |25 ; -filterPushDownIn#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +filterPushDownIn +required_feature: esql.mv_warn + // TODO: testing framework doesn't perform implicit conversion to UL of given values, needs explicit conversion from ul_logs | where bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241)) | sort bytes_in | keep bytes_in, id; warning:Line 1:22: evaluation of [bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241))] failed, treating result as null. Only first 20 failures recorded. @@ -91,7 +97,9 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc 195161570976258241 |88 ; -filterOnFieldsEquality#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +filterOnFieldsEquality +required_feature: esql.mv_warn + from ul_logs | where bytes_in == bytes_out; warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -100,7 +108,9 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc 2017-11-10T21:12:17.000Z|16002960716282089759|16002960716282089759|34 |OK ; -filterOnFieldsInequality#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +filterOnFieldsInequality +required_feature: esql.mv_warn + from ul_logs | sort id | where bytes_in < bytes_out | eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; warning:Line 1:32: evaluation of [bytes_in < bytes_out] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:32: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -129,7 +139,9 @@ from ul_logs | stats c = count(bytes_in) by bytes_in | sort c desc, bytes_in des 1 |18317075104972913640 ; -case#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +case +required_feature: esql.mv_warn + from ul_logs | where case(bytes_in == to_ul(154551962150890564), true, false); warning:Line 1:27: evaluation of [bytes_in == to_ul(154551962150890564)] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:27: java.lang.IllegalArgumentException: single-value function encountered multi-value @@ -152,7 +164,9 @@ FROM ul_logs 2017-11-10T20:34:43.000Z | 17764691215469285192 | 1.75E19 ; -toDegrees#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +toDegrees +required_feature: esql.mv_warn + FROM ul_logs | WHERE bytes_in == bytes_out | EVAL deg = TO_DEGREES(bytes_in) | KEEP bytes_in, deg ; warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. @@ -162,7 +176,9 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc 16002960716282089759 | 9.169021087566165E20 ; -toRadians#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] +toRadians +required_feature: esql.mv_warn + FROM ul_logs | WHERE bytes_in == bytes_out | EVAL rad = TO_RADIANS(bytes_in) | KEEP bytes_in, rad ; warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 75dee029ad523..2b7eadb16f444 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -14,13 +14,43 @@ import java.util.Map; public class EsqlFeatures implements FeatureSpecification { - private static final NodeFeature MV_LOAD = new NodeFeature("esql.mv_load"); + /** + * When we added the warnings for multivalued fields emitting {@code null} + * when they touched multivalued fields. Added in #102417. + */ + private static final NodeFeature MV_WARN = new NodeFeature("esql.mv_warn"); + + /** + * Support for loading {@code geo_point} fields. Added in #102177. + */ + private static final NodeFeature GEO_POINT_SUPPORT = new NodeFeature("esql.geo_point"); + + /** + * When we added the warnings when conversion functions fail. Like {@code TO_INT('foo')}. + * Added in ESQL-1183. + */ + private static final NodeFeature CONVERT_WARN = new NodeFeature("esql.convert_warn"); + + /** + * When we flipped the return type of {@code POW} to always return a double. Changed + * in #102183. + */ + private static final NodeFeature POW_DOUBLE = new NodeFeature("esql.pow_double"); + + // /** + // * Support for loading {@code geo_point} fields. + // */ + // private static final NodeFeature GEO_SHAPE_SUPPORT = new NodeFeature("esql.geo_shape"); @Override public Map getHistoricalFeatures() { return Map.ofEntries( Map.entry(TransportEsqlStatsAction.ESQL_STATS_FEATURE, Version.V_8_11_0), - Map.entry(MV_LOAD, Version.V_8_12_0) + Map.entry(MV_WARN, Version.V_8_12_0), + Map.entry(GEO_POINT_SUPPORT, Version.V_8_12_0), + Map.entry(CONVERT_WARN, Version.V_8_12_0), + Map.entry(POW_DOUBLE, Version.V_8_12_0) + // Map.entry(GEO_SHAPE_SUPPORT, Version.V_8_13_0) ); } } From 088907b0978f234c106cf742fe702048d847c507 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 13 Mar 2024 11:50:44 -0700 Subject: [PATCH 194/248] Revert "muted test RestSQLIt >> testCompressCursor (#106295)" This reverts commit 25e31b91b4c1fb9ad5e649381f82984a47117ec7. 
--- .../org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index c24a41ce9e2f0..ca9532d8dc7d0 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -1622,7 +1622,6 @@ public void testAsyncTextPaginated() throws IOException, InterruptedException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-serverless/issues/1501") public void testCompressCursor() throws IOException { String doc = IntStream.range(0, 1000) .mapToObj(i -> String.format(Locale.ROOT, "\"field%d\": %d", i, i)) From bf49f86952b84641340d86ac9fe1d20809a4261f Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Wed, 13 Mar 2024 20:03:09 +0100 Subject: [PATCH 195/248] Fix #106126 in 8.14 (#106319) --- .../elasticsearch/lucene/spatial/CentroidCalculatorTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index fce58b07eb090..f28a45e702e39 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -427,6 +427,7 @@ private Matcher matchDouble(double value) { // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. double delta = (value > 1e28 || value < -1e28) ? Math.abs(value / 1e6) : (value > 1e20 || value < -1e20) ? Math.abs(value / 1e10) + : (value > 1e10 || value < -1e10) ? Math.abs(value / 1e15) : DELTA; return closeTo(value, delta); } From 92061f0d6c17f95dad01d867fc088c0e52893fb9 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 13 Mar 2024 21:08:41 +0100 Subject: [PATCH 196/248] Add data stream lifecycle to kibana reporting template (#106259) * Add data stream lifecycle to kibana reporting template As a follow-up to #97765 this commit adds a lifecycle with infinite retention for Kibana reporting data streams. It also adds customization that follows our best practices with the optional `kibana-reporting@custom` component template. 
* Update docs/changelog/106259.yaml --- docs/changelog/106259.yaml | 5 +++++ .../src/main/resources/kibana-reporting@template.json | 3 +++ .../resources/rest-api-spec/test/stack/10_basic.yml | 1 + 3 files changed, 9 insertions(+) create mode 100644 docs/changelog/106259.yaml diff --git a/docs/changelog/106259.yaml b/docs/changelog/106259.yaml new file mode 100644 index 0000000000000..d56b5e5a5e379 --- /dev/null +++ b/docs/changelog/106259.yaml @@ -0,0 +1,5 @@ +pr: 106259 +summary: Add data stream lifecycle to kibana reporting template +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json index b92942ff010d6..9c4da646c3399 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json @@ -5,7 +5,10 @@ "hidden": true }, "allow_auto_create": true, + "composed_of": ["kibana-reporting@custom"], + "ignore_missing_component_templates": ["kibana-reporting@custom"], "template": { + "lifecycle": {}, "settings": { "number_of_shards": 1, "auto_expand_replicas": "0-1" diff --git a/x-pack/plugin/stack/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/stack/10_basic.yml b/x-pack/plugin/stack/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/stack/10_basic.yml index b1ac564a53715..5317123783921 100644 --- a/x-pack/plugin/stack/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/stack/10_basic.yml +++ b/x-pack/plugin/stack/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/stack/10_basic.yml @@ -239,6 +239,7 @@ setup: - match: { data_streams.0.timestamp_field.name: '@timestamp' } - match: { data_streams.0.generation: 1 } - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.lifecycle.enabled: true } - match: { data_streams.0.indices.0.index_name: '/\.ds-.kibana-reporting-foo-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - set: { data_streams.0.indices.0.index_name: idx0name } From e43512916ed42fd0ed7b326ce3bea489b94548b7 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 13 Mar 2024 15:41:17 -0700 Subject: [PATCH 197/248] Fix test lib path separator on windows (#106333) --- .../java/org/elasticsearch/gradle/internal/test/TestUtil.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java index 53742b78accb3..96fde95d0dd17 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.ElasticsearchDistribution; +import java.io.File; import java.util.Locale; public class TestUtil { @@ -20,6 +21,6 @@ public static String getTestLibraryPath(String nativeLibsDir) { String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch); String existingLibraryPath = System.getProperty("java.library.path"); - return String.format(Locale.ROOT, "%s/%s:%s", nativeLibsDir, platform, existingLibraryPath); + return String.format(Locale.ROOT, "%s/%s%c%s", nativeLibsDir, platform, File.pathSeparatorChar, existingLibraryPath); } } From 
d47a461db86b8ab24841b5d87121ff1dc08fd089 Mon Sep 17 00:00:00 2001 From: Lloyd Date: Thu, 14 Mar 2024 08:27:07 +0900 Subject: [PATCH 198/248] [IdP plugin] Fix exception handling (#106231) * Add regression tests that test ACS and entity id mismatch, causing us to go into the initCause branch * Fix up exception creation: initCause it not allowed because ElasticsearchException initialises the cause to `null` already if it isn't passed as a contructor param. Signed-off-by: lloydmeta --- .../idp/WildcardServiceProviderRestIT.java | 31 +++++++++++++++++++ ...ansportSamlInitiateSingleSignOnAction.java | 16 ++++++---- .../SamlInitiateSingleSignOnException.java | 7 +++-- .../WildcardServiceProviderResolverTests.java | 5 +++ 4 files changed, 50 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/identity-provider/qa/idp-rest-tests/src/javaRestTest/java/org/elasticsearch/xpack/idp/WildcardServiceProviderRestIT.java b/x-pack/plugin/identity-provider/qa/idp-rest-tests/src/javaRestTest/java/org/elasticsearch/xpack/idp/WildcardServiceProviderRestIT.java index 1d739c45047f6..7868c87f4bfe0 100644 --- a/x-pack/plugin/identity-provider/qa/idp-rest-tests/src/javaRestTest/java/org/elasticsearch/xpack/idp/WildcardServiceProviderRestIT.java +++ b/x-pack/plugin/identity-provider/qa/idp-rest-tests/src/javaRestTest/java/org/elasticsearch/xpack/idp/WildcardServiceProviderRestIT.java @@ -8,8 +8,10 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -87,6 +89,35 @@ public void testInitSingleSignOnToWildcardServiceProvider() throws Exception { deleteRole(roleName); } + public void testInitSingleSignOnToWildcardServiceProviderWithMismatchedACSandEntityIds() throws Exception { + final String owner = randomAlphaOfLength(8); + final String service = randomAlphaOfLength(8); + // From "wildcard_services.json" + final String entityId = "service:" + owner + ":" + service; + final String acs = "https://" + service + "extra_stuff_lol" + ".services.example.com/api/v1/saml"; + + final String username = randomAlphaOfLength(6); + final SecureString password = new SecureString((randomAlphaOfLength(6) + randomIntBetween(10, 99)).toCharArray()); + final String roleName = username + "_role"; + final User user = createUser(username, password, roleName); + + final RoleDescriptor.ApplicationResourcePrivileges applicationPrivilege = RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("elastic-cloud") + .privileges("sso:admin") + .resources("sso:" + entityId) + .build(); + createRole(roleName, List.of(), List.of(), List.of(applicationPrivilege)); + + ResponseException exception = expectThrows( + ResponseException.class, + () -> initSso(entityId, acs, new UsernamePasswordToken(username, password)) + ); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + + deleteUser(username); + deleteRole(roleName); + } + private void getMetadata(String entityId, String acs) throws IOException { final Map map = getAsMap("/_idp/saml/metadata/" + encode(entityId) + "?acs=" + encode(acs)); assertThat(map, notNullValue()); diff --git 
a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java index 85afdc96e6344..68b4759412e70 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java @@ -91,6 +91,7 @@ protected void doExecute( StatusCode.RESPONDER, RestStatus.BAD_REQUEST, "Service Provider with Entity ID [{}] and ACS [{}] is not known to this Identity Provider", + null, request.getSpEntityId(), request.getAssertionConsumerService() ) @@ -108,7 +109,8 @@ protected void doExecute( request.getAssertionConsumerService(), StatusCode.REQUESTER, RestStatus.FORBIDDEN, - "Request is missing secondary authentication" + "Request is missing secondary authentication", + null ) ); return; @@ -124,6 +126,7 @@ protected void doExecute( StatusCode.REQUESTER, RestStatus.FORBIDDEN, "User [{}] is not permitted to access service [{}]", + null, secondaryAuthentication.getUser().principal(), sp.getEntityId() ) @@ -217,6 +220,7 @@ private SamlInitiateSingleSignOnException buildSamlInitiateSingleSignOnException final String statusCode, final RestStatus restStatus, final String messageFormatStr, + final Exception cause, final Object... args ) { final SamlInitiateSingleSignOnException ex; @@ -231,10 +235,11 @@ private SamlInitiateSingleSignOnException buildSamlInitiateSingleSignOnException ex = new SamlInitiateSingleSignOnException( exceptionMessage, restStatus, + cause, new SamlInitiateSingleSignOnResponse(spEntityId, acsUrl, samlFactory.getXmlContent(response), statusCode, exceptionMessage) ); } else { - ex = new SamlInitiateSingleSignOnException(exceptionMessage, restStatus); + ex = new SamlInitiateSingleSignOnException(exceptionMessage, restStatus, cause); } return ex; } @@ -247,15 +252,14 @@ private SamlInitiateSingleSignOnException buildResponderSamlInitiateSingleSignOn ) { final String exceptionMessage = cause.getMessage(); final RestStatus restStatus = ExceptionsHelper.status(cause); - final SamlInitiateSingleSignOnException ex = buildSamlInitiateSingleSignOnException( + return buildSamlInitiateSingleSignOnException( authenticationState, spEntityId, acsUrl, StatusCode.RESPONDER, restStatus, - exceptionMessage + exceptionMessage, + cause ); - ex.initCause(cause); - return ex; } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java index ba983a84b5199..ccd2a4d0baaa5 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java @@ -21,14 +21,15 @@ public class SamlInitiateSingleSignOnException extends ElasticsearchSecurityExce public SamlInitiateSingleSignOnException( String msg, RestStatus status, + Exception cause, SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse ) { - super(msg, status); + super(msg, status, cause); this.samlInitiateSingleSignOnResponse = samlInitiateSingleSignOnResponse; } - public 
SamlInitiateSingleSignOnException(String msg, RestStatus status) { - super(msg, status); + public SamlInitiateSingleSignOnException(String msg, RestStatus status, Exception cause) { + super(msg, status, cause); } @Override diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/WildcardServiceProviderResolverTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/WildcardServiceProviderResolverTests.java index 848e14927d6c7..70e5325878c0a 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/WildcardServiceProviderResolverTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/WildcardServiceProviderResolverTests.java @@ -175,6 +175,11 @@ public void testResolveServices() throws IOException { assertThat(sp4.getAssertionConsumerService().toString(), equalTo("https://saml.example.net/12345/acs")); assertThat(sp4.getName(), equalTo("12345 at example.net")); assertThat(sp4.getPrivileges().getResource(), equalTo("service2:example:12345")); + + expectThrows( + IllegalArgumentException.class, + () -> resolver.resolve("https://zbcdef.example.com/", "https://abcdef.service.example.com/saml2/acs") + ); } public void testCaching() throws IOException { From 6d040d9bea355b1b8f97e2bec41747f35f1ad695 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 13 Mar 2024 18:05:37 -0700 Subject: [PATCH 199/248] Remove redundant BWC testing --- .buildkite/pipelines/periodic.template.yml | 1 - .buildkite/pipelines/periodic.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index 42e922462c7ac..8e92fffbc6f88 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -90,7 +90,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 80f38dc79eecc..8e1ff14eda792 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1271,7 +1271,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp From dce8a6b77f61ede59d23af10b45e702048c9d0f7 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 13 Mar 2024 18:26:12 -0700 Subject: [PATCH 200/248] Add JDK 22 release candidate to oracle jdk toolchain resolver (#106335) This commit adds the upcoming JDK22 release candidate build to be resolvable within Elasticsearch. 
--- .../OracleOpenJdkToolchainResolver.java | 71 +++++++++++++------ .../OracleOpenJdkToolchainResolverSpec.groovy | 8 ++- 2 files changed, 54 insertions(+), 25 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java index e29fdc109a104..818cb040c172e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java @@ -19,28 +19,35 @@ import org.gradle.platform.OperatingSystem; import java.net.URI; +import java.util.List; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; public abstract class OracleOpenJdkToolchainResolver extends AbstractCustomJavaToolchainResolver { + record JdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber, String hash) {} + private static final Pattern VERSION_PATTERN = Pattern.compile( "(\\d+)(\\.\\d+\\.\\d+(?:\\.\\d+)?)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?" ); - // for testing reasons we keep that a package private field - String bundledJdkVersion = VersionProperties.getBundledJdkVersion(); - JavaLanguageVersion bundledJdkMajorVersion = JavaLanguageVersion.of(VersionProperties.getBundledJdkMajorVersion()); + private static final List supportedOperatingSystems = List.of( + OperatingSystem.MAC_OS, + OperatingSystem.LINUX, + OperatingSystem.WINDOWS + ); - /** - * We need some place to map JavaLanguageVersion to build, minor version etc. - * */ - @Override - public Optional resolve(JavaToolchainRequest request) { - if (requestIsSupported(request) == false) { - return Optional.empty(); - } + // package private so it can be replaced by tests + List builds = List.of( + getBundledJdkBuild(), + // 22 release candidate + new JdkBuild(JavaLanguageVersion.of(22), "22", "36", "830ec9fcccef480bb3e73fb7ecafe059") + ); + + private JdkBuild getBundledJdkBuild() { + String bundledJdkVersion = VersionProperties.getBundledJdkVersion(); + JavaLanguageVersion bundledJdkMajorVersion = JavaLanguageVersion.of(VersionProperties.getBundledJdkMajorVersion()); Matcher jdkVersionMatcher = VERSION_PATTERN.matcher(bundledJdkVersion); if (jdkVersionMatcher.matches() == false) { throw new IllegalStateException("Unable to parse bundled JDK version " + bundledJdkVersion); @@ -48,6 +55,18 @@ public Optional resolve(JavaToolchainRequest request) { String baseVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : ""); String build = jdkVersionMatcher.group(3); String hash = jdkVersionMatcher.group(5); + return new JdkBuild(bundledJdkMajorVersion, baseVersion, build, hash); + } + + /** + * We need some place to map JavaLanguageVersion to buildNumber, minor version etc. + * */ + @Override + public Optional resolve(JavaToolchainRequest request) { + JdkBuild build = findSupportedBuild(request); + if (build == null) { + return Optional.empty(); + } OperatingSystem operatingSystem = request.getBuildPlatform().getOperatingSystem(); String extension = operatingSystem.equals(OperatingSystem.WINDOWS) ? 
"zip" : "tar.gz"; @@ -56,13 +75,13 @@ public Optional resolve(JavaToolchainRequest request) { return Optional.of( () -> URI.create( "https://download.oracle.com/java/GA/jdk" - + baseVersion + + build.version + "/" - + hash + + build.hash + "/" - + build + + build.buildNumber + "/GPL/openjdk-" - + baseVersion + + build.version + "_" + os + "-" @@ -80,20 +99,28 @@ public Optional resolve(JavaToolchainRequest request) { * 3. vendor must be any or oracle * 4. Aarch64 windows images are not supported */ - private boolean requestIsSupported(JavaToolchainRequest request) { + private JdkBuild findSupportedBuild(JavaToolchainRequest request) { if (VersionProperties.getBundledJdkVendor().toLowerCase().equals("openjdk") == false) { - return false; + return null; } JavaToolchainSpec javaToolchainSpec = request.getJavaToolchainSpec(); - if (javaToolchainSpec.getLanguageVersion().get().equals(bundledJdkMajorVersion) == false) { - return false; - } if (anyVendorOr(javaToolchainSpec.getVendor().get(), JvmVendorSpec.ORACLE) == false) { - return false; + return null; } BuildPlatform buildPlatform = request.getBuildPlatform(); Architecture architecture = buildPlatform.getArchitecture(); OperatingSystem operatingSystem = buildPlatform.getOperatingSystem(); - return Architecture.AARCH64 != architecture || OperatingSystem.WINDOWS != operatingSystem; + if (supportedOperatingSystems.contains(operatingSystem) == false + || Architecture.AARCH64 == architecture && OperatingSystem.WINDOWS == operatingSystem) { + return null; + } + + JavaLanguageVersion languageVersion = javaToolchainSpec.getLanguageVersion().get(); + for (JdkBuild build : builds) { + if (build.languageVersion.equals(languageVersion)) { + return build; + } + } + return null; } } diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy index b49e734c087cc..b076baa94c2fb 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy @@ -24,8 +24,9 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { return null } } - toolChain.bundledJdkVersion = "20+36@bdc68b4b9cbc4ebcb30745c85038d91d" - toolChain.bundledJdkMajorVersion = JavaLanguageVersion.of(20) + toolChain.builds = [ + new OracleOpenJdkToolchainResolver.JdkBuild(JavaLanguageVersion.of(20), "20", "36", "bdc68b4b9cbc4ebcb30745c85038d91d") + ] toolChain } @@ -39,7 +40,8 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { [20, anyVendor(), MAC_OS, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-aarch64_bin.tar.gz"], [20, anyVendor(), LINUX, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz"], [20, anyVendor(), LINUX, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-aarch64_bin.tar.gz"], - [20, anyVendor(), WINDOWS, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"]] + [20, anyVendor(), WINDOWS, X86_64, 
"https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"] + ] } def unsupportedRequests() { From 729fcfc4179e04f17fd4257c8e0c6366df2223d1 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 14 Mar 2024 09:50:21 +0100 Subject: [PATCH 201/248] Refactor xcontent parsing out of EsqlQueryRequest (#106310) This commit is pure refactoring - to extract xcontent request parsing out of EsqlQueryRequest. --- .../xpack/esql/action/EsqlQueryRequest.java | 168 --------------- .../xpack/esql/action/RequestXContent.java | 191 ++++++++++++++++++ .../esql/action/RestEsqlAsyncQueryAction.java | 2 +- .../esql/action/RestEsqlQueryAction.java | 2 +- .../esql/action/EsqlQueryRequestTests.java | 4 +- 5 files changed, 195 insertions(+), 172 deletions(-) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 841137d749d93..df196a0703f7d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -15,66 +15,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentLocation; -import org.elasticsearch.xcontent.XContentParseException; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.esql.parser.ContentLocation; import org.elasticsearch.xpack.esql.parser.TypedParamValue; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.function.Supplier; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ObjectParser.ValueType.VALUE_ARRAY; public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesRequest { public static TimeValue DEFAULT_KEEP_ALIVE = TimeValue.timeValueDays(5); public static TimeValue DEFAULT_WAIT_FOR_COMPLETION = TimeValue.timeValueSeconds(1); - private static final ConstructingObjectParser PARAM_PARSER = new ConstructingObjectParser<>( - "params", - true, - objects -> new TypedParamValue((String) objects[1], objects[0]) - ); - private static final ParseField VALUE = new ParseField("value"); - private static final ParseField TYPE = new ParseField("type"); - - static { - PARAM_PARSER.declareField(constructorArg(), (p, c) -> parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE); - PARAM_PARSER.declareString(constructorArg(), TYPE); - } - - private static final 
ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField COLUMNAR_FIELD = new ParseField("columnar"); - private static final ParseField FILTER_FIELD = new ParseField("filter"); - private static final ParseField PRAGMA_FIELD = new ParseField("pragma"); - private static final ParseField PARAMS_FIELD = new ParseField("params"); - private static final ParseField LOCALE_FIELD = new ParseField("locale"); - private static final ParseField PROFILE_FIELD = new ParseField("profile"); - - static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); - static final ParseField KEEP_ALIVE = new ParseField("keep_alive"); - static final ParseField KEEP_ON_COMPLETION = new ParseField("keep_on_completion"); - - private static final ObjectParser SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest); - private static final ObjectParser ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest); - private boolean async; private String query; @@ -209,136 +168,9 @@ public void keepOnCompletion(boolean keepOnCompletion) { this.keepOnCompletion = keepOnCompletion; } - public static EsqlQueryRequest fromXContentSync(XContentParser parser) { - return SYNC_PARSER.apply(parser, null); - } - - public static EsqlQueryRequest fromXContentAsync(XContentParser parser) { - return ASYNC_PARSER.apply(parser, null); - } - - private static void objectParserCommon(ObjectParser parser) { - parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); - parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); - parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); - parser.declareObject( - EsqlQueryRequest::pragmas, - (p, c) -> new QueryPragmas(Settings.builder().loadFromMap(p.map()).build()), - PRAGMA_FIELD - ); - parser.declareField(EsqlQueryRequest::params, EsqlQueryRequest::parseParams, PARAMS_FIELD, VALUE_ARRAY); - parser.declareString((request, localeTag) -> request.locale(Locale.forLanguageTag(localeTag)), LOCALE_FIELD); - parser.declareBoolean(EsqlQueryRequest::profile, PROFILE_FIELD); - } - - private static ObjectParser objectParserSync(Supplier supplier) { - ObjectParser parser = new ObjectParser<>("esql/query", false, supplier); - objectParserCommon(parser); - return parser; - } - - private static ObjectParser objectParserAsync(Supplier supplier) { - ObjectParser parser = new ObjectParser<>("esql/async_query", false, supplier); - objectParserCommon(parser); - parser.declareBoolean(EsqlQueryRequest::keepOnCompletion, KEEP_ON_COMPLETION); - parser.declareField( - EsqlQueryRequest::waitForCompletionTimeout, - (p, c) -> TimeValue.parseTimeValue(p.text(), WAIT_FOR_COMPLETION_TIMEOUT.getPreferredName()), - WAIT_FOR_COMPLETION_TIMEOUT, - ObjectParser.ValueType.VALUE - ); - parser.declareField( - EsqlQueryRequest::keepAlive, - (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE.getPreferredName()), - KEEP_ALIVE, - ObjectParser.ValueType.VALUE - ); - return parser; - } - - private static List parseParams(XContentParser p) throws IOException { - List result = new ArrayList<>(); - XContentParser.Token token = p.currentToken(); - - if (token == XContentParser.Token.START_ARRAY) { - Object value = null; - String type = null; - TypedParamValue previousParam = null; - TypedParamValue currentParam; - - while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { - XContentLocation loc = p.getTokenLocation(); - - if (token == XContentParser.Token.START_OBJECT) 
{ - // we are at the start of a value/type pair... hopefully - currentParam = PARAM_PARSER.apply(p, null); - /* - * Always set the xcontentlocation for the first param just in case the first one happens to not meet the parsing rules - * that are checked later in validateParams method. - * Also, set the xcontentlocation of the param that is different from the previous param in list when it comes to - * its type being explicitly set or inferred. - */ - if ((previousParam != null && previousParam.hasExplicitType() == false) || result.isEmpty()) { - currentParam.tokenLocation(toProto(loc)); - } - } else { - if (token == XContentParser.Token.VALUE_STRING) { - value = p.text(); - type = "keyword"; - } else if (token == XContentParser.Token.VALUE_NUMBER) { - XContentParser.NumberType numberType = p.numberType(); - if (numberType == XContentParser.NumberType.INT) { - value = p.intValue(); - type = "integer"; - } else if (numberType == XContentParser.NumberType.LONG) { - value = p.longValue(); - type = "long"; - } else if (numberType == XContentParser.NumberType.DOUBLE) { - value = p.doubleValue(); - type = "double"; - } - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - value = p.booleanValue(); - type = "boolean"; - } else if (token == XContentParser.Token.VALUE_NULL) { - value = null; - type = "null"; - } else { - throw new XContentParseException(loc, "Failed to parse object: unexpected token [" + token + "] found"); - } - - currentParam = new TypedParamValue(type, value, false); - if ((previousParam != null && previousParam.hasExplicitType()) || result.isEmpty()) { - currentParam.tokenLocation(toProto(loc)); - } - } - - result.add(currentParam); - previousParam = currentParam; - } - } - - return result; - } - - static ContentLocation toProto(org.elasticsearch.xcontent.XContentLocation toProto) { - if (toProto == null) { - return null; - } - return new ContentLocation(toProto.lineNumber(), toProto.columnNumber()); - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { // Pass the query as the description return new CancellableTask(id, type, action, query, parentTaskId, headers); } - - static org.elasticsearch.xcontent.XContentLocation fromProto(ContentLocation fromProto) { - if (fromProto == null) { - return null; - } - return new org.elasticsearch.xcontent.XContentLocation(fromProto.lineNumber, fromProto.columnNumber); - } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java new file mode 100644 index 0000000000000..8db940d5a4779 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -0,0 +1,191 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.esql.parser.ContentLocation; +import org.elasticsearch.xpack.esql.parser.TypedParamValue; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Supplier; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ObjectParser.ValueType.VALUE_ARRAY; + +/** Static methods for parsing xcontent requests to transport requests. */ +final class RequestXContent { + + private static final ConstructingObjectParser PARAM_PARSER = new ConstructingObjectParser<>( + "params", + true, + objects -> new TypedParamValue((String) objects[1], objects[0]) + ); + private static final ParseField VALUE = new ParseField("value"); + private static final ParseField TYPE = new ParseField("type"); + + static { + PARAM_PARSER.declareField(constructorArg(), (p, c) -> parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE); + PARAM_PARSER.declareString(constructorArg(), TYPE); + } + + private static final ParseField QUERY_FIELD = new ParseField("query"); + private static final ParseField COLUMNAR_FIELD = new ParseField("columnar"); + private static final ParseField FILTER_FIELD = new ParseField("filter"); + private static final ParseField PRAGMA_FIELD = new ParseField("pragma"); + private static final ParseField PARAMS_FIELD = new ParseField("params"); + private static final ParseField LOCALE_FIELD = new ParseField("locale"); + private static final ParseField PROFILE_FIELD = new ParseField("profile"); + + static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); + static final ParseField KEEP_ALIVE = new ParseField("keep_alive"); + static final ParseField KEEP_ON_COMPLETION = new ParseField("keep_on_completion"); + + private static final ObjectParser SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest); + private static final ObjectParser ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest); + + /** Parses a synchronous request. */ + static EsqlQueryRequest parseSync(XContentParser parser) { + return SYNC_PARSER.apply(parser, null); + } + + /** Parses an asynchronous request. 
*/ + static EsqlQueryRequest parseAsync(XContentParser parser) { + return ASYNC_PARSER.apply(parser, null); + } + + private static void objectParserCommon(ObjectParser parser) { + parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); + parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); + parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); + parser.declareObject( + EsqlQueryRequest::pragmas, + (p, c) -> new QueryPragmas(Settings.builder().loadFromMap(p.map()).build()), + PRAGMA_FIELD + ); + parser.declareField(EsqlQueryRequest::params, RequestXContent::parseParams, PARAMS_FIELD, VALUE_ARRAY); + parser.declareString((request, localeTag) -> request.locale(Locale.forLanguageTag(localeTag)), LOCALE_FIELD); + parser.declareBoolean(EsqlQueryRequest::profile, PROFILE_FIELD); + } + + private static ObjectParser objectParserSync(Supplier supplier) { + ObjectParser parser = new ObjectParser<>("esql/query", false, supplier); + objectParserCommon(parser); + return parser; + } + + private static ObjectParser objectParserAsync(Supplier supplier) { + ObjectParser parser = new ObjectParser<>("esql/async_query", false, supplier); + objectParserCommon(parser); + parser.declareBoolean(EsqlQueryRequest::keepOnCompletion, KEEP_ON_COMPLETION); + parser.declareField( + EsqlQueryRequest::waitForCompletionTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), WAIT_FOR_COMPLETION_TIMEOUT.getPreferredName()), + WAIT_FOR_COMPLETION_TIMEOUT, + ObjectParser.ValueType.VALUE + ); + parser.declareField( + EsqlQueryRequest::keepAlive, + (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE.getPreferredName()), + KEEP_ALIVE, + ObjectParser.ValueType.VALUE + ); + return parser; + } + + private static List parseParams(XContentParser p) throws IOException { + List result = new ArrayList<>(); + XContentParser.Token token = p.currentToken(); + + if (token == XContentParser.Token.START_ARRAY) { + Object value = null; + String type = null; + TypedParamValue previousParam = null; + TypedParamValue currentParam; + + while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { + XContentLocation loc = p.getTokenLocation(); + + if (token == XContentParser.Token.START_OBJECT) { + // we are at the start of a value/type pair... hopefully + currentParam = PARAM_PARSER.apply(p, null); + /* + * Always set the xcontentlocation for the first param just in case the first one happens to not meet the parsing rules + * that are checked later in validateParams method. + * Also, set the xcontentlocation of the param that is different from the previous param in list when it comes to + * its type being explicitly set or inferred. 
+ */ + if ((previousParam != null && previousParam.hasExplicitType() == false) || result.isEmpty()) { + currentParam.tokenLocation(toProto(loc)); + } + } else { + if (token == XContentParser.Token.VALUE_STRING) { + value = p.text(); + type = "keyword"; + } else if (token == XContentParser.Token.VALUE_NUMBER) { + XContentParser.NumberType numberType = p.numberType(); + if (numberType == XContentParser.NumberType.INT) { + value = p.intValue(); + type = "integer"; + } else if (numberType == XContentParser.NumberType.LONG) { + value = p.longValue(); + type = "long"; + } else if (numberType == XContentParser.NumberType.DOUBLE) { + value = p.doubleValue(); + type = "double"; + } + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + value = p.booleanValue(); + type = "boolean"; + } else if (token == XContentParser.Token.VALUE_NULL) { + value = null; + type = "null"; + } else { + throw new XContentParseException(loc, "Failed to parse object: unexpected token [" + token + "] found"); + } + + currentParam = new TypedParamValue(type, value, false); + if ((previousParam != null && previousParam.hasExplicitType()) || result.isEmpty()) { + currentParam.tokenLocation(toProto(loc)); + } + } + + result.add(currentParam); + previousParam = currentParam; + } + } + + return result; + } + + static ContentLocation toProto(org.elasticsearch.xcontent.XContentLocation toProto) { + if (toProto == null) { + return null; + } + return new ContentLocation(toProto.lineNumber(), toProto.columnNumber()); + } + + static org.elasticsearch.xcontent.XContentLocation fromProto(ContentLocation fromProto) { + if (fromProto == null) { + return null; + } + return new org.elasticsearch.xcontent.XContentLocation(fromProto.lineNumber, fromProto.columnNumber); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java index 0b2bad2eb22d3..678c2ca3bed5c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -43,7 +43,7 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = EsqlQueryRequest.fromXContentAsync(parser); + esqlRequest = RequestXContent.parseAsync(parser); } LOGGER.info("Beginning execution of ESQL async query.\nQuery string: [{}]", esqlRequest.query()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index ddce16857f6f9..a03e5266bf520 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -42,7 +42,7 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = EsqlQueryRequest.fromXContentSync(parser); + esqlRequest = RequestXContent.parseSync(parser); } LOGGER.info("Beginning execution of ESQL query.\nQuery string: 
[{}]", esqlRequest.query()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index dd8118c8074fb..5b16691bcee77 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -261,13 +261,13 @@ private static void assertParserErrorMessage(String json, String message) { } static EsqlQueryRequest parseEsqlQueryRequestSync(String json) throws IOException { - var request = parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentSync); + var request = parseEsqlQueryRequest(json, RequestXContent::parseSync); assertFalse(request.async()); return request; } static EsqlQueryRequest parseEsqlQueryRequestAsync(String json) throws IOException { - var request = parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentAsync); + var request = parseEsqlQueryRequest(json, RequestXContent::parseAsync); assertTrue(request.async()); return request; } From 4b1fe21c5c2005ab384cf63e57a3009e75be2f5c Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 14 Mar 2024 09:50:59 +0100 Subject: [PATCH 202/248] ES|QL refactor PositionToXContent (#106298) This commit is pure refactoring - to extract PositionToXContent out of ColumnInfo. --- .../xpack/esql/action/ColumnInfo.java | 164 ---------------- .../xpack/esql/action/PositionToXContent.java | 179 ++++++++++++++++++ .../esql/action/ResponseXContentUtils.java | 10 +- 3 files changed, 186 insertions(+), 167 deletions(-) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java index 79ce1754f7163..3aa609e55b07c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java @@ -7,36 +7,18 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.lucene.UnsupportedValueSource; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; import static 
org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public record ColumnInfo(String name, String type) implements Writeable { @@ -73,150 +55,4 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.endObject(); return builder; } - - public abstract class PositionToXContent { - protected final Block block; - - PositionToXContent(Block block) { - this.block = block; - } - - public XContentBuilder positionToXContent(XContentBuilder builder, ToXContent.Params params, int position) throws IOException { - if (block.isNull(position)) { - return builder.nullValue(); - } - int count = block.getValueCount(position); - int start = block.getFirstValueIndex(position); - if (count == 1) { - return valueToXContent(builder, params, start); - } - builder.startArray(); - int end = start + count; - for (int i = start; i < end; i++) { - valueToXContent(builder, params, i); - } - return builder.endArray(); - } - - protected abstract XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException; - } - - public PositionToXContent positionToXContent(Block block, BytesRef scratch) { - return switch (type) { - case "long" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(((LongBlock) block).getLong(valueIndex)); - } - }; - case "integer" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(((IntBlock) block).getInt(valueIndex)); - } - }; - case "double" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(((DoubleBlock) block).getDouble(valueIndex)); - } - }; - case "unsigned_long" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - long l = ((LongBlock) block).getLong(valueIndex); - return builder.value(unsignedLongAsNumber(l)); - } - }; - case "keyword", "text" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - if (builder.contentType() == XContentType.CBOR && val.offset != 0) { - // cbor needs a zero offset because of a bug in jackson - // https://github.com/FasterXML/jackson-dataformats-binary/issues/366 - val = BytesRef.deepCopyOf(scratch); - } - return builder.utf8Value(val.bytes, val.offset, val.length); - } - }; - case "ip" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - return 
builder.value(DocValueFormat.IP.format(val)); - } - }; - case "date" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - long longVal = ((LongBlock) block).getLong(valueIndex); - return builder.value(UTC_DATE_TIME_FORMATTER.formatMillis(longVal)); - } - }; - case "geo_point", "geo_shape" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); - } - }; - case "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); - } - }; - case "boolean" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(((BooleanBlock) block).getBoolean(valueIndex)); - } - }; - case "version" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - return builder.value(new Version(val).toString()); - } - }; - case "null" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.nullValue(); - } - }; - case "unsupported" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - return builder.value(UnsupportedValueSource.UNSUPPORTED_OUTPUT); - } - }; - case "_source" -> new PositionToXContent(block) { - @Override - protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) - throws IOException { - BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { - parser.nextToken(); - return builder.copyCurrentStructure(parser); - } - } - }; - default -> throw new IllegalArgumentException("can't convert values of type [" + type + "]"); - }; - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java new file mode 100644 index 0000000000000..669b22883fd8c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.lucene.UnsupportedValueSource; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.versionfield.Version; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; +import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + +abstract class PositionToXContent { + protected final Block block; + + PositionToXContent(Block block) { + this.block = block; + } + + public XContentBuilder positionToXContent(XContentBuilder builder, ToXContent.Params params, int position) throws IOException { + if (block.isNull(position)) { + return builder.nullValue(); + } + int count = block.getValueCount(position); + int start = block.getFirstValueIndex(position); + if (count == 1) { + return valueToXContent(builder, params, start); + } + builder.startArray(); + int end = start + count; + for (int i = start; i < end; i++) { + valueToXContent(builder, params, i); + } + return builder.endArray(); + } + + protected abstract XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException; + + public static PositionToXContent positionToXContent(ColumnInfo columnInfo, Block block, BytesRef scratch) { + return switch (columnInfo.type()) { + case "long" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(((LongBlock) block).getLong(valueIndex)); + } + }; + case "integer" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(((IntBlock) block).getInt(valueIndex)); + } + }; + case "double" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(((DoubleBlock) block).getDouble(valueIndex)); + } + }; + case "unsigned_long" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + long l = ((LongBlock) block).getLong(valueIndex); + return builder.value(unsignedLongAsNumber(l)); + } + }; + case "keyword", "text" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int 
valueIndex) + throws IOException { + BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); + if (builder.contentType() == XContentType.CBOR && val.offset != 0) { + // cbor needs a zero offset because of a bug in jackson + // https://github.com/FasterXML/jackson-dataformats-binary/issues/366 + val = BytesRef.deepCopyOf(scratch); + } + return builder.utf8Value(val.bytes, val.offset, val.length); + } + }; + case "ip" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); + return builder.value(DocValueFormat.IP.format(val)); + } + }; + case "date" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + long longVal = ((LongBlock) block).getLong(valueIndex); + return builder.value(UTC_DATE_TIME_FORMATTER.formatMillis(longVal)); + } + }; + case "geo_point", "geo_shape" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); + } + }; + case "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); + } + }; + case "boolean" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(((BooleanBlock) block).getBoolean(valueIndex)); + } + }; + case "version" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); + return builder.value(new Version(val).toString()); + } + }; + case "null" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.nullValue(); + } + }; + case "unsupported" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + return builder.value(UnsupportedValueSource.UNSUPPORTED_OUTPUT); + } + }; + case "_source" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { + parser.nextToken(); + return builder.copyCurrentStructure(parser); + } + } + }; + default -> throw new IllegalArgumentException("can't convert values of type [" + columnInfo.type() + "]"); + }; + } +} diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java index ca40faff81c55..b9695445e6e6a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -78,7 +78,11 @@ static Iterator columnarValues(List columns, L return Iterators.concat( Iterators.single(((builder, params) -> builder.startArray())), Iterators.flatMap(pages.iterator(), page -> { - ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); + PositionToXContent toXContent = PositionToXContent.positionToXContent( + columns.get(column), + page.getBlock(column), + scratch + ); return Iterators.forRange( 0, page.getPositionCount(), @@ -96,9 +100,9 @@ static Iterator rowValues(List columns, List

        { final int columnCount = columns.size(); assert page.getBlockCount() == columnCount : page.getBlockCount() + " != " + columnCount; - final ColumnInfo.PositionToXContent[] toXContents = new ColumnInfo.PositionToXContent[columnCount]; + final PositionToXContent[] toXContents = new PositionToXContent[columnCount]; for (int column = 0; column < columnCount; column++) { - toXContents[column] = columns.get(column).positionToXContent(page.getBlock(column), scratch); + toXContents[column] = PositionToXContent.positionToXContent(columns.get(column), page.getBlock(column), scratch); } return Iterators.forRange(0, page.getPositionCount(), position -> (builder, params) -> { builder.startArray(); From 2b3cfcec2a444e4ca79086c01d70f7a3bb6df9f1 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 14 Mar 2024 10:10:43 +0100 Subject: [PATCH 203/248] Do not trip circuit breaker in GET /_cluster/allocation/explain (#106342) This changes allocation explain API to not trip circuit breaker so that it is always available for troubleshooting even if cluster is overloaded. --- ...ansportClusterAllocationExplainAction.java | 1 + .../RestClusterAllocationExplainAction.java | 5 ++ ...rtClusterAllocationExplainActionTests.java | 77 +++++++++++++++++++ ...stClusterAllocationExplainActionTests.java | 20 +++++ 4 files changed, 103 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainActionTests.java create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainActionTests.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index c51e6140fac89..313ee83669017 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -66,6 +66,7 @@ public TransportClusterAllocationExplainAction( ) { super( TYPE.name(), + false, transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 607ae3f554fe8..a3ab6ad8e2f04 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -60,4 +60,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); return channel -> client.admin().cluster().allocationExplain(req, new RestRefCountedChunkedToXContentListener<>(channel)); } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainActionTests.java new file mode 100644 index 0000000000000..15a1bdde7706a --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainActionTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.allocation; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.snapshots.EmptySnapshotsInfoService; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.is; + +public class TransportClusterAllocationExplainActionTests extends ESTestCase { + + private ThreadPool threadPool; + private ClusterService clusterService; + private TransportService transportService; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(TransportClusterAllocationExplainActionTests.class.getName()); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + transportService = new CapturingTransport().createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + address -> clusterService.localNode(), + clusterService.getClusterSettings(), + Set.of() + ); + new TransportClusterAllocationExplainAction( + transportService, + clusterService, + threadPool, + new ActionFilters(Set.of()), + null, + () -> ClusterInfo.EMPTY, + EmptySnapshotsInfoService.INSTANCE, + new AllocationDeciders(List.of()), + null + ); + } + + public void testCanNotTripCircuitBreaker() { + assertThat( + transportService.getRequestHandler(TransportClusterAllocationExplainAction.TYPE.name()).canTripCircuitBreaker(), + is(false) + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + clusterService.close(); + transportService.close(); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainActionTests.java new file mode 100644 index 0000000000000..f7088fe88ff57 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainActionTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.is; + +public class RestClusterAllocationExplainActionTests extends ESTestCase { + + public void testCanNotTripCircuitBreaker() { + assertThat(new RestClusterAllocationExplainAction().canTripCircuitBreaker(), is(false)); + } +} From 0f9ebf268fa31e49091b8f8bc929c989f5bae57e Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 14 Mar 2024 10:56:10 +0100 Subject: [PATCH 204/248] Mute ZstdTests (#106348) failing on jdk22 --- .../src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java index d051961b06c5f..26ae1ecb8a8b8 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java @@ -36,6 +36,7 @@ public void testCompressBound() { expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(Integer.MIN_VALUE)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106347") public void testCompressValidation() { try (var src = nativeAccess.newBuffer(1000); var dst = nativeAccess.newBuffer(500)) { var srcBuf = src.buffer(); @@ -55,6 +56,7 @@ public void testCompressValidation() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106347") public void testDecompressValidation() { try ( var original = nativeAccess.newBuffer(1000); From 1b8f2cf8acf64b7191b302dc301d95d0c9527938 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 14 Mar 2024 10:11:09 +0000 Subject: [PATCH 205/248] Add some initial github codeowners for core/infra (#106314) --- .github/CODEOWNERS | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 91a5039f5c5f7..17867eab5b369 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -39,3 +39,23 @@ build-tools-internal @elastic/es-delivery distribution/src @elastic/es-delivery distribution/packages/src @elastic/es-delivery distribution/docker/src @elastic/es-delivery + +# Core/Infra +distribution/tools @elastic/es-core-infra +libs/core @elastic/es-core-infra +libs/logging @elastic/es-core-infra +libs/native @elastic/es-core-infra +libs/plugin-analysis-api @elastic/es-core-infra +libs/plugin-api @elastic/es-core-infra +libs/plugin-classloader @elastic/es-core-infra +libs/plugin-scanner @elastic/es-core-infra +libs/x-content @elastic/es-core-infra +modules/lang-expression @elastic/es-core-infra +modules/lang-mustache @elastic/es-core-infra +modules/lang-painless @elastic/es-core-infra +modules/rest-root @elastic/es-core-infra +modules/systemd @elastic/es-core-infra +server/src/main/java/org/elasticsearch/bootstrap @elastic/es-core-infra +server/src/main/java/org/elasticsearch/node @elastic/es-core-infra +server/src/main/java/org/elasticsearch/plugins @elastic/es-core-infra +server/src/main/java/org/elasticsearch/threadpool @elastic/es-core-infra From d1cf3b12c72684048be9bc1306ad785547331cce Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Thu, 14 Mar 2024 11:38:16 +0100 Subject: [PATCH 206/248] [Connector API] Support detached index for connectors (#106236) --- .../test/entsearch/300_connector_put.yml | 16 +++++++ .../test/entsearch/305_connector_post.yml | 17 ++++++++ .../338_connector_update_index_name.yml | 17 ++++++++ 
.../entsearch/400_connector_sync_job_post.yml | 18 ++++++++ .../connector/ConnectorIndexService.java | 18 +++++--- .../action/ConnectorActionRequest.java | 23 ++++++++++ .../connector/action/PostConnectorAction.java | 25 ++++------- .../connector/action/PutConnectorAction.java | 23 ++++------ .../UpdateConnectorIndexNameAction.java | 20 +++------ .../syncjob/ConnectorSyncJobIndexService.java | 19 ++++++++- .../connector/ConnectorTestUtils.java | 14 +++++-- .../ConnectorSyncJobIndexServiceTests.java | 42 ++++++++++++------- 12 files changed, 182 insertions(+), 70 deletions(-) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml index a1eb627a68dff..e3fad98fd90d4 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml @@ -135,6 +135,22 @@ setup: body: index_name: search-test +--- +'Create Connector - Without index attached': + - do: + connector.put: + connector_id: test-connector-1 + body: + service_type: super-connector + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector-1 + + - match: { index_name: null } + - match: { service_type: super-connector } --- "Put connector fails for unprivileged user": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/305_connector_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/305_connector_post.yml index fe074d1438c60..8eedc06a025d5 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/305_connector_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/305_connector_post.yml @@ -105,6 +105,23 @@ setup: index_name: search-test +--- +'Create Connector - Without index attached': + - do: + connector.post: + body: + service_type: super-connector + + - set: { id: id } + - match: { id: $id } + + - do: + connector.get: + connector_id: $id + + - match: { index_name: null } + - match: { service_type: super-connector } + --- "Post connector fails for unprivileged user": - skip: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/338_connector_update_index_name.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/338_connector_update_index_name.yml index 1ed7297df346e..9b2d46bc056dc 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/338_connector_update_index_name.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/338_connector_update_index_name.yml @@ -31,6 +31,23 @@ setup: - match: { index_name: search-2-test } - match: { status: created } +--- +"Update Connector Index Name - detach index": + - do: + connector.update_index_name: + connector_id: test-connector + body: + index_name: null + + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector + + - match: { index_name: null } + --- "Update Connector Index Name - 404 when connector doesn't exist": - do: diff --git 
a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml index 065f43e821775..5b5177d83dd17 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml @@ -11,6 +11,14 @@ setup: language: de is_native: false service_type: super-connector + - do: + connector.put: + connector_id: test-connector-detached-index + body: + name: my-connector + language: de + is_native: false + service_type: super-connector --- 'Create connector sync job': @@ -289,6 +297,16 @@ setup: catch: bad_request +--- +'Create connector sync job with no index attached': + - do: + connector_sync_job.post: + body: + id: test-connector-detached-index + job_type: full + trigger_method: full + catch: bad_request + --- "Create connector sync job fails for unprivileged user": - skip: diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index 40a6eeaafd708..4703784bb57b1 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -58,6 +58,7 @@ import java.time.Instant; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -107,7 +108,6 @@ public void createConnectorWithDocId(PutConnectorAction.Request request, ActionL try { isDataIndexNameAlreadyInUse(indexName, connectorId, listener.delegateFailure((l, isIndexNameInUse) -> { if (isIndexNameInUse) { - l.onFailure( new ElasticsearchStatusException( "Index name [" + indexName + "] is used by another connector.", @@ -188,7 +188,7 @@ public void createConnectorWithAutoGeneratedId( * Creates a Connector with default values and specified parameters. * * @param description The description of the connector. - * @param indexName The name of the index associated with the connector. + * @param indexName The name of the index associated with the connector. It can be null to indicate that index is not attached yet. * @param isNative Flag indicating if the connector is native; defaults to false if null. * @param language The language supported by the connector. * @param name The name of the connector; defaults to an empty string if null. @@ -694,7 +694,8 @@ public void updateConnectorPipeline(UpdateConnectorPipelineAction.Request reques } /** - * Updates the index name property of a {@link Connector}. + * Updates the index name property of a {@link Connector}. Index name can be set to null to indicate that the connector + * is not associated with any index. * * @param request The request for updating the connector's index name. * @param listener The listener for handling responses, including successful updates or errors. 
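Allowing index_name to be null means the partial-update source for updateConnectorIndexName can no longer be built with Map.of, which rejects null keys and values outright; the next hunk therefore switches to a mutable HashMap so an explicit {"index_name": null} is written into the connector document. A minimal, self-contained illustration of that difference (plain JDK collections only, not the production code):

import java.util.HashMap;
import java.util.Map;

public class NullableSourceSketch {
    public static void main(String[] args) {
        String indexName = null; // a detached connector carries no index

        // Map.of("index_name", indexName) would throw NullPointerException here,
        // which silently rules out "set the field to null" partial updates.

        // A plain HashMap accepts the null, so the update source can state it explicitly:
        Map<String, Object> source = new HashMap<>();
        source.put("index_name", indexName);
        System.out.println(source); // prints {index_name=null}
    }
}
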
@@ -720,8 +721,11 @@ public void updateConnectorIndexName(UpdateConnectorIndexNameAction.Request requ new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) .id(connectorId) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(Map.of(Connector.INDEX_NAME_FIELD.getPreferredName(), request.getIndexName())) - + .source(new HashMap<>() { + { + put(Connector.INDEX_NAME_FIELD.getPreferredName(), request.getIndexName()); + } + }) ); client.update(updateRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (ll, updateResponse) -> { if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { @@ -912,6 +916,10 @@ private static ConnectorSearchResult hitToConnector(SearchHit searchHit) { * @param listener The listener for handling boolean responses and errors. */ private void isDataIndexNameAlreadyInUse(String indexName, String connectorId, ActionListener listener) { + if (indexName == null) { + listener.onResponse(false); + return; + } try { BoolQueryBuilder boolFilterQueryBuilder = new BoolQueryBuilder(); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java index 011073e5d0c6f..1799121505da5 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java @@ -8,13 +8,18 @@ package org.elasticsearch.xpack.application.connector.action; import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * Abstract base class for action requests targeting the connectors index. Implements {@link org.elasticsearch.action.IndicesRequest} * to ensure index-level privilege support. This class defines the connectors index as the target for all derived action requests. @@ -29,6 +34,24 @@ public ConnectorActionRequest(StreamInput in) throws IOException { super(in); } + /** + * Validates the given index name and updates the validation exception if the name is invalid. + * + * @param indexName The index name to validate. If null, no validation is performed. + * @param validationException The exception to accumulate validation errors. + * @return The updated or original {@code validationException} with any new validation errors added, if the index name is invalid. 
+ */ + public ActionRequestValidationException validateIndexName(String indexName, ActionRequestValidationException validationException) { + if (indexName != null) { + try { + MetadataCreateIndexService.validateIndexOrAliasName(indexName, InvalidIndexNameException::new); + } catch (InvalidIndexNameException e) { + return addValidationError(e.toString(), validationException); + } + } + return validationException; + } + @Override public String[] indices() { return new String[] { ConnectorTemplateRegistry.CONNECTOR_INDEX_NAME_PATTERN }; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java index ecf431085fd99..d465418cb979f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java @@ -11,14 +11,11 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; -import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -31,8 +28,6 @@ import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class PostConnectorAction { @@ -46,6 +41,7 @@ public static class Request extends ConnectorActionRequest implements ToXContent @Nullable private final String description; + @Nullable private final String indexName; @Nullable private final Boolean isNative; @@ -68,7 +64,7 @@ public Request(String description, String indexName, Boolean isNative, String la public Request(StreamInput in) throws IOException { super(in); this.description = in.readOptionalString(); - this.indexName = in.readString(); + this.indexName = in.readOptionalString(); this.isNative = in.readOptionalBoolean(); this.language = in.readOptionalString(); this.name = in.readOptionalString(); @@ -90,7 +86,7 @@ public Request(StreamInput in) throws IOException { static { PARSER.declareString(optionalConstructorArg(), new ParseField("description")); - PARSER.declareString(constructorArg(), new ParseField("index_name")); + PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("index_name")); PARSER.declareBoolean(optionalConstructorArg(), new ParseField("is_native")); PARSER.declareString(optionalConstructorArg(), new ParseField("language")); PARSER.declareString(optionalConstructorArg(), new ParseField("name")); @@ -116,7 +112,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { builder.field("description", description); } 
- builder.field("index_name", indexName); + if (indexName != null) { + builder.field("index_name", indexName); + } if (isNative != null) { builder.field("is_native", isNative); } @@ -138,14 +136,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (Strings.isNullOrEmpty(getIndexName())) { - validationException = addValidationError("[index_name] cannot be [null] or [\"\"]", validationException); - } - try { - MetadataCreateIndexService.validateIndexOrAliasName(getIndexName(), InvalidIndexNameException::new); - } catch (InvalidIndexNameException e) { - validationException = addValidationError(e.toString(), validationException); - } + validationException = validateIndexName(indexName, validationException); return validationException; } @@ -154,7 +145,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(description); - out.writeString(indexName); + out.writeOptionalString(indexName); out.writeOptionalBoolean(isNative); out.writeOptionalString(language); out.writeOptionalString(name); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java index e7ab8337d72a6..75f9ad24bef39 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java @@ -13,14 +13,12 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -49,6 +47,7 @@ public static class Request extends ConnectorActionRequest implements IndicesReq @Nullable private final String description; + @Nullable private final String indexName; @Nullable private final Boolean isNative; @@ -81,7 +80,7 @@ public Request(StreamInput in) throws IOException { super(in); this.connectorId = in.readString(); this.description = in.readOptionalString(); - this.indexName = in.readString(); + this.indexName = in.readOptionalString(); this.isNative = in.readOptionalBoolean(); this.language = in.readOptionalString(); this.name = in.readOptionalString(); @@ -104,7 +103,7 @@ public Request(StreamInput in) throws IOException { static { PARSER.declareString(optionalConstructorArg(), new ParseField("description")); - PARSER.declareString(optionalConstructorArg(), new ParseField("index_name")); + PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("index_name")); PARSER.declareBoolean(optionalConstructorArg(), new ParseField("is_native")); 
PARSER.declareString(optionalConstructorArg(), new ParseField("language")); PARSER.declareString(optionalConstructorArg(), new ParseField("name")); @@ -130,7 +129,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { builder.field("description", description); } - builder.field("index_name", indexName); + if (indexName != null) { + builder.field("index_name", indexName); + } if (isNative != null) { builder.field("is_native", isNative); } @@ -156,14 +157,8 @@ public ActionRequestValidationException validate() { if (Strings.isNullOrEmpty(getConnectorId())) { validationException = addValidationError("[connector_id] cannot be [null] or [\"\"]", validationException); } - if (Strings.isNullOrEmpty(getIndexName())) { - validationException = addValidationError("[index_name] cannot be [null] or [\"\"]", validationException); - } - try { - MetadataCreateIndexService.validateIndexOrAliasName(getIndexName(), InvalidIndexNameException::new); - } catch (InvalidIndexNameException e) { - validationException = addValidationError(e.toString(), validationException); - } + + validationException = validateIndexName(indexName, validationException); return validationException; } @@ -173,7 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(connectorId); out.writeOptionalString(description); - out.writeString(indexName); + out.writeOptionalString(indexName); out.writeOptionalBoolean(isNative); out.writeOptionalString(language); out.writeOptionalString(name); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java index 67a703007477c..c6cb18089ad06 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java @@ -10,13 +10,12 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; -import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.indices.InvalidIndexNameException; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -41,6 +40,7 @@ private UpdateConnectorIndexNameAction() {/* no instances */} public static class Request extends ConnectorActionRequest implements ToXContentObject { private final String connectorId; + @Nullable private final String indexName; public Request(String connectorId, String indexName) { @@ -51,7 +51,7 @@ public Request(String connectorId, String indexName) { public Request(StreamInput in) throws IOException { super(in); this.connectorId = in.readString(); - this.indexName = in.readString(); + this.indexName = in.readOptionalString(); } public String getConnectorId() { @@ -70,7 +70,7 @@ public String 
getIndexName() { ); static { - PARSER.declareString(constructorArg(), Connector.INDEX_NAME_FIELD); + PARSER.declareStringOrNull(constructorArg(), Connector.INDEX_NAME_FIELD); } public static UpdateConnectorIndexNameAction.Request fromXContentBytes( @@ -107,15 +107,7 @@ public ActionRequestValidationException validate() { validationException = addValidationError("[connector_id] cannot be [null] or [\"\"].", validationException); } - if (Strings.isNullOrEmpty(indexName)) { - validationException = addValidationError("[index_name] cannot be [null] or [\"\"].", validationException); - } - - try { - MetadataCreateIndexService.validateIndexOrAliasName(indexName, InvalidIndexNameException::new); - } catch (InvalidIndexNameException e) { - validationException = addValidationError(e.toString(), validationException); - } + validationException = validateIndexName(indexName, validationException); return validationException; } @@ -124,7 +116,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(connectorId); - out.writeString(indexName); + out.writeOptionalString(indexName); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index d1d345840874f..ff35d700b856e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -87,8 +88,23 @@ public void createConnectorSyncJob( PostConnectorSyncJobAction.Request request, ActionListener listener ) { + String connectorId = request.getId(); try { - getSyncJobConnectorInfo(request.getId(), listener.delegateFailure((l, connector) -> { + getSyncJobConnectorInfo(connectorId, listener.delegateFailure((l, connector) -> { + + if (Strings.isNullOrEmpty(connector.getIndexName())) { + l.onFailure( + new ElasticsearchStatusException( + "Cannot start a sync for connector [" + + connectorId + + "] with no index attached. 
Set the [index_name] property for the connector " + + "to enable syncing data.", + RestStatus.BAD_REQUEST + ) + ); + return; + } + Instant now = Instant.now(); ConnectorSyncJobType jobType = Objects.requireNonNullElse(request.getJobType(), ConnectorSyncJob.DEFAULT_JOB_TYPE); ConnectorSyncJobTriggerMethod triggerMethod = Objects.requireNonNullElse( @@ -97,7 +113,6 @@ public void createConnectorSyncJob( ); try { - final IndexRequest indexRequest = new IndexRequest(CONNECTOR_SYNC_JOB_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index e9053a0a64507..0eade25eaa03f 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -258,8 +258,7 @@ public static Map getRandomConnectorConfigurationValues() { return configMap; } - public static Connector getRandomConnector() { - + private static Connector.Builder getRandomConnectorBuilder() { return new Connector.Builder().setApiKeyId(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setApiKeySecretId(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setConfiguration(getRandomConnectorConfiguration()) @@ -278,8 +277,15 @@ public static Connector getRandomConnector() { .setScheduling(getRandomConnectorScheduling()) .setStatus(getRandomConnectorInitialStatus()) .setSyncCursor(randomBoolean() ? Map.of(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)) : null) - .setSyncNow(randomBoolean()) - .build(); + .setSyncNow(randomBoolean()); + } + + public static Connector getRandomConnector() { + return getRandomConnectorBuilder().build(); + } + + public static Connector getRandomConnectorWithDetachedIndex() { + return getRandomConnectorBuilder().setIndexName(null).build(); } private static BytesReference convertConnectorToBytesReference(Connector connector) { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java index 1e991569a9ffd..3df62674750bb 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java @@ -71,18 +71,19 @@ public class ConnectorSyncJobIndexServiceTests extends ESSingleNodeTestCase { private String connectorOneId; private String connectorTwoId; + private String connectorThreeId; @Before public void setup() throws Exception { - connectorOneId = createConnector(); - connectorTwoId = createConnector(); + connectorOneId = createConnector(ConnectorTestUtils.getRandomConnector()); + connectorTwoId = createConnector(ConnectorTestUtils.getRandomConnector()); + connectorThreeId = createConnector(ConnectorTestUtils.getRandomConnectorWithDetachedIndex()); this.connectorSyncJobIndexService = new ConnectorSyncJobIndexService(client()); } - private String createConnector() throws IOException, InterruptedException, 
ExecutionException, TimeoutException { - Connector connector = ConnectorTestUtils.getRandomConnector(); + private String createConnector(Connector connector) throws IOException, InterruptedException, ExecutionException, TimeoutException { final IndexRequest indexRequest = new IndexRequest(ConnectorIndexService.CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -151,6 +152,24 @@ public void testCreateConnectorSyncJob_WithMissingConnectorId_ExpectException() ); } + public void testDeleteConnectorSyncJob_WithDetachedConnectorIndex_ExpectException() { + PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request( + connectorThreeId, + ConnectorSyncJobType.FULL, + ConnectorSyncJobTriggerMethod.ON_DEMAND + ); + expectThrows(ElasticsearchStatusException.class, () -> awaitPutConnectorSyncJob(syncJobRequest)); + } + + public void testDeleteConnectorSyncJob_WithNonExistentConnectorId_ExpectException() { + PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request( + "non-existent-connector-id", + ConnectorSyncJobType.FULL, + ConnectorSyncJobTriggerMethod.ON_DEMAND + ); + expectThrows(ResourceNotFoundException.class, () -> awaitPutConnectorSyncJob(syncJobRequest)); + } + public void testDeleteConnectorSyncJob() throws Exception { PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId @@ -1114,13 +1133,13 @@ private PostConnectorSyncJobAction.Response awaitPutConnectorSyncJob(PostConnect throws Exception { CountDownLatch latch = new CountDownLatch(1); - final AtomicReference responseRef = new AtomicReference<>(null); + final AtomicReference resp = new AtomicReference<>(null); final AtomicReference exception = new AtomicReference<>(null); connectorSyncJobIndexService.createConnectorSyncJob(syncJobRequest, new ActionListener<>() { @Override public void onResponse(PostConnectorSyncJobAction.Response putConnectorSyncJobResponse) { - responseRef.set(putConnectorSyncJobResponse); + resp.set(putConnectorSyncJobResponse); latch.countDown(); } @@ -1130,18 +1149,13 @@ public void onFailure(Exception e) { latch.countDown(); } }); - + assertTrue("Timeout waiting for delete request", latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS)); if (exception.get() != null) { throw exception.get(); } - boolean requestTimedOut = latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS); - PostConnectorSyncJobAction.Response response = responseRef.get(); - - assertTrue("Timeout waiting for post request", requestTimedOut); - assertNotNull("Received null response from post request", response); - - return response; + assertNotNull("Received null response from delete request", resp.get()); + return resp.get(); } private String updateConnectorSyncJobStatusWithoutStateMachineGuard(String syncJobId, ConnectorSyncStatus syncStatus) throws Exception { From 511567bfebc2c45c33c11830b6a2ae689e1e4f33 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 14 Mar 2024 10:49:45 +0000 Subject: [PATCH 207/248] Separate out date time parsing & printing functionality (#106114) Introduce separate types for printing & parsing date times, in preparation for adding more implementations later on. 
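A minimal sketch of how the new pieces compose (illustrative only: the snippet would need to live in the org.elasticsearch.common.time package because the new types are package-private, the formatter name "iso_sketch" and the sample values are invented, java.time imports are omitted, and DateFormatter.format is assumed to be the pre-existing printing entry point):

    // Wrap one java.time formatter as both the printer and the single parser.
    DateTimeFormatter iso = DateTimeFormatter.ISO_LOCAL_DATE_TIME;
    DateTimePrinter printer = new JavaTimeDateTimePrinter(iso);
    DateTimeParser parser = new JavaTimeDateTimeParser(iso);
    DateFormatter formatter = new JavaDateFormatter("iso_sketch", printer, parser);

    // Parsing is delegated to the DateTimeParser, printing to the DateTimePrinter.
    TemporalAccessor parsed = formatter.parse("2024-03-14T10:49:45");
    String printed = formatter.format(LocalDateTime.of(2024, 3, 14, 10, 49, 45));
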
Also use the opportunity to tidy up the code around roundup parsers and creating formatters --- .../rest-api-spec/test/tsdb/10_settings.yml | 2 +- .../common/time/DateFormatters.java | 176 +++++++++--------- .../common/time/DateTimeParser.java | 49 +++++ .../common/time/DateTimePrinter.java | 31 +++ .../elasticsearch/common/time/EpochTime.java | 12 +- .../common/time/JavaDateFormatter.java | 168 +++++++---------- .../common/time/JavaDateMathParser.java | 18 +- .../common/time/JavaTimeDateTimeParser.java | 68 +++++++ .../common/time/JavaTimeDateTimePrinter.java | 48 +++++ .../common/time/DateFormattersTests.java | 20 +- .../common/time/JavaDateMathParserTests.java | 22 ++- .../scalar/convert/ToDatetimeTests.java | 2 +- 12 files changed, 396 insertions(+), 220 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/time/DateTimeParser.java create mode 100644 server/src/main/java/org/elasticsearch/common/time/DateTimePrinter.java create mode 100644 server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimeParser.java create mode 100644 server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimePrinter.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 922e0f4969669..4a6626577d5ee 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -232,7 +232,7 @@ empty start end times: reason: introduced in 8.1.0 - do: - catch: /cannot parse empty date/ + catch: /cannot parse empty date(time)?/ indices.create: index: test_index body: diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 34d583ed7e732..80811fbbd1b80 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -30,6 +30,7 @@ import java.time.temporal.TemporalQuery; import java.time.temporal.WeekFields; import java.util.Locale; +import java.util.stream.Stream; import static java.time.temporal.ChronoField.DAY_OF_MONTH; import static java.time.temporal.ChronoField.DAY_OF_WEEK; @@ -42,6 +43,18 @@ public class DateFormatters { + private static DateFormatter newDateFormatter(String format, DateTimeFormatter formatter) { + return new JavaDateFormatter(format, new JavaTimeDateTimePrinter(formatter), new JavaTimeDateTimeParser(formatter)); + } + + private static DateFormatter newDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { + return new JavaDateFormatter( + format, + new JavaTimeDateTimePrinter(printer), + Stream.of(parsers).map(JavaTimeDateTimeParser::new).toArray(DateTimeParser[]::new) + ); + } + public static final WeekFields WEEK_FIELDS_ROOT = WeekFields.of(Locale.ROOT); private static final DateTimeFormatter TIME_ZONE_FORMATTER_NO_COLON = new DateTimeFormatterBuilder().appendOffset("+HHmm", "Z") @@ -155,7 +168,7 @@ public class DateFormatters { /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional. 
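     * For example (illustrative; the second input matches the shape exercised in DateFormattersTests):
     *   DateFormatter.forPattern("strict_date_optional_time").parse("2018-10-10");
     *   DateFormatter.forPattern("strict_date_optional_time").parse("2018-10-10T12:13:14.123Z");
     * Both succeed, while an input without the date part is rejected.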
*/ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME = newDateFormatter( "strict_date_optional_time", STRICT_DATE_OPTIONAL_TIME_PRINTER, STRICT_DATE_OPTIONAL_TIME_FORMATTER @@ -211,7 +224,7 @@ public class DateFormatters { /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = newDateFormatter( "strict_date_optional_time_nanos", STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS @@ -222,7 +235,7 @@ public class DateFormatters { * This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the * existing legacy joda time ISO date formatter */ - private static final DateFormatter ISO_8601 = new JavaDateFormatter( + private static final DateFormatter ISO_8601 = newDateFormatter( "iso8601", STRICT_DATE_OPTIONAL_TIME_PRINTER, new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) @@ -281,7 +294,7 @@ public class DateFormatters { * Returns a basic formatter for a two digit hour of day, two digit minute * of hour, two digit second of minute, and time zone offset (HHmmssZ). */ - private static final DateFormatter BASIC_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter BASIC_TIME_NO_MILLIS = newDateFormatter( "basic_time_no_millis", new DateTimeFormatterBuilder().append(BASIC_TIME_NO_MILLIS_BASE) .appendOffset("+HH:MM", "Z") @@ -326,7 +339,7 @@ public class DateFormatters { * of hour, two digit second of minute, three digit millis, and time zone * offset (HHmmss.SSSZ). */ - private static final DateFormatter BASIC_TIME = new JavaDateFormatter( + private static final DateFormatter BASIC_TIME = newDateFormatter( "basic_time", new DateTimeFormatterBuilder().append(BASIC_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -357,7 +370,7 @@ public class DateFormatters { * of hour, two digit second of minute, three digit millis, and time zone * offset prefixed by 'T' ('T'HHmmss.SSSZ). */ - private static final DateFormatter BASIC_T_TIME = new JavaDateFormatter( + private static final DateFormatter BASIC_T_TIME = newDateFormatter( "basic_t_time", new DateTimeFormatterBuilder().append(BASIC_T_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -375,7 +388,7 @@ public class DateFormatters { * of hour, two digit second of minute, and time zone offset prefixed by 'T' * ('T'HHmmssZ). */ - private static final DateFormatter BASIC_T_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter BASIC_T_TIME_NO_MILLIS = newDateFormatter( "basic_t_time_no_millis", new DateTimeFormatterBuilder().appendLiteral("T") .append(BASIC_TIME_NO_MILLIS_BASE) @@ -430,7 +443,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic date and time, separated * by a 'T' (uuuuMMdd'T'HHmmss.SSSZ). */ - private static final DateFormatter BASIC_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter BASIC_DATE_TIME = newDateFormatter( "basic_date_time", new DateTimeFormatterBuilder().append(BASIC_DATE_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -460,7 +473,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic date and time without millis, * separated by a 'T' (uuuuMMdd'T'HHmmssZ). 
*/ - private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = newDateFormatter( "basic_date_time_no_millis", new DateTimeFormatterBuilder().append(BASIC_DATE_T_PRINTER) .append(BASIC_TIME_NO_MILLIS_BASE) @@ -483,7 +496,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date, using a four * digit year and three digit dayOfYear (uuuuDDD). */ - private static final DateFormatter BASIC_ORDINAL_DATE = new JavaDateFormatter( + private static final DateFormatter BASIC_ORDINAL_DATE = newDateFormatter( "basic_ordinal_date", DateTimeFormatter.ofPattern("uuuuDDD", Locale.ROOT) ); @@ -492,7 +505,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time, using a four * digit year and three digit dayOfYear (uuuuDDD'T'HHmmss.SSSZ). */ - private static final DateFormatter BASIC_ORDINAL_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter BASIC_ORDINAL_DATE_TIME = newDateFormatter( "basic_ordinal_date_time", new DateTimeFormatterBuilder().appendPattern("yyyyDDD") .append(BASIC_T_TIME_PRINTER) @@ -516,7 +529,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time without millis, * using a four digit year and three digit dayOfYear (uuuuDDD'T'HHmmssZ). */ - private static final DateFormatter BASIC_ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter BASIC_ORDINAL_DATE_TIME_NO_MILLIS = newDateFormatter( "basic_ordinal_date_time_no_millis", new DateTimeFormatterBuilder().appendPattern("uuuuDDD") .appendLiteral("T") @@ -576,7 +589,7 @@ public class DateFormatters { * Returns a basic formatter for a full date as four digit weekyear, two * digit week of weekyear, and one digit day of week (YYYY'W'wwe). */ - private static final DateFormatter STRICT_BASIC_WEEK_DATE = new JavaDateFormatter( + private static final DateFormatter STRICT_BASIC_WEEK_DATE = newDateFormatter( "strict_basic_week_date", STRICT_BASIC_WEEK_DATE_PRINTER, STRICT_BASIC_WEEK_DATE_FORMATTER @@ -586,7 +599,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time * without millis, separated by a 'T' (YYYY'W'wwe'T'HHmmssX). */ - private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = newDateFormatter( "strict_basic_week_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_BASIC_WEEK_DATE_PRINTER) .appendLiteral("T") @@ -618,7 +631,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time, * separated by a 'T' (YYYY'W'wwe'T'HHmmss.SSSX). */ - private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME = newDateFormatter( "strict_basic_week_date_time", new DateTimeFormatterBuilder().append(STRICT_BASIC_WEEK_DATE_PRINTER) .append(DateTimeFormatter.ofPattern("'T'HHmmss.SSSX", Locale.ROOT)) @@ -647,7 +660,7 @@ public class DateFormatters { /* * An ISO date formatter that formats or parses a date without an offset, such as '2011-12-03'. 
*/ - private static final DateFormatter STRICT_DATE = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE = newDateFormatter( "strict_date", DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.LENIENT).withLocale(Locale.ROOT) ); @@ -655,7 +668,7 @@ public class DateFormatters { /* * A date formatter that formats or parses a date plus an hour without an offset, such as '2011-12-03T01'. */ - private static final DateFormatter STRICT_DATE_HOUR = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_HOUR = newDateFormatter( "strict_date_hour", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH", Locale.ROOT) ); @@ -663,7 +676,7 @@ public class DateFormatters { /* * A date formatter that formats or parses a date plus an hour/minute without an offset, such as '2011-12-03T01:10'. */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE = newDateFormatter( "strict_date_hour_minute", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm", Locale.ROOT) ); @@ -671,15 +684,12 @@ public class DateFormatters { /* * A strict date formatter that formats or parses a date without an offset, such as '2011-12-03'. */ - private static final DateFormatter STRICT_YEAR_MONTH_DAY = new JavaDateFormatter( - "strict_year_month_day", - STRICT_YEAR_MONTH_DAY_FORMATTER - ); + private static final DateFormatter STRICT_YEAR_MONTH_DAY = newDateFormatter("strict_year_month_day", STRICT_YEAR_MONTH_DAY_FORMATTER); /* * A strict formatter that formats or parses a year and a month, such as '2011-12'. */ - private static final DateFormatter STRICT_YEAR_MONTH = new JavaDateFormatter( + private static final DateFormatter STRICT_YEAR_MONTH = newDateFormatter( "strict_year_month", new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) .appendLiteral("-") @@ -691,7 +701,7 @@ public class DateFormatters { /* * A strict formatter that formats or parses a year, such as '2011'. */ - private static final DateFormatter STRICT_YEAR = new JavaDateFormatter( + private static final DateFormatter STRICT_YEAR = newDateFormatter( "strict_year", new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) .toFormatter(Locale.ROOT) @@ -701,7 +711,7 @@ public class DateFormatters { /* * A strict formatter that formats or parses a hour, minute and second, such as '09:43:25'. */ - private static final DateFormatter STRICT_HOUR_MINUTE_SECOND = new JavaDateFormatter( + private static final DateFormatter STRICT_HOUR_MINUTE_SECOND = newDateFormatter( "strict_hour_minute_second", STRICT_HOUR_MINUTE_SECOND_FORMATTER ); @@ -727,7 +737,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and time, separated by a 'T' * (uuuu-MM-dd'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_TIME = newDateFormatter( "strict_date_time", STRICT_DATE_PRINTER, new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) @@ -757,7 +767,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time without millis, * using a four digit year and three digit dayOfYear (uuuu-DDD'T'HH:mm:ssZZ). 
*/ - private static final DateFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS = newDateFormatter( "strict_ordinal_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) .appendOffset("+HH:MM", "Z") @@ -781,7 +791,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and time without millis, * separated by a 'T' (uuuu-MM-dd'T'HH:mm:ssZZ). */ - private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = newDateFormatter( "strict_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) .appendOffset("+HH:MM", "Z") @@ -814,13 +824,13 @@ public class DateFormatters { * NOTE: this is not a strict formatter to retain the joda time based behaviour, * even though it's named like this */ - private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS = newDateFormatter( "strict_hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER ); - private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = newDateFormatter( "strict_hour_minute_second_fraction", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER @@ -831,7 +841,7 @@ public class DateFormatters { * two digit minute of hour, two digit second of minute, and three digit * fraction of second (uuuu-MM-dd'T'HH:mm:ss.SSS). */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = newDateFormatter( "strict_date_hour_minute_second_fraction", new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") @@ -847,7 +857,7 @@ public class DateFormatters { .withResolverStyle(ResolverStyle.STRICT) ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = newDateFormatter( "strict_date_hour_minute_second_millis", new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") @@ -866,13 +876,13 @@ public class DateFormatters { /* * Returns a formatter for a two digit hour of day. (HH) */ - private static final DateFormatter STRICT_HOUR = new JavaDateFormatter("strict_hour", DateTimeFormatter.ofPattern("HH", Locale.ROOT)); + private static final DateFormatter STRICT_HOUR = newDateFormatter("strict_hour", DateTimeFormatter.ofPattern("HH", Locale.ROOT)); /* * Returns a formatter for a two digit hour of day and two digit minute of * hour. (HH:mm) */ - private static final DateFormatter STRICT_HOUR_MINUTE = new JavaDateFormatter( + private static final DateFormatter STRICT_HOUR_MINUTE = newDateFormatter( "strict_hour_minute", DateTimeFormatter.ofPattern("HH:mm", Locale.ROOT) ); @@ -917,7 +927,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time, using a four * digit year and three digit dayOfYear (uuuu-DDD'T'HH:mm:ss.SSSZZ). 
*/ - private static final DateFormatter STRICT_ORDINAL_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_ORDINAL_DATE_TIME = newDateFormatter( "strict_ordinal_date_time", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -967,7 +977,7 @@ public class DateFormatters { * hour, two digit second of minute, three digit fraction of second, and * time zone offset (HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_TIME = newDateFormatter( "strict_time", new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -988,7 +998,7 @@ public class DateFormatters { * hour, two digit second of minute, three digit fraction of second, and * time zone offset prefixed by 'T' ('T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_T_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_T_TIME = newDateFormatter( "strict_t_time", new DateTimeFormatterBuilder().appendLiteral('T') .append(STRICT_TIME_PRINTER) @@ -1024,7 +1034,7 @@ public class DateFormatters { * Returns a formatter for a two digit hour of day, two digit minute of * hour, two digit second of minute, and time zone offset (HH:mm:ssZZ). */ - private static final DateFormatter STRICT_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_TIME_NO_MILLIS = newDateFormatter( "strict_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE) .appendOffset("+HH:MM", "Z") @@ -1045,7 +1055,7 @@ public class DateFormatters { * hour, two digit second of minute, and time zone offset prefixed * by 'T' ('T'HH:mm:ssZZ). */ - private static final DateFormatter STRICT_T_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_T_TIME_NO_MILLIS = newDateFormatter( "strict_t_time_no_millis", new DateTimeFormatterBuilder().appendLiteral("T") .append(STRICT_TIME_NO_MILLIS_BASE) @@ -1082,13 +1092,13 @@ public class DateFormatters { * Returns a formatter for a full date as four digit weekyear, two digit * week of weekyear, and one digit day of week (YYYY-'W'ww-e). */ - private static final DateFormatter STRICT_WEEK_DATE = new JavaDateFormatter("strict_week_date", ISO_WEEK_DATE); + private static final DateFormatter STRICT_WEEK_DATE = newDateFormatter("strict_week_date", ISO_WEEK_DATE); /* * Returns a formatter that combines a full weekyear date and time without millis, * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ssZZ). */ - private static final DateFormatter STRICT_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter STRICT_WEEK_DATE_TIME_NO_MILLIS = newDateFormatter( "strict_week_date_time_no_millis", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) .append(STRICT_TIME_NO_MILLIS_BASE) @@ -1111,7 +1121,7 @@ public class DateFormatters { * Returns a formatter that combines a full weekyear date and time, * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ss.SSSZZ). 
*/ - private static final DateFormatter STRICT_WEEK_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter STRICT_WEEK_DATE_TIME = newDateFormatter( "strict_week_date_time", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) .append(STRICT_TIME_PRINTER) @@ -1133,7 +1143,7 @@ public class DateFormatters { /* * Returns a formatter for a four digit weekyear */ - private static final DateFormatter STRICT_WEEKYEAR = new JavaDateFormatter( + private static final DateFormatter STRICT_WEEKYEAR = newDateFormatter( "strict_weekyear", new DateTimeFormatterBuilder().appendValue(WEEK_FIELDS_ROOT.weekBasedYear(), 4, 10, SignStyle.EXCEEDS_PAD) .toFormatter(Locale.ROOT) @@ -1155,13 +1165,13 @@ public class DateFormatters { * Returns a formatter for a four digit weekyear and two digit week of * weekyear. (YYYY-'W'ww) */ - private static final DateFormatter STRICT_WEEKYEAR_WEEK = new JavaDateFormatter("strict_weekyear_week", STRICT_WEEKYEAR_WEEK_FORMATTER); + private static final DateFormatter STRICT_WEEKYEAR_WEEK = newDateFormatter("strict_weekyear_week", STRICT_WEEKYEAR_WEEK_FORMATTER); /* * Returns a formatter for a four digit weekyear, two digit week of * weekyear, and one digit day of week. (YYYY-'W'ww-e) */ - private static final DateFormatter STRICT_WEEKYEAR_WEEK_DAY = new JavaDateFormatter( + private static final DateFormatter STRICT_WEEKYEAR_WEEK_DAY = newDateFormatter( "strict_weekyear_week_day", new DateTimeFormatterBuilder().append(STRICT_WEEKYEAR_WEEK_FORMATTER) .appendLiteral("-") @@ -1175,7 +1185,7 @@ public class DateFormatters { * two digit minute of hour, and two digit second of * minute. (uuuu-MM-dd'T'HH:mm:ss) */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND = newDateFormatter( "strict_date_hour_minute_second", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT) ); @@ -1184,7 +1194,7 @@ public class DateFormatters { * A basic formatter for a full date as four digit year, two digit * month of year, and two digit day of month (uuuuMMdd). */ - private static final DateFormatter BASIC_DATE = new JavaDateFormatter( + private static final DateFormatter BASIC_DATE = newDateFormatter( "basic_date", new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 10, SignStyle.NORMAL) .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) @@ -1212,7 +1222,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date, using a four * digit year and three digit dayOfYear (uuuu-DDD). */ - private static final DateFormatter STRICT_ORDINAL_DATE = new JavaDateFormatter("strict_ordinal_date", STRICT_ORDINAL_DATE_FORMATTER); + private static final DateFormatter STRICT_ORDINAL_DATE = newDateFormatter("strict_ordinal_date", STRICT_ORDINAL_DATE_FORMATTER); ///////////////////////////////////////// // @@ -1258,7 +1268,7 @@ public class DateFormatters { * a date formatter with optional time, being very lenient, format is * uuuu-MM-dd'T'HH:mm:ss.SSSZ */ - private static final DateFormatter DATE_OPTIONAL_TIME = new JavaDateFormatter( + private static final DateFormatter DATE_OPTIONAL_TIME = newDateFormatter( "date_optional_time", STRICT_DATE_OPTIONAL_TIME_PRINTER, new DateTimeFormatterBuilder().append(DATE_FORMATTER) @@ -1353,7 +1363,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date, using a four * digit year and three digit dayOfYear (uuuu-DDD). 
*/ - private static final DateFormatter ORDINAL_DATE = new JavaDateFormatter("ordinal_date", ORDINAL_DATE_PRINTER, ORDINAL_DATE_FORMATTER); + private static final DateFormatter ORDINAL_DATE = newDateFormatter("ordinal_date", ORDINAL_DATE_PRINTER, ORDINAL_DATE_FORMATTER); private static final DateTimeFormatter TIME_NO_MILLIS_FORMATTER = new DateTimeFormatterBuilder().appendValue( HOUR_OF_DAY, @@ -1394,7 +1404,7 @@ public class DateFormatters { /* * Returns a formatter for a four digit weekyear. (YYYY) */ - private static final DateFormatter WEEKYEAR = new JavaDateFormatter( + private static final DateFormatter WEEKYEAR = newDateFormatter( "weekyear", new DateTimeFormatterBuilder().appendValue(WEEK_FIELDS_ROOT.weekBasedYear()) .toFormatter(Locale.ROOT) @@ -1403,7 +1413,7 @@ public class DateFormatters { /* * Returns a formatter for a four digit year. (uuuu) */ - private static final DateFormatter YEAR = new JavaDateFormatter( + private static final DateFormatter YEAR = newDateFormatter( "year", new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR).toFormatter(Locale.ROOT).withResolverStyle(ResolverStyle.STRICT) ); @@ -1412,7 +1422,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and two digit hour of * day. (uuuu-MM-dd'T'HH) */ - private static final DateFormatter DATE_HOUR = new JavaDateFormatter( + private static final DateFormatter DATE_HOUR = newDateFormatter( "date_hour", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH", Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_FORMATTER) @@ -1427,7 +1437,7 @@ public class DateFormatters { * two digit minute of hour, two digit second of minute, and three digit * fraction of second (uuuu-MM-dd'T'HH:mm:ss.SSS). */ - private static final DateFormatter DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + private static final DateFormatter DATE_HOUR_MINUTE_SECOND_MILLIS = newDateFormatter( "date_hour_minute_second_millis", new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") @@ -1441,7 +1451,7 @@ public class DateFormatters { .withResolverStyle(ResolverStyle.STRICT) ); - private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = newDateFormatter( "date_hour_minute_second_fraction", new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") @@ -1459,7 +1469,7 @@ public class DateFormatters { * Returns a formatter that combines a full date, two digit hour of day, * and two digit minute of hour. (uuuu-MM-dd'T'HH:mm) */ - private static final DateFormatter DATE_HOUR_MINUTE = new JavaDateFormatter( + private static final DateFormatter DATE_HOUR_MINUTE = newDateFormatter( "date_hour_minute", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm", Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_FORMATTER) @@ -1474,7 +1484,7 @@ public class DateFormatters { * two digit minute of hour, and two digit second of * minute. (uuuu-MM-dd'T'HH:mm:ss) */ - private static final DateFormatter DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( + private static final DateFormatter DATE_HOUR_MINUTE_SECOND = newDateFormatter( "date_hour_minute_second", DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_FORMATTER) @@ -1499,7 +1509,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and time, separated by a 'T' * (uuuu-MM-dd'T'HH:mm:ss.SSSZZ). 
*/ - private static final DateFormatter DATE_TIME = new JavaDateFormatter( + private static final DateFormatter DATE_TIME = newDateFormatter( "date_time", STRICT_DATE_OPTIONAL_TIME_PRINTER, new DateTimeFormatterBuilder().append(DATE_TIME_FORMATTER) @@ -1516,7 +1526,7 @@ public class DateFormatters { * Returns a basic formatter for a full date as four digit weekyear, two * digit week of weekyear, and one digit day of week (YYYY'W'wwe). */ - private static final DateFormatter BASIC_WEEK_DATE = new JavaDateFormatter( + private static final DateFormatter BASIC_WEEK_DATE = newDateFormatter( "basic_week_date", STRICT_BASIC_WEEK_DATE_PRINTER, BASIC_WEEK_DATE_FORMATTER @@ -1526,7 +1536,7 @@ public class DateFormatters { * Returns a formatter for a full date as four digit year, two digit month * of year, and two digit day of month (uuuu-MM-dd). */ - private static final DateFormatter DATE = new JavaDateFormatter( + private static final DateFormatter DATE = newDateFormatter( "date", DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.STRICT), DATE_FORMATTER @@ -1558,7 +1568,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and time without millis, but with a timezone that can be optional * separated by a 'T' (uuuu-MM-dd'T'HH:mm:ssZ). */ - private static final DateFormatter DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter DATE_TIME_NO_MILLIS = newDateFormatter( "date_time_no_millis", DATE_TIME_NO_MILLIS_PRINTER, new DateTimeFormatterBuilder().append(DATE_TIME_PREFIX) @@ -1588,13 +1598,13 @@ public class DateFormatters { * hour, two digit second of minute, and three digit fraction of * second (HH:mm:ss.SSS). */ - private static final DateFormatter HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + private static final DateFormatter HOUR_MINUTE_SECOND_MILLIS = newDateFormatter( "hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER ); - private static final DateFormatter HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + private static final DateFormatter HOUR_MINUTE_SECOND_FRACTION = newDateFormatter( "hour_minute_second_fraction", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_FRACTION_FORMATTER @@ -1604,7 +1614,7 @@ public class DateFormatters { * Returns a formatter for a two digit hour of day and two digit minute of * hour. (HH:mm) */ - private static final DateFormatter HOUR_MINUTE = new JavaDateFormatter( + private static final DateFormatter HOUR_MINUTE = newDateFormatter( "hour_minute", DateTimeFormatter.ofPattern("HH:mm", Locale.ROOT), HOUR_MINUTE_FORMATTER @@ -1613,7 +1623,7 @@ public class DateFormatters { /* * A strict formatter that formats or parses a hour, minute and second, such as '09:43:25'. */ - private static final DateFormatter HOUR_MINUTE_SECOND = new JavaDateFormatter( + private static final DateFormatter HOUR_MINUTE_SECOND = newDateFormatter( "hour_minute_second", STRICT_HOUR_MINUTE_SECOND_FORMATTER, new DateTimeFormatterBuilder().append(HOUR_MINUTE_FORMATTER) @@ -1626,7 +1636,7 @@ public class DateFormatters { /* * Returns a formatter for a two digit hour of day. 
(HH) */ - private static final DateFormatter HOUR = new JavaDateFormatter( + private static final DateFormatter HOUR = newDateFormatter( "hour", DateTimeFormatter.ofPattern("HH", Locale.ROOT), new DateTimeFormatterBuilder().appendValue(HOUR_OF_DAY, 1, 2, SignStyle.NOT_NEGATIVE) @@ -1649,7 +1659,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time, using a four * digit year and three digit dayOfYear (uuuu-DDD'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter ORDINAL_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter ORDINAL_DATE_TIME = newDateFormatter( "ordinal_date_time", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -1675,7 +1685,7 @@ public class DateFormatters { * Returns a formatter for a full ordinal date and time without millis, * using a four digit year and three digit dayOfYear (uuuu-DDD'T'HH:mm:ssZZ). */ - private static final DateFormatter ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter ORDINAL_DATE_TIME_NO_MILLIS = newDateFormatter( "ordinal_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) .appendOffset("+HH:MM", "Z") @@ -1695,7 +1705,7 @@ public class DateFormatters { * Returns a formatter that combines a full weekyear date and time, * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter WEEK_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter WEEK_DATE_TIME = newDateFormatter( "week_date_time", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) .append(STRICT_TIME_PRINTER) @@ -1720,7 +1730,7 @@ public class DateFormatters { * Returns a formatter that combines a full weekyear date and time, * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ssZZ). */ - private static final DateFormatter WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter WEEK_DATE_TIME_NO_MILLIS = newDateFormatter( "week_date_time_no_millis", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) .append(STRICT_TIME_NO_MILLIS_BASE) @@ -1743,7 +1753,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time, * separated by a 'T' (YYYY'W'wwe'T'HHmmss.SSSX). */ - private static final DateFormatter BASIC_WEEK_DATE_TIME = new JavaDateFormatter( + private static final DateFormatter BASIC_WEEK_DATE_TIME = newDateFormatter( "basic_week_date_time", new DateTimeFormatterBuilder().append(STRICT_BASIC_WEEK_DATE_PRINTER) .append(DateTimeFormatter.ofPattern("'T'HHmmss.SSSX", Locale.ROOT)) @@ -1765,7 +1775,7 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time, * separated by a 'T' (YYYY'W'wwe'T'HHmmssX). */ - private static final DateFormatter BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter BASIC_WEEK_DATE_TIME_NO_MILLIS = newDateFormatter( "basic_week_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_BASIC_WEEK_DATE_PRINTER) .append(DateTimeFormatter.ofPattern("'T'HHmmssX", Locale.ROOT)) @@ -1790,7 +1800,7 @@ public class DateFormatters { * hour, two digit second of minute, three digit fraction of second, and * time zone offset (HH:mm:ss.SSSZZ). 
*/ - private static final DateFormatter TIME = new JavaDateFormatter( + private static final DateFormatter TIME = newDateFormatter( "time", new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER) .appendOffset("+HH:MM", "Z") @@ -1810,7 +1820,7 @@ public class DateFormatters { * Returns a formatter for a two digit hour of day, two digit minute of * hour, two digit second of minute, andtime zone offset (HH:mm:ssZZ). */ - private static final DateFormatter TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter TIME_NO_MILLIS = newDateFormatter( "time_no_millis", new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE) .appendOffset("+HH:MM", "Z") @@ -1831,7 +1841,7 @@ public class DateFormatters { * hour, two digit second of minute, three digit fraction of second, and * time zone offset prefixed by 'T' ('T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter T_TIME = new JavaDateFormatter( + private static final DateFormatter T_TIME = newDateFormatter( "t_time", new DateTimeFormatterBuilder().appendLiteral('T') .append(STRICT_TIME_PRINTER) @@ -1855,7 +1865,7 @@ public class DateFormatters { * hour, two digit second of minute, and time zone offset prefixed * by 'T' ('T'HH:mm:ssZZ). */ - private static final DateFormatter T_TIME_NO_MILLIS = new JavaDateFormatter( + private static final DateFormatter T_TIME_NO_MILLIS = newDateFormatter( "t_time_no_millis", new DateTimeFormatterBuilder().appendLiteral("T") .append(STRICT_TIME_NO_MILLIS_BASE) @@ -1875,7 +1885,7 @@ public class DateFormatters { /* * A strict formatter that formats or parses a year and a month, such as '2011-12'. */ - private static final DateFormatter YEAR_MONTH = new JavaDateFormatter( + private static final DateFormatter YEAR_MONTH = newDateFormatter( "year_month", new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD) .appendLiteral("-") @@ -1892,7 +1902,7 @@ public class DateFormatters { /* * A strict date formatter that formats or parses a date without an offset, such as '2011-12-03'. */ - private static final DateFormatter YEAR_MONTH_DAY = new JavaDateFormatter( + private static final DateFormatter YEAR_MONTH_DAY = newDateFormatter( "year_month_day", STRICT_YEAR_MONTH_DAY_FORMATTER, new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR) @@ -1908,13 +1918,13 @@ public class DateFormatters { * Returns a formatter for a full date as four digit weekyear, two digit * week of weekyear, and one digit day of week (YYYY-'W'ww-e). */ - private static final DateFormatter WEEK_DATE = new JavaDateFormatter("week_date", ISO_WEEK_DATE, WEEK_DATE_FORMATTER); + private static final DateFormatter WEEK_DATE = newDateFormatter("week_date", ISO_WEEK_DATE, WEEK_DATE_FORMATTER); /* * Returns a formatter for a four digit weekyear and two digit week of * weekyear. (YYYY-'W'ww) */ - private static final DateFormatter WEEKYEAR_WEEK = new JavaDateFormatter( + private static final DateFormatter WEEKYEAR_WEEK = newDateFormatter( "weekyear_week", STRICT_WEEKYEAR_WEEK_FORMATTER, new DateTimeFormatterBuilder().appendValue(WEEK_FIELDS_ROOT.weekBasedYear()) @@ -1928,7 +1938,7 @@ public class DateFormatters { * Returns a formatter for a four digit weekyear, two digit week of * weekyear, and one digit day of week. 
(YYYY-'W'ww-e) */ - private static final DateFormatter WEEKYEAR_WEEK_DAY = new JavaDateFormatter( + private static final DateFormatter WEEKYEAR_WEEK_DAY = newDateFormatter( "weekyear_week_day", new DateTimeFormatterBuilder().append(STRICT_WEEKYEAR_WEEK_FORMATTER) .appendLiteral("-") @@ -1954,7 +1964,7 @@ static DateFormatter forPattern(String input) { if (Strings.hasLength(input)) { input = input.trim(); } - if (input == null || input.length() == 0) { + if (input == null || input.isEmpty()) { throw new IllegalArgumentException("No date pattern provided"); } @@ -2121,7 +2131,7 @@ static DateFormatter forPattern(String input) { return STRICT_YEAR_MONTH_DAY; } else { try { - return new JavaDateFormatter( + return newDateFormatter( input, new DateTimeFormatterBuilder().appendPattern(input).toFormatter(Locale.ROOT).withResolverStyle(ResolverStyle.STRICT) ); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateTimeParser.java b/server/src/main/java/org/elasticsearch/common/time/DateTimeParser.java new file mode 100644 index 0000000000000..7c37f4a3c569e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/DateTimeParser.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import java.time.ZoneId; +import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; +import java.util.Optional; + +/** + * An object that can parse strings into datetime objects + */ +interface DateTimeParser { + ZoneId getZone(); + + Locale getLocale(); + + String getFormatString(); + + DateTimeParser withZone(ZoneId zone); + + DateTimeParser withLocale(Locale locale); + + void applyToBuilder(DateTimeFormatterBuilder builder); + + /** + * Parses the specified string. + *
<p>
        + * The pattern must fully match, using the whole string. + * If the string cannot be fully parsed, {@link DateTimeParseException} is thrown. + * @throws DateTimeParseException The string could not be fully parsed + */ + TemporalAccessor parse(CharSequence str); + + /** + * Try to parse the specified string. + *
<p>
        + * The pattern must fully match, using the whole string. It must not throw exceptions if parsing fails. + */ + Optional tryParse(CharSequence str); +} diff --git a/server/src/main/java/org/elasticsearch/common/time/DateTimePrinter.java b/server/src/main/java/org/elasticsearch/common/time/DateTimePrinter.java new file mode 100644 index 0000000000000..a042566665f3c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/DateTimePrinter.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import java.time.ZoneId; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +/** + * An object that can format datetime objects as strings + */ +interface DateTimePrinter { + ZoneId getZone(); + + Locale getLocale(); + + DateTimePrinter withZone(ZoneId zone); + + DateTimePrinter withLocale(Locale locale); + + /** + * Returns the string representation of the specified {@link TemporalAccessor} + */ + String format(TemporalAccessor accessor); +} diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index d9df109bda0cd..2c8ef4e48411f 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -251,18 +251,18 @@ public long getFrom(TemporalAccessor temporal) { static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter( "epoch_second", - SECONDS_FORMATTER1, + new JavaTimeDateTimePrinter(SECONDS_FORMATTER1), (builder, parser) -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), - SECONDS_FORMATTER1, - SECONDS_FORMATTER2 + new JavaTimeDateTimeParser(SECONDS_FORMATTER1), + new JavaTimeDateTimeParser(SECONDS_FORMATTER2) ); static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter( "epoch_millis", - MILLISECONDS_FORMATTER1, + new JavaTimeDateTimePrinter(MILLISECONDS_FORMATTER1), (builder, parser) -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), - MILLISECONDS_FORMATTER1, - MILLISECONDS_FORMATTER2 + new JavaTimeDateTimeParser(MILLISECONDS_FORMATTER1), + new JavaTimeDateTimeParser(MILLISECONDS_FORMATTER2) ); private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index b3bb734b8b69a..c3a25cb4e15b5 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -10,9 +10,7 @@ import org.elasticsearch.common.Strings; -import java.text.ParsePosition; import java.time.ZoneId; -import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; @@ -40,8 +38,8 @@ class JavaDateFormatter implements DateFormatter { * this allows the rounding logic to default CLOCK_HOUR_OF_AMPM field instead of HOUR_OF_DAY * without this logic, the rounding would result in a conflict as HOUR_OF_DAY would be 
missing, but CLOCK_HOUR_OF_AMPM would be provided */ - private static final BiConsumer DEFAULT_ROUND_UP = (builder, parser) -> { - String parserAsString = parser.toString(); + private static final BiConsumer DEFAULT_ROUND_UP = (builder, parser) -> { + String parserAsString = parser.getFormatString(); if (parserAsString.contains(ChronoField.DAY_OF_YEAR.toString())) { builder.parseDefaulting(ChronoField.DAY_OF_YEAR, 1L); // TODO ideally we should make defaulting for weekbased year here too, @@ -68,24 +66,12 @@ class JavaDateFormatter implements DateFormatter { }; private final String format; - private final DateTimeFormatter printer; - private final DateTimeFormatter[] parsers; - private final RoundUpFormatter roundupParser; - - private static final class RoundUpFormatter extends JavaDateFormatter { - - RoundUpFormatter(String format, DateTimeFormatter[] roundUpParsers) { - super(format, roundUpParsers[0], (RoundUpFormatter) null, roundUpParsers); - } - - @Override - JavaDateFormatter getRoundupParser() { - throw new UnsupportedOperationException("RoundUpFormatter does not have another roundUpFormatter"); - } - } + private final DateTimePrinter printer; + private final DateTimeParser[] parsers; + final DateTimeParser[] roundupParsers; // named formatters use default roundUpParser - JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { + JavaDateFormatter(String format, DateTimePrinter printer, DateTimeParser... parsers) { this( format, printer, @@ -95,30 +81,29 @@ JavaDateFormatter getRoundupParser() { ); } - // subclasses override roundUpParser JavaDateFormatter( String format, - DateTimeFormatter printer, - BiConsumer roundupParserConsumer, - DateTimeFormatter... parsers + DateTimePrinter printer, + BiConsumer roundupParserConsumer, + DateTimeParser... parsers ) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } + if (parsers.length == 0) { + throw new IllegalArgumentException("parsers need to be specified"); + } this.printer = printer; this.format = format; - this.parsers = parsersArray(printer, parsers); - this.roundupParser = createRoundUpParser(format, roundupParserConsumer, locale(), this.parsers); + this.parsers = parsersArray(parsers); + this.roundupParsers = createRoundUpParsers(format, roundupParserConsumer, locale(), this.parsers); } - private static DateTimeFormatter[] parsersArray(DateTimeFormatter printer, DateTimeFormatter... parsers) { - if (parsers.length == 0) { - return new DateTimeFormatter[] { printer }; - } + private static DateTimeParser[] parsersArray(DateTimeParser[] parsers) { final ZoneId zoneId = parsers[0].getZone(); final Locale locale = parsers[0].getLocale(); for (int i = 1; i < parsers.length; i++) { - final DateTimeFormatter parser = parsers[i]; + final DateTimeParser parser = parsers[i]; if (Objects.equals(parser.getZone(), zoneId) == false) { throw new IllegalArgumentException("formatters must have the same time zone"); } @@ -138,75 +123,67 @@ private static DateTimeFormatter[] parsersArray(DateTimeFormatter printer, DateT * DateFormatters. * This means that we need to also have multiple RoundUp parsers. 
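     * For example (illustrative; this mirrors testRoundupFormatterWithEpochDates in DateFormattersTests):
     *   var formatter = (JavaDateFormatter) DateFormatter.forPattern("8epoch_second");
     *   Instant instant = DateFormatters.from(formatter.roundupParse("1234567890")).toInstant();
     * Here NANO_OF_SECOND is defaulted to 999_999_999 by the epoch_second round-up parser, and each
     * alternative of a union format keeps its own defaulting of this kind.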
*/ - private static RoundUpFormatter createRoundUpParser( + private static DateTimeParser[] createRoundUpParsers( String format, - BiConsumer roundupParserConsumer, + BiConsumer roundupParserConsumer, Locale locale, - DateTimeFormatter[] parsers + DateTimeParser[] parsers ) { - if (format.contains("||") == false) { - return new RoundUpFormatter(format, mapParsers(parser -> { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - builder.append(parser); - roundupParserConsumer.accept(builder, parser); - return builder.toFormatter(locale); - }, parsers)); - } - return null; + assert format.contains("||") == false; + return mapObjects(parser -> { + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); + parser.applyToBuilder(builder); + roundupParserConsumer.accept(builder, parser); + return new JavaTimeDateTimeParser(builder.toFormatter(locale)); + }, parsers); } - public static DateFormatter combined(String input, List formatters) { - assert formatters.size() > 0; + static DateFormatter combined(String input, List formatters) { + assert formatters.isEmpty() == false; - List parsers = new ArrayList<>(formatters.size()); - List roundUpParsers = new ArrayList<>(formatters.size()); + DateTimePrinter printer = null; + List parsers = new ArrayList<>(formatters.size()); + List roundUpParsers = new ArrayList<>(formatters.size()); - DateTimeFormatter printer = null; for (DateFormatter formatter : formatters) { - assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; if (printer == null) { - printer = javaDateFormatter.getPrinter(); + printer = javaDateFormatter.printer; } Collections.addAll(parsers, javaDateFormatter.parsers); - Collections.addAll(roundUpParsers, javaDateFormatter.getRoundupParser().parsers); + Collections.addAll(roundUpParsers, javaDateFormatter.roundupParsers); } - return new JavaDateFormatter( - input, - printer, - roundUpParsers.toArray(DateTimeFormatter[]::new), - parsers.toArray(DateTimeFormatter[]::new) - ); - } - - private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter[] roundUpParsers, DateTimeFormatter[] parsers) { - this(format, printer, new RoundUpFormatter(format, roundUpParsers), parsers); + return new JavaDateFormatter(input, printer, roundUpParsers.toArray(DateTimeParser[]::new), parsers.toArray(DateTimeParser[]::new)); } - private JavaDateFormatter(String format, DateTimeFormatter printer, RoundUpFormatter roundupParser, DateTimeFormatter[] parsers) { + private JavaDateFormatter(String format, DateTimePrinter printer, DateTimeParser[] roundupParsers, DateTimeParser[] parsers) { this.format = format; this.printer = printer; - this.roundupParser = roundupParser; + this.roundupParsers = roundupParsers; this.parsers = parsers; } - JavaDateFormatter getRoundupParser() { - return roundupParser; - } + TemporalAccessor roundupParse(String input) { + if (Strings.isNullOrEmpty(input)) { + throw new IllegalArgumentException("cannot parse empty datetime"); + } - DateTimeFormatter getPrinter() { - return printer; + try { + return doParse(input, roundupParsers); + } catch (Exception e) { + throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); + } } @Override public TemporalAccessor parse(String input) { if (Strings.isNullOrEmpty(input)) { - throw new IllegalArgumentException("cannot parse empty date"); + throw new IllegalArgumentException("cannot parse empty datetime"); } try { - return doParse(input); 
+ return doParse(input, parsers); } catch (Exception e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } @@ -214,67 +191,56 @@ public TemporalAccessor parse(String input) { /** * Attempt parsing the input without throwing exception. If multiple parsers are provided, - * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. - * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject - * which does not throw exceptions when parsing failed. - * - * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) - * patterns. Joda does not suffer from this. - * https://bugs.openjdk.java.net/browse/JDK-8188771 + * it will continue iterating until it finds one that works. * * @param input An arbitrary string resembling the string representation of a date or time * @return a TemporalAccessor if parsing was successful. * @throws DateTimeParseException when unable to parse with any parsers */ - private TemporalAccessor doParse(String input) { + private static TemporalAccessor doParse(String input, DateTimeParser[] parsers) { if (parsers.length > 1) { - for (DateTimeFormatter formatter : parsers) { - ParsePosition pos = new ParsePosition(0); - Object object = formatter.toFormat().parseObject(input, pos); - if (parsingSucceeded(object, input, pos)) { - return (TemporalAccessor) object; + for (DateTimeParser formatter : parsers) { + var result = formatter.tryParse(input); + if (result.isPresent()) { + return result.get(); } } throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); } - return this.parsers[0].parse(input); - } - - private static boolean parsingSucceeded(Object object, String input, ParsePosition pos) { - return object != null && pos.getIndex() == input.length(); + return parsers[0].parse(input); } @Override public DateFormatter withZone(ZoneId zoneId) { - // shortcurt to not create new objects unnecessarily + // shortcut to not create new objects unnecessarily if (zoneId.equals(zone())) { return this; } - return mapParsers(p -> p.withZone(zoneId)); + return mapParsers(p -> p.withZone(zoneId), p -> p.withZone(zoneId)); } @Override public DateFormatter withLocale(Locale locale) { - // shortcurt to not create new objects unnecessarily + // shortcut to not create new objects unnecessarily if (locale.equals(locale())) { return this; } - return mapParsers(p -> p.withLocale(locale)); + return mapParsers(p -> p.withLocale(locale), p -> p.withLocale(locale)); } - private JavaDateFormatter mapParsers(UnaryOperator mapping) { + private JavaDateFormatter mapParsers(UnaryOperator printerMapping, UnaryOperator parserMapping) { return new JavaDateFormatter( format, - mapping.apply(printer), - mapParsers(mapping, ((JavaDateFormatter) this.roundupParser).parsers), - mapParsers(mapping, this.parsers) + printerMapping.apply(printer), + mapObjects(parserMapping, this.roundupParsers), + mapObjects(parserMapping, this.parsers) ); } - private static DateTimeFormatter[] mapParsers(UnaryOperator mapping, DateTimeFormatter[] parsers) { - DateTimeFormatter[] res = new DateTimeFormatter[parsers.length]; - for (int i = 0; i < parsers.length; i++) { - res[i] = mapping.apply(parsers[i]); + private static T[] mapObjects(UnaryOperator mapping, T[] objects) { + T[] res = objects.clone(); + for (int i = 0; i < objects.length; i++) { + res[i] = mapping.apply(objects[i]); } return 
res; } @@ -301,7 +267,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(format, this, getRoundupParser()); + return new JavaDateMathParser(format, this::parse, this::roundupParse); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 7702a0344c7df..f48e4725af956 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -23,6 +23,7 @@ import java.time.temporal.TemporalAdjusters; import java.time.temporal.TemporalQueries; import java.util.Objects; +import java.util.function.Function; import java.util.function.LongSupplier; /** @@ -34,15 +35,14 @@ */ public class JavaDateMathParser implements DateMathParser { - private final JavaDateFormatter formatter; private final String format; - private final JavaDateFormatter roundupParser; + private final Function parser; + private final Function roundupParser; - JavaDateMathParser(String format, JavaDateFormatter formatter, JavaDateFormatter roundupParser) { + JavaDateMathParser(String format, Function parser, Function roundupParser) { this.format = format; + this.parser = Objects.requireNonNull(parser); this.roundupParser = roundupParser; - Objects.requireNonNull(formatter); - this.formatter = formatter; } @Override @@ -201,15 +201,15 @@ private static Instant parseMath(final String mathString, final Instant time, fi private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { if (Strings.isNullOrEmpty(value)) { - throw new ElasticsearchParseException("cannot parse empty date"); + throw new ElasticsearchParseException("cannot parse empty datetime"); } - DateFormatter formatter = roundUpIfNoTime ? this.roundupParser : this.formatter; + Function formatter = roundUpIfNoTime ? roundupParser : this.parser; try { if (timeZone == null) { - return DateFormatters.from(formatter.parse(value)).toInstant(); + return DateFormatters.from(formatter.apply(value)).toInstant(); } else { - TemporalAccessor accessor = formatter.parse(value); + TemporalAccessor accessor = formatter.apply(value); // Use the offset if provided, otherwise fall back to the zone, or null. ZoneOffset offset = TemporalQueries.offset().queryFrom(accessor); ZoneId zoneId = offset == null ? TemporalQueries.zoneId().queryFrom(accessor) : ZoneId.ofOffset("", offset); diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimeParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimeParser.java new file mode 100644 index 0000000000000..c473b81771a53 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimeParser.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.time; + +import java.text.ParsePosition; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; +import java.util.Optional; + +class JavaTimeDateTimeParser implements DateTimeParser { + + private final DateTimeFormatter formatter; + + JavaTimeDateTimeParser(DateTimeFormatter formatter) { + this.formatter = formatter; + } + + @Override + public ZoneId getZone() { + return formatter.getZone(); + } + + @Override + public Locale getLocale() { + return formatter.getLocale(); + } + + @Override + public String getFormatString() { + return formatter.toString(); + } + + @Override + public DateTimeParser withZone(ZoneId zone) { + return new JavaTimeDateTimeParser(formatter.withZone(zone)); + } + + @Override + public DateTimeParser withLocale(Locale locale) { + return new JavaTimeDateTimeParser(formatter.withLocale(locale)); + } + + @Override + public void applyToBuilder(DateTimeFormatterBuilder builder) { + builder.append(formatter); + } + + @Override + public TemporalAccessor parse(CharSequence str) { + return formatter.parse(str); + } + + @Override + public Optional tryParse(CharSequence str) { + ParsePosition pos = new ParsePosition(0); + return Optional.ofNullable((TemporalAccessor) formatter.toFormat().parseObject(str.toString(), pos)) + .filter(ta -> pos.getIndex() == str.length()); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimePrinter.java b/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimePrinter.java new file mode 100644 index 0000000000000..eaa46fc88c228 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/JavaTimeDateTimePrinter.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.time; + +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +class JavaTimeDateTimePrinter implements DateTimePrinter { + + private final DateTimeFormatter formatter; + + JavaTimeDateTimePrinter(DateTimeFormatter formatter) { + this.formatter = formatter; + } + + @Override + public ZoneId getZone() { + return formatter.getZone(); + } + + @Override + public Locale getLocale() { + return formatter.getLocale(); + } + + @Override + public DateTimePrinter withZone(ZoneId zone) { + return new JavaTimeDateTimePrinter(formatter.withZone(zone)); + } + + @Override + public DateTimePrinter withLocale(Locale locale) { + return new JavaTimeDateTimePrinter(formatter.withLocale(locale)); + } + + @Override + public String format(TemporalAccessor accessor) { + return formatter.format(accessor); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index f00697a3ae870..a2bf548de4671 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -22,10 +22,13 @@ import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; +import java.util.List; import java.util.Locale; +import static org.elasticsearch.test.LambdaMatchers.transformedItemsMatch; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -548,8 +551,8 @@ public void testIso8601Parsing() { public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second - JavaDateFormatter roundUpFormatter = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_millis")).getRoundupParser(); - Instant epochMilliInstant = DateFormatters.from(roundUpFormatter.parse("1234567890")).toInstant(); + var formatter = (JavaDateFormatter) DateFormatter.forPattern("8epoch_millis"); + Instant epochMilliInstant = DateFormatters.from(formatter.roundupParse("1234567890")).toInstant(); assertThat(epochMilliInstant.getLong(ChronoField.NANO_OF_SECOND), is(890_999_999L)); assertRoundupFormatter("strict_date_optional_time||epoch_millis", "2018-10-10T12:13:14.123Z", 1539173594123L); @@ -561,8 +564,8 @@ public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_second", "1234567890", 1234567890999L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second - JavaDateFormatter epochSecondRoundupParser = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_second")).getRoundupParser(); - Instant epochSecondInstant = DateFormatters.from(epochSecondRoundupParser.parse("1234567890")).toInstant(); + formatter = (JavaDateFormatter) DateFormatter.forPattern("8epoch_second"); + Instant epochSecondInstant = DateFormatters.from(formatter.roundupParse("1234567890")).toInstant(); assertThat(epochSecondInstant.getLong(ChronoField.NANO_OF_SECOND), is(999_999_999L)); assertRoundupFormatter("strict_date_optional_time||epoch_second", "2018-10-10T12:13:14.123Z", 
1539173594123L); @@ -583,8 +586,7 @@ public void testYearWithoutMonthRoundUp() { private void assertRoundupFormatter(String format, String input, long expectedMilliSeconds) { JavaDateFormatter dateFormatter = (JavaDateFormatter) DateFormatter.forPattern(format); dateFormatter.parse(input); - JavaDateFormatter roundUpFormatter = dateFormatter.getRoundupParser(); - long millis = DateFormatters.from(roundUpFormatter.parse(input)).toInstant().toEpochMilli(); + long millis = DateFormatters.from(dateFormatter.roundupParse(input)).toInstant().toEpochMilli(); assertThat(millis, is(expectedMilliSeconds)); } @@ -598,9 +600,8 @@ public void testRoundupFormatterZone() { "strict_date_optional_time||date_optional_time" ); JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withZone(zoneId); - JavaDateFormatter roundUpFormatter = formatter.getRoundupParser(); - assertThat(roundUpFormatter.zone(), is(zoneId)); assertThat(formatter.zone(), is(zoneId)); + assertThat(List.of(formatter.roundupParsers), transformedItemsMatch(DateTimeParser::getZone, everyItem(is(zoneId)))); } public void testRoundupFormatterLocale() { @@ -613,9 +614,8 @@ public void testRoundupFormatterLocale() { "strict_date_optional_time||date_optional_time" ); JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withLocale(locale); - JavaDateFormatter roundupParser = formatter.getRoundupParser(); - assertThat(roundupParser.locale(), is(locale)); assertThat(formatter.locale(), is(locale)); + assertThat(List.of(formatter.roundupParsers), transformedItemsMatch(DateTimeParser::getLocale, everyItem(is(locale)))); } public void test0MillisAreFormatted() { diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 1b17b266d9cb6..9a712867bb2c5 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -34,15 +34,19 @@ public class JavaDateMathParserTests extends ESTestCase { public void testRoundUpParserBasedOnList() { DateFormatter formatter = new JavaDateFormatter( "test", - new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd").toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd'T'HH:mm:ss.S") - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd'T'HH:mm:ss.S") - .appendOffset("+HHmm", "Z") - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) + new JavaTimeDateTimePrinter(new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd").toFormatter(Locale.ROOT)), + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd'T'HH:mm:ss.S") + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().appendPattern("uuuu-MM-dd'T'HH:mm:ss.S") + .appendOffset("+HHmm", "Z") + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ) ); Instant parsed = formatter.toDateMathParser().parse("1970-01-01T00:00:00.0+0000", () -> 0L, true, (ZoneId) null); assertThat(parsed.toEpochMilli(), equalTo(0L)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 2b3c9d166946a..808249a01969a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -102,7 +102,7 @@ public static Iterable parameters() { "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.IllegalArgumentException: " + (bytesRef.utf8ToString().isEmpty() - ? "cannot parse empty date" + ? "cannot parse empty datetime" : ("failed to parse date field [" + bytesRef.utf8ToString() + "] with format [yyyy-MM-dd'T'HH:mm:ss.SSS'Z']")) ) ); From b71a33c2e78e22ea0b1599be7df46d4eab924b27 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 14 Mar 2024 11:05:52 +0000 Subject: [PATCH 208/248] Mute JobsAndModelsIT test (#106349) --- .../org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java index 7f0a4cc381ee2..5be72ff9b7270 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java @@ -57,6 +57,7 @@ */ public class JobsAndModelsIT extends BaseMlIntegTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103588") public void testCluster_GivenAnomalyDetectionJobAndTrainedModelDeployment_ShouldNotAllocateBothOnSameNode() throws Exception { // This test starts 2 ML nodes and then starts an anomaly detection job and a // trained model deployment that do not both fit in one node. 
We then proceed From fc6c2dc35ba8ed5fbd58bf2559c3f582557838de Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Thu, 14 Mar 2024 13:31:05 +0100 Subject: [PATCH 209/248] Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors (#106329) * Update delete object to never fail if alias does not exist * Update docs/changelog/106329.yaml * Update changelog * Fix area in changelog --- docs/changelog/106329.yaml | 5 ++++ .../40_search_application_delete.yml | 15 +++++++++++ .../search/SearchApplicationIndexService.java | 25 +++++++++++-------- 3 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/106329.yaml diff --git a/docs/changelog/106329.yaml b/docs/changelog/106329.yaml new file mode 100644 index 0000000000000..78e811e7987b6 --- /dev/null +++ b/docs/changelog/106329.yaml @@ -0,0 +1,5 @@ +pr: 106329 +summary: Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/40_search_application_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/40_search_application_delete.yml index c196d84755f99..dfc1609a4183d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/40_search_application_delete.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/40_search_application_delete.yml @@ -77,3 +77,18 @@ teardown: name: test-search-application-to-delete - match: { acknowledged: true } + +--- +"Delete Search Application - Alias does not exist as alias was explicitly removed": + - do: + indices.update_aliases: + body: + actions: + - remove: + index: test-index + alias: test-search-application-to-delete + + search_application.delete: + name: test-search-application-to-delete + + - match: { acknowledged: true } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 46e4d45f2c146..61e425d4b05dd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -330,16 +330,21 @@ private void removeAlias(String searchAliasName, ActionListener( - searchAliasName, - listener, - (l, acknowledgedResponse) -> l.onResponse(AcknowledgedResponse.TRUE) - ) - ); + client.admin().indices().aliases(aliasesRequest, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + listener.onResponse(AcknowledgedResponse.TRUE); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ResourceNotFoundException) { + listener.onResponse(AcknowledgedResponse.TRUE); + } else { + listener.onFailure(e); + } + } + }); } /** From 12299b89d0ed92eab39f2b61db16629362750470 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 14 Mar 2024 14:10:13 +0100 Subject: [PATCH 210/248] Troubleshooting unbalanced cluster docs (#105774) This adds initial page with explanation on balancing approach and steps to troubleshoot it. 
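The `removeAlias` change in the Search Applications fix above (#106329) boils down to making the delete idempotent: an alias that is already gone (`ResourceNotFoundException`) is reported back as an acknowledged delete, while every other failure still propagates to the caller. A minimal, self-contained sketch of that pattern in plain Java, using illustrative stand-ins rather than the actual `ActionListener`/`AcknowledgedResponse` classes:

[source,java]
----
/**
 * Sketch of the idempotent-delete listener pattern from the Search Applications fix above.
 * Class and method names are illustrative stand-ins, not Elasticsearch APIs.
 */
public class IdempotentDeleteSketch {

    /** Stand-in for ResourceNotFoundException. */
    static class NotFoundException extends RuntimeException {
        NotFoundException(String message) {
            super(message);
        }
    }

    /** Stand-in for ActionListener<AcknowledgedResponse>. */
    interface Listener {
        void onResponse(boolean acknowledged);

        void onFailure(Exception e);
    }

    /** Pretends to remove an alias; the underlying call throws if the alias does not exist. */
    static void removeAlias(String alias, Listener listener) {
        try {
            if ("missing-alias".equals(alias)) {
                throw new NotFoundException("alias [" + alias + "] missing");
            }
            listener.onResponse(true);
        } catch (Exception e) {
            // The key change: a missing alias counts as an acknowledged delete,
            // any other failure is still surfaced to the caller.
            if (e instanceof NotFoundException) {
                listener.onResponse(true);
            } else {
                listener.onFailure(e);
            }
        }
    }

    public static void main(String[] args) {
        Listener printing = new Listener() {
            @Override
            public void onResponse(boolean acknowledged) {
                System.out.println("acknowledged=" + acknowledged);
            }

            @Override
            public void onFailure(Exception e) {
                System.out.println("failed: " + e.getMessage());
            }
        };
        removeAlias("existing-alias", printing); // acknowledged=true
        removeAlias("missing-alias", printing);  // also acknowledged=true rather than an error
    }
}
----
This mirrors the yaml REST test added above, where the alias is removed explicitly before the application is deleted and the delete is still expected to acknowledge.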
--- docs/reference/troubleshooting.asciidoc | 3 + ...roubleshooting-unbalanced-cluster.asciidoc | 74 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index 64df699d33638..01ef39b69c529 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -56,6 +56,7 @@ fix problems that an {es} deployment might encounter. * <> * <> * <> +* <> * <> [discrete] @@ -135,3 +136,5 @@ include::watcher/troubleshooting.asciidoc[] include::troubleshooting/troubleshooting-searches.asciidoc[] include::troubleshooting/troubleshooting-shards-capacity.asciidoc[] + +include::troubleshooting/troubleshooting-unbalanced-cluster.asciidoc[] diff --git a/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc new file mode 100644 index 0000000000000..ca06ec066c8a8 --- /dev/null +++ b/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc @@ -0,0 +1,74 @@ +[[troubleshooting-unbalanced-cluster]] +== Troubleshooting an unbalanced cluster + +Elasticsearch balances shards across data tiers to achieve a good compromise between: + +* shard count +* disk usage +* write load (for indices in data streams) + +Elasticsearch does not take into account the amount or complexity of search queries when rebalancing shards. +This is indirectly achieved by balancing shard count and disk usage. + +There is no guarantee that individual components will be evenly spread across the nodes. +This could happen if some nodes have fewer shards, or are using less disk space, +but are assigned shards with higher write loads. + +Use the <> to list workloads per node: + +[source,console] +-------------------------------------------------- +GET /_cat/allocation?v +-------------------------------------------------- +// TEST[s/^/PUT test\n{"settings": {"number_of_replicas": 0}}\n/] + +The API returns the following response: + +[source,text] +-------------------------------------------------- +shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst +-------------------------------------------------- +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] +// TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] + +This response contains the following information that influences balancing: + +* `shards` is the current number of shards allocated to the node +* `shards.undesired` is the number of shards that needs to be moved to other nodes to finish balancing +* `disk.indices.forecast` is the expected disk usage according to projected shard growth +* `write_load.forecast` is the projected total write load associated with this node + +A cluster is considered balanced when all shards are in their desired locations, +which means that no further shard movements are planned (all `shards.undesired` values are equal to 0). + +Some operations such as node restarting, decommissioning, or changing cluster allocation settings +are disruptive and might require multiple shards to move in order to rebalance the cluster. + +Shard movement order is not deterministic and mostly determined by the source and target node readiness to move a shard. 
+While rebalancing is in progress, some nodes might appear busier than others. + +When a shard is allocated to an undesired node it uses the resources of the current node instead of the target. +This might cause a hotspot (disk or CPU) when multiple shards reside on the current node that have not been +moved to their corresponding targets yet. + +If a cluster takes a long time to finish rebalancing you might find the following log entries: +[source,text] +-------------------------------------------------- +[WARN][o.e.c.r.a.a.DesiredBalanceReconciler] [10%] of assigned shards (10/100) are not on their desired nodes, which exceeds the warn threshold of [10%] +-------------------------------------------------- +This is not concerning as long as the number of such shards is decreasing and this warning appears occasionally, +for example after rolling restarts or changing allocation settings. + +If the cluster has this warning repeatedly for an extended period of time (multiple hours), +it is possible that the desired balance is diverging too far from the current state. + +If so, increase the <> +to reduce the sensitivity of the algorithm that tries to level up the shard count and disk usage within the cluster. + +And reset the desired balance using the following API call: + +[source,console,id=delete-desired-balance-request-example] +-------------------------------------------------- +DELETE /_internal/desired_balance +-------------------------------------------------- From d50002853a7d06ac2e07e39169f2a5b5b346673b Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 14 Mar 2024 13:55:27 +0000 Subject: [PATCH 211/248] Small cleanup in `FollowersChecker` (#106355) Replaces a call to `ActionRunnable#supply` with the more appropriate `ActionRunnable#run`, and extracts the executor constant.
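The distinction this cleanup leans on: `ActionRunnable#supply` completes the listener with whatever a `CheckedSupplier` returns, while `ActionRunnable#run` takes a `CheckedRunnable` and completes the listener with `null` itself, so void work no longer needs an artificial `return null`. A simplified, self-contained analogue of the two factories (plain Java, not the real `org.elasticsearch.action.ActionRunnable`):

[source,java]
----
import java.util.function.Consumer;

/**
 * Simplified analogue of ActionRunnable.run vs ActionRunnable.supply, to show why `run`
 * fits a handler that produces no value. Types here are stand-ins, not Elasticsearch classes.
 */
public class ActionRunnableSketch {

    /** Stand-in for ActionListener<T>: a success callback and a failure callback. */
    record Listener<T>(Consumer<T> onResponse, Consumer<Exception> onFailure) {}

    interface CheckedRunnable {
        void run() throws Exception;
    }

    interface CheckedSupplier<T> {
        T get() throws Exception;
    }

    /** supply(...) completes the listener with the value returned by the supplier. */
    static <T> Runnable supply(Listener<T> listener, CheckedSupplier<T> supplier) {
        return () -> {
            try {
                listener.onResponse().accept(supplier.get());
            } catch (Exception e) {
                listener.onFailure().accept(e);
            }
        };
    }

    /** run(...) is for void work: it runs the task, then completes the listener with null itself. */
    static Runnable run(Listener<Void> listener, CheckedRunnable task) {
        return () -> {
            try {
                task.run();
                listener.onResponse().accept(null);
            } catch (Exception e) {
                listener.onFailure().accept(e);
            }
        };
    }

    public static void main(String[] args) {
        Listener<Void> listener = new Listener<>(v -> System.out.println("done"), e -> System.out.println("failed: " + e));

        // Before: void work dressed up as a CheckedSupplier<Void> that returns null.
        Runnable before = supply(listener, () -> {
            System.out.println("responding to follower check on the slow path");
            return null; // awkward: there is no value to supply
        });

        // After: run(...) expresses the same thing without the artificial return value.
        Runnable after = run(listener, () -> System.out.println("responding to follower check on the slow path"));

        before.run();
        after.run();
    }
}
----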
--- .../elasticsearch/action/ActionRunnable.java | 5 +++ .../coordination/FollowersChecker.java | 31 +++++++++---------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java index 7c0879941af89..b1c4482e703eb 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java @@ -36,6 +36,11 @@ protected void doRun() throws Exception { runnable.run(); listener.onResponse(null); } + + @Override + public String toString() { + return runnable.toString(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index cdd21efce3ed5..6c1474b454173 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.coordination.Coordinator.Mode; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; @@ -24,6 +23,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -102,6 +102,7 @@ public final class FollowersChecker { private final TransportService transportService; private final NodeHealthService nodeHealthService; + private final Executor clusterCoordinationExecutor; private volatile FastResponseState fastResponseState; public FollowersChecker( @@ -115,6 +116,7 @@ public FollowersChecker( this.handleRequestAndUpdateState = handleRequestAndUpdateState; this.onNodeFailure = onNodeFailure; this.nodeHealthService = nodeHealthService; + this.clusterCoordinationExecutor = transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION); followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); @@ -201,21 +203,18 @@ private void handleFollowerCheck(FollowerCheckRequest request, ActionListener() { - @Override - public Void get() { - logger.trace("responding to {} on slow path", request); - handleRequestAndUpdateState.accept(request); - return null; - } + clusterCoordinationExecutor.execute(ActionRunnable.run(listener, new CheckedRunnable<>() { + @Override + public void run() { + logger.trace("responding to {} on slow path", request); + handleRequestAndUpdateState.accept(request); + } - @Override - public String toString() { - return "responding to [" + request + "] on slow path"; - } - })); + @Override + public String toString() { + return "responding to [" + request + "] on slow path"; + } + })); } /** @@ -363,7 +362,7 @@ public void handleException(TransportException exp) { } void failNode(String reason) { - transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION).execute(new 
AbstractRunnable() { + clusterCoordinationExecutor.execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { From bf6ca879b037454c9ef39e50100abf06774e81fc Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 14 Mar 2024 15:21:56 +0100 Subject: [PATCH 212/248] Enhance ENRICH LookupRequest to include input data type (#106344) This is needed for refined support of type checking when doing range ENRICH queries --- .../org/elasticsearch/TransportVersions.java | 1 + .../esql/enrich/EnrichLookupOperator.java | 9 +++++ .../esql/enrich/EnrichLookupService.java | 40 +++++++++++++++++-- .../esql/planner/LocalExecutionPlanner.java | 4 +- 4 files changed, 50 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index a83b0ea0c90e5..70e1c2c763768 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -144,6 +144,7 @@ static TransportVersion def(int id) { public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0); public static final TransportVersion ESQL_EXTENDED_ENRICH_TYPES = def(8_605_00_0); public static final TransportVersion KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING = def(8_606_00_0); + public static final TransportVersion ESQL_EXTENDED_ENRICH_INPUT_TYPE = def(8_607_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 2d433f0732064..7e3dd53cdf037 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -19,6 +19,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import org.elasticsearch.xpack.ql.type.DataType; import java.io.IOException; import java.util.List; @@ -29,6 +30,7 @@ public final class EnrichLookupOperator extends AsyncOperator { private final String sessionId; private final CancellableTask parentTask; private final int inputChannel; + private final DataType inputDataType; private final String enrichIndex; private final String matchType; private final String matchField; @@ -41,6 +43,7 @@ public record Factory( int maxOutstandingRequests, int inputChannel, EnrichLookupService enrichLookupService, + DataType inputDataType, String enrichIndex, String matchType, String matchField, @@ -68,6 +71,7 @@ public Operator get(DriverContext driverContext) { maxOutstandingRequests, inputChannel, enrichLookupService, + inputDataType, enrichIndex, matchType, matchField, @@ -83,6 +87,7 @@ public EnrichLookupOperator( int maxOutstandingRequests, int inputChannel, EnrichLookupService enrichLookupService, + DataType inputDataType, String enrichIndex, String matchType, String matchField, @@ -93,6 +98,7 @@ public EnrichLookupOperator( this.parentTask = parentTask; this.inputChannel = inputChannel; this.enrichLookupService = enrichLookupService; + this.inputDataType = inputDataType; this.enrichIndex = enrichIndex; this.matchType = matchType; this.matchField = matchField; @@ -107,6 +113,7 @@ protected void performAsync(Page inputPage, ActionListener listener) { sessionId, parentTask, enrichIndex, + 
inputDataType, matchType, matchField, enrichFields, @@ -119,6 +126,8 @@ protected void performAsync(Page inputPage, ActionListener listener) { public String toString() { return "EnrichOperator[index=" + enrichIndex + + " input_type=" + + inputDataType + " match_field=" + matchField + " enrich_fields=" diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index b935632874157..423bd7e43bb0f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.enrich; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -80,6 +81,7 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import org.elasticsearch.xpack.ql.type.DataType; import java.io.IOException; import java.util.ArrayList; @@ -145,6 +147,7 @@ public void lookupAsync( String sessionId, CancellableTask parentTask, String index, + DataType inputDataType, String matchType, String matchField, List extractFields, @@ -169,7 +172,7 @@ public void lookupAsync( return; } DiscoveryNode targetNode = clusterState.nodes().get(shardRouting.currentNodeId()); - var lookupRequest = new LookupRequest(sessionId, shardId, matchType, matchField, inputPage, extractFields); + var lookupRequest = new LookupRequest(sessionId, shardId, inputDataType, matchType, matchField, inputPage, extractFields); // TODO: handle retry and avoid forking for the local lookup try (ThreadContext.StoredContext unused = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { transportService.sendChildRequest( @@ -237,6 +240,7 @@ private void doLookup( String sessionId, CancellableTask task, ShardId shardId, + DataType inputDataType, String matchType, String matchField, Page inputPage, @@ -336,7 +340,15 @@ private void doLookup( System.currentTimeMillis(), System.nanoTime(), driverContext, - () -> lookupDescription(sessionId, shardId, matchType, matchField, extractFields, inputPage.getPositionCount()), + () -> lookupDescription( + sessionId, + shardId, + inputDataType, + matchType, + matchField, + extractFields, + inputPage.getPositionCount() + ), queryOperator, intermediateOperators, outputOperator, @@ -398,6 +410,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas request.sessionId, (CancellableTask) task, request.shardId, + request.inputDataType, request.matchType, request.matchField, request.inputPage, @@ -412,6 +425,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas private static class LookupRequest extends TransportRequest implements IndicesRequest { private final String sessionId; private final ShardId shardId; + private final DataType inputDataType; private final String matchType; private final String matchField; private final Page inputPage; @@ -423,6 +437,7 @@ private static class LookupRequest extends TransportRequest implements IndicesRe LookupRequest( String sessionId, ShardId shardId, + DataType inputDataType, String matchType, String matchField, Page inputPage, @@ -430,6 +445,7 @@ 
private static class LookupRequest extends TransportRequest implements IndicesRe ) { this.sessionId = sessionId; this.shardId = shardId; + this.inputDataType = inputDataType; this.matchType = matchType; this.matchField = matchField; this.inputPage = inputPage; @@ -441,6 +457,10 @@ private static class LookupRequest extends TransportRequest implements IndicesRe super(in); this.sessionId = in.readString(); this.shardId = new ShardId(in); + String inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_EXTENDED_ENRICH_INPUT_TYPE)) + ? in.readString() + : "unknown"; + this.inputDataType = EsqlDataTypes.fromTypeName(inputDataType); this.matchType = in.readString(); this.matchField = in.readString(); try (BlockStreamInput bsi = new BlockStreamInput(in, blockFactory)) { @@ -456,6 +476,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(sessionId); out.writeWriteable(shardId); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_EXTENDED_ENRICH_INPUT_TYPE)) { + out.writeString(inputDataType.typeName()); + } out.writeString(matchType); out.writeString(matchField); out.writeWriteable(inputPage); @@ -478,7 +501,15 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, "", parentTaskId, headers) { @Override public String getDescription() { - return lookupDescription(sessionId, shardId, matchType, matchField, extractFields, inputPage.getPositionCount()); + return lookupDescription( + sessionId, + shardId, + inputDataType, + matchType, + matchField, + extractFields, + inputPage.getPositionCount() + ); } }; } @@ -513,6 +544,7 @@ public boolean hasReferences() { private static String lookupDescription( String sessionId, ShardId shardId, + DataType inputDataType, String matchType, String matchField, List extractFields, @@ -523,6 +555,8 @@ private static String lookupDescription( + sessionId + " ,shard=" + shardId + + " ,input_type=" + + inputDataType + " ,match_type=" + matchType + " ,match_field=" diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index af66a1ea069aa..34a31ac7e656d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -460,13 +460,15 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon if (enrichIndex == null) { throw new EsqlIllegalArgumentException("No concrete enrich index for cluster [" + clusterAlias + "]"); } + Layout.ChannelAndType input = source.layout.get(enrich.matchField().id()); return source.with( new EnrichLookupOperator.Factory( sessionId, parentTask, context.queryPragmas().enrichMaxWorkers(), - source.layout.get(enrich.matchField().id()).channel(), + input.channel(), enrichLookupService, + input.type(), enrichIndex, enrich.matchType(), enrich.policyMatchField(), From 5a7b53a277d8237965f23d3c0fd339fc5eaadbfb Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Thu, 14 Mar 2024 15:26:33 +0100 Subject: [PATCH 213/248] Fix leakage of log-level in LoggersTests (#106260) (fixes #106250) --- .../common/logging/LoggersTests.java | 53 ++++++++++--------- .../common/logging/LoggersTests.java | 53 ++++++++++--------- .../common/logging/TestLoggers.java | 51 ++++++++++++++++++ 
.../common/logging/TestLoggersTests.java | 31 +++++++++++ 4 files changed, 136 insertions(+), 52 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java create mode 100644 test/framework/src/test/java/org/elasticsearch/common/logging/TestLoggersTests.java diff --git a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index d8229b2401290..bd7e086d01f0d 100644 --- a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -69,36 +69,37 @@ public void testSetLevelWithRestrictions() { assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); for (String restricted : Loggers.RESTRICTED_LOGGERS) { + TestLoggers.runWithLoggersRestored(() -> { + // 'org.apache.http' is an example of a restricted logger, + // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, + // and the parent logger is `org.apache`. + Logger restrictedLogger = LogManager.getLogger(restricted); + Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); + Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); + + Loggers.setLevel(restrictedLogger, Level.INFO); + assertHasINFO(restrictedLogger, restrictedComponent); + + for (Logger log : List.of(restrictedComponent, restrictedLogger)) { + // DEBUG is rejected due to restriction + Loggers.setLevel(log, Level.DEBUG); + assertHasINFO(restrictedComponent, restrictedLogger); + } - // 'org.apache.http' is an example of a restricted logger, - // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, - // and the parent logger is `org.apache`. 
- Logger restrictedLogger = LogManager.getLogger(restricted); - Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); - Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); - - Loggers.setLevel(restrictedLogger, Level.INFO); - assertHasINFO(restrictedLogger, restrictedComponent); - - for (Logger log : List.of(restrictedComponent, restrictedLogger)) { - // DEBUG is rejected due to restriction - Loggers.setLevel(log, Level.DEBUG); + // OK for parent `org.apache`, but restriction is enforced for restricted descendants + Loggers.setLevel(parentLogger, Level.DEBUG); + assertEquals(Level.DEBUG, parentLogger.getLevel()); assertHasINFO(restrictedComponent, restrictedLogger); - } - // OK for parent `org.apache`, but restriction is enforced for restricted descendants - Loggers.setLevel(parentLogger, Level.DEBUG); - assertEquals(Level.DEBUG, parentLogger.getLevel()); - assertHasINFO(restrictedComponent, restrictedLogger); - - // Inheriting DEBUG of parent `org.apache` is rejected - Loggers.setLevel(restrictedLogger, (Level) null); - assertHasINFO(restrictedComponent, restrictedLogger); + // Inheriting DEBUG of parent `org.apache` is rejected + Loggers.setLevel(restrictedLogger, (Level) null); + assertHasINFO(restrictedComponent, restrictedLogger); - // DEBUG of root logger isn't propagated to restricted loggers - Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG); - assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); - assertHasINFO(restrictedComponent, restrictedLogger); + // DEBUG of root logger isn't propagated to restricted loggers + Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG); + assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + }); } } diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index 77603aaae068d..dc1ddd7b0d2c2 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -58,36 +58,37 @@ public void testCheckRestrictedLoggers() { public void testSetLevelWithRestrictions() { for (String restricted : restrictedLoggers) { + TestLoggers.runWithLoggersRestored(() -> { + // 'org.apache.http' is an example of a restricted logger, + // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, + // and the parent logger is `org.apache`. + Logger restrictedLogger = LogManager.getLogger(restricted); + Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); + Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); + + Loggers.setLevel(restrictedLogger, Level.INFO, restrictedLoggers); + assertHasINFO(restrictedLogger, restrictedComponent); + + for (Logger log : List.of(restrictedComponent, restrictedLogger)) { + // DEBUG is rejected due to restriction + Loggers.setLevel(log, Level.DEBUG, restrictedLoggers); + assertHasINFO(restrictedComponent, restrictedLogger); + } - // 'org.apache.http' is an example of a restricted logger, - // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, - // and the parent logger is `org.apache`. 
- Logger restrictedLogger = LogManager.getLogger(restricted); - Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); - Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); - - Loggers.setLevel(restrictedLogger, Level.INFO, restrictedLoggers); - assertHasINFO(restrictedLogger, restrictedComponent); - - for (Logger log : List.of(restrictedComponent, restrictedLogger)) { - // DEBUG is rejected due to restriction - Loggers.setLevel(log, Level.DEBUG, restrictedLoggers); + // OK for parent `org.apache`, but restriction is enforced for restricted descendants + Loggers.setLevel(parentLogger, Level.DEBUG, restrictedLoggers); + assertEquals(Level.DEBUG, parentLogger.getLevel()); assertHasINFO(restrictedComponent, restrictedLogger); - } - // OK for parent `org.apache`, but restriction is enforced for restricted descendants - Loggers.setLevel(parentLogger, Level.DEBUG, restrictedLoggers); - assertEquals(Level.DEBUG, parentLogger.getLevel()); - assertHasINFO(restrictedComponent, restrictedLogger); - - // Inheriting DEBUG of parent `org.apache` is rejected - Loggers.setLevel(restrictedLogger, null, restrictedLoggers); - assertHasINFO(restrictedComponent, restrictedLogger); + // Inheriting DEBUG of parent `org.apache` is rejected + Loggers.setLevel(restrictedLogger, null, restrictedLoggers); + assertHasINFO(restrictedComponent, restrictedLogger); - // DEBUG of root logger isn't propagated to restricted loggers - Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG, restrictedLoggers); - assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); - assertHasINFO(restrictedComponent, restrictedLogger); + // DEBUG of root logger isn't propagated to restricted loggers + Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG, restrictedLoggers); + assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + }); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java b/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java new file mode 100644 index 0000000000000..cfcef4c788789 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/TestLoggers.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.elasticsearch.common.util.Maps; + +import java.util.Collection; +import java.util.NavigableMap; +import java.util.function.Predicate; + +public class TestLoggers { + + // visible for testing + static NavigableMap getLogLevels() { + Configuration config = LoggerContext.getContext(false).getConfiguration(); + return getLogLevels(config.getLoggers().values()); + } + + private static NavigableMap getLogLevels(Collection configs) { + return configs.stream().collect(Maps.toUnmodifiableSortedMap(LoggerConfig::getName, LoggerConfig::getLevel)); + } + + /** + * Util to run a task with the original logger context restored afterwards. + * This will reset loggers to their previous log levels and remove any additional loggers configured while running task. + */ + public static void runWithLoggersRestored(Runnable task) { + Configuration config = LoggerContext.getContext(false).getConfiguration(); + Collection configs = config.getLoggers().values(); + var levels = getLogLevels(configs); + try { + task.run(); + } finally { + // remove any added logger + configs.stream().map(LoggerConfig::getName).filter(Predicate.not(levels::containsKey)).forEach(config::removeLogger); + // restore original levels (in the right order) + levels.forEach((logger, level) -> Loggers.setLevel(LogManager.getLogger(logger), level)); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/common/logging/TestLoggersTests.java b/test/framework/src/test/java/org/elasticsearch/common/logging/TestLoggersTests.java new file mode 100644 index 0000000000000..48bf8eb7c1f80 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/common/logging/TestLoggersTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.test.ESTestCase; + +import java.util.NavigableMap; + +import static org.hamcrest.Matchers.equalTo; + +public class TestLoggersTests extends ESTestCase { + + public void testRunWithLoggersRestored() { + NavigableMap levels = TestLoggers.getLogLevels(); + + TestLoggers.runWithLoggersRestored(() -> { + Loggers.setLevel(LogManager.getRootLogger(), Level.WARN); + Loggers.setLevel(LogManager.getLogger(TestLoggers.class), Level.DEBUG); + }); + + assertThat(TestLoggers.getLogLevels(), equalTo(levels)); + } +} From 5de1f176b45b982dba315b80bb9e0c00d9243b05 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 14 Mar 2024 10:37:36 -0500 Subject: [PATCH 214/248] disable validate when rewrite parameter is sent and the index access control list is non-null (#105709) --- docs/changelog/105709.yaml | 6 ++ .../xpack/security/Security.java | 4 +- .../ValidateRequestInterceptor.java | 61 ++++++++++++ .../ValidateRequestInterceptorTests.java | 94 +++++++++++++++++++ 4 files changed, 164 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/105709.yaml create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptor.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptorTests.java diff --git a/docs/changelog/105709.yaml b/docs/changelog/105709.yaml new file mode 100644 index 0000000000000..568d60a86334e --- /dev/null +++ b/docs/changelog/105709.yaml @@ -0,0 +1,6 @@ +pr: 105709 +summary: Disable validate when rewrite parameter is sent and the index access control + list is non-null +area: Security +type: bug +issues: [] diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index d3898cc510d77..3d27e9ee06ddb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -303,6 +303,7 @@ import org.elasticsearch.xpack.security.authz.interceptor.SearchRequestInterceptor; import org.elasticsearch.xpack.security.authz.interceptor.ShardSearchRequestInterceptor; import org.elasticsearch.xpack.security.authz.interceptor.UpdateRequestInterceptor; +import org.elasticsearch.xpack.security.authz.interceptor.ValidateRequestInterceptor; import org.elasticsearch.xpack.security.authz.restriction.WorkflowService; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.authz.store.DeprecationRoleDescriptorConsumer; @@ -999,7 +1000,8 @@ Collection createComponents( new UpdateRequestInterceptor(threadPool, getLicenseState()), new BulkShardRequestInterceptor(threadPool, getLicenseState()), new DlsFlsLicenseRequestInterceptor(threadPool.getThreadContext(), getLicenseState()), - new SearchRequestCacheDisablingInterceptor(threadPool, getLicenseState()) + new SearchRequestCacheDisablingInterceptor(threadPool, getLicenseState()), + new ValidateRequestInterceptor(threadPool, getLicenseState()) ) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptor.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptor.java new file mode 100644 index 0000000000000..1705ea7d14907 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptor.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.security.authz.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; + +import java.util.Map; + +public class ValidateRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor { + + public ValidateRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState) { + super(threadPool.getThreadContext(), licenseState); + } + + @Override + void disableFeatures( + IndicesRequest indicesRequest, + Map indexAccessControlByIndex, + ActionListener listener + ) { + final ValidateQueryRequest request = (ValidateQueryRequest) indicesRequest; + if (indexAccessControlByIndex.values().stream().anyMatch(iac -> iac.getDocumentPermissions().hasDocumentLevelPermissions())) { + if (hasRewrite(request)) { + listener.onFailure( + new ElasticsearchSecurityException( + "Validate with rewrite isn't supported if document level security is enabled", + RestStatus.BAD_REQUEST + ) + ); + } else { + listener.onResponse(null); + } + } else { + listener.onResponse(null); + } + } + + @Override + public boolean supports(IndicesRequest request) { + if (request instanceof ValidateQueryRequest validateQueryRequest) { + return hasRewrite(validateQueryRequest); + } else { + return false; + } + } + + private static boolean hasRewrite(ValidateQueryRequest validateQueryRequest) { + return validateQueryRequest.rewrite(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptorTests.java new file mode 100644 index 0000000000000..bb494b7855db0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/ValidateRequestInterceptorTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.authz.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE; +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ValidateRequestInterceptorTests extends ESTestCase { + + private ThreadPool threadPool; + private MockLicenseState licenseState; + private ValidateRequestInterceptor interceptor; + + @Before + public void init() { + threadPool = new TestThreadPool("validate request interceptor tests"); + licenseState = mock(MockLicenseState.class); + when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true); + interceptor = new ValidateRequestInterceptor(threadPool, licenseState); + } + + @After + public void stopThreadPool() { + terminate(threadPool); + } + + public void testValidateRequestWithDLS() { + final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(Set.of(new BytesArray(""" + {"term":{"username":"foo"}}"""))); // value does not matter + ElasticsearchClient client = mock(ElasticsearchClient.class); + ValidateQueryRequestBuilder builder = new ValidateQueryRequestBuilder(client); + final String index = randomAlphaOfLengthBetween(3, 8); + final PlainActionFuture listener1 = new PlainActionFuture<>(); + Map accessControlMap = Map.of( + index, + new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, documentPermissions) + ); + // with DLS and rewrite enabled + interceptor.disableFeatures(builder.setRewrite(true).request(), accessControlMap, listener1); + ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, () -> listener1.actionGet()); + assertThat(exception.getMessage(), containsString("Validate with rewrite isn't supported if document level security is enabled")); + + // with DLS and rewrite disabled + final PlainActionFuture listener2 = new PlainActionFuture<>(); + interceptor.disableFeatures(builder.setRewrite(false).request(), accessControlMap, listener2); + assertNull(listener2.actionGet()); + + } + + public void testValidateRequestWithOutDLS() { + final DocumentPermissions documentPermissions = null; // no DLS + ElasticsearchClient client = mock(ElasticsearchClient.class); + ValidateQueryRequestBuilder builder = new ValidateQueryRequestBuilder(client); + final String index = randomAlphaOfLengthBetween(3, 8); + final PlainActionFuture listener1 = new PlainActionFuture<>(); + Map accessControlMap = Map.of( + index, + new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, documentPermissions) + ); + // without 
DLS and rewrite enabled + interceptor.disableFeatures(builder.setRewrite(true).request(), accessControlMap, listener1); + assertNull(listener1.actionGet()); + + // without DLS and rewrite disabled + final PlainActionFuture listener2 = new PlainActionFuture<>(); + interceptor.disableFeatures(builder.setRewrite(false).request(), accessControlMap, listener2); + assertNull(listener2.actionGet()); + } +} From 5c06c58b9fff787b78a5ef2bbf31557cba57163e Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 14 Mar 2024 16:49:07 +0100 Subject: [PATCH 215/248] [DOCS] Remove CCS limitation for 8.13+ (#106343) --- docs/reference/esql/esql-limitations.asciidoc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 788177df64dc9..d007a7c3cfd06 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -140,12 +140,6 @@ you query, and query `keyword` sub-fields instead of `text` fields. {esql} does not support querying time series data streams (TSDS). -[discrete] -[[esql-limitations-ccs]] -=== {ccs-cap} is not supported - -{esql} does not support {ccs}. - [discrete] [[esql-limitations-date-math]] === Date math limitations From 946cf54ee52bb8c4459fd94dc4ee7d53a5d901a4 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 14 Mar 2024 09:31:41 -0700 Subject: [PATCH 216/248] Serialize big array vectors (#106327) With this change, we now serialize the underlying pages of big array vectors instead of values. Note that we avoid slicing into the Netty buffer because it's not tracked by the circuit breaker, which could be an issue for ESQL. --- docs/changelog/106327.yaml | 5 ++++ .../org/elasticsearch/TransportVersions.java | 1 + .../compute/data/BooleanBigArrayVector.java | 23 ++++++++++++++ .../compute/data/BooleanVector.java | 4 +++ .../compute/data/DoubleBigArrayVector.java | 24 +++++++++++++++ .../compute/data/DoubleVector.java | 4 +++ .../compute/data/IntBigArrayVector.java | 24 +++++++++++++++ .../elasticsearch/compute/data/IntVector.java | 4 +++ .../compute/data/LongBigArrayVector.java | 24 +++++++++++++++ .../compute/data/LongVector.java | 4 +++ .../elasticsearch/compute/data/Vector.java | 1 + .../compute/data/X-BigArrayVector.java.st | 30 +++++++++++++++++++ .../compute/data/X-Vector.java.st | 8 +++++ 13 files changed, 156 insertions(+) create mode 100644 docs/changelog/106327.yaml diff --git a/docs/changelog/106327.yaml b/docs/changelog/106327.yaml new file mode 100644 index 0000000000000..2b4b811ece40b --- /dev/null +++ b/docs/changelog/106327.yaml @@ -0,0 +1,5 @@ +pr: 106327 +summary: Serialize big array vectors +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 70e1c2c763768..f4ec58a7e2839 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -145,6 +145,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_EXTENDED_ENRICH_TYPES = def(8_605_00_0); public static final TransportVersion KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING = def(8_606_00_0); public static final TransportVersion ESQL_EXTENDED_ENRICH_INPUT_TYPE = def(8_607_00_0); + public static final TransportVersion ESQL_SERIALIZE_BIG_VECTOR = def(8_608_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index 9618edb1fa77a..5f6db129e73d3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -8,9 +8,13 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.core.Releasable; +import java.io.IOException; + /** * Vector implementation that defers to an enclosed {@link BitArray}. * Does not take ownership of the array and does not adjust circuit breakers to account for it. @@ -27,6 +31,25 @@ public BooleanBigArrayVector(BitArray values, int positionCount, BlockFactory bl this.values = values; } + static BooleanBigArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + BitArray values = new BitArray(blockFactory.bigArrays(), true, in); + boolean success = false; + try { + BooleanBigArrayVector vector = new BooleanBigArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(vector.ramBytesUsed() - RamUsageEstimator.sizeOf(values)); + success = true; + return vector; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public BooleanBlock asBlock() { return new BooleanVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index 2f50b45fbfc9d..7218f3d2771c8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -80,6 +80,7 @@ static BooleanVector readFrom(BlockFactory blockFactory, StreamInput in) throws case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBooleanVector(in.readBoolean(), positions); case SERIALIZE_VECTOR_ARRAY -> BooleanArrayVector.readArrayVector(positions, in, blockFactory); + case SERIALIZE_VECTOR_BIG_ARRAY -> BooleanBigArrayVector.readArrayVector(positions, in, blockFactory); default -> { assert false : "invalid vector serialization type [" + serializationType + "]"; throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); @@ -98,6 +99,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof BooleanArrayVector v) { out.writeByte(SERIALIZE_VECTOR_ARRAY); v.writeArrayVector(positions, out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_VECTOR) && this instanceof BooleanBigArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY); + v.writeArrayVector(positions, out); } else { out.writeByte(SERIALIZE_VECTOR_VALUES); writeValues(this, positions, out); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index 45b9b4bec14ba..8f6aedf31b50e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -8,9 +8,13 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.core.Releasable; +import java.io.IOException; + /** * Vector implementation that defers to an enclosed {@link DoubleArray}. * Does not take ownership of the array and does not adjust circuit breakers to account for it. @@ -27,6 +31,26 @@ public DoubleBigArrayVector(DoubleArray values, int positionCount, BlockFactory this.values = values; } + static DoubleBigArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + DoubleArray values = blockFactory.bigArrays().newDoubleArray(positions, false); + boolean success = false; + try { + values.fillWith(in); + DoubleBigArrayVector vector = new DoubleBigArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(vector.ramBytesUsed() - RamUsageEstimator.sizeOf(values)); + success = true; + return vector; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public DoubleBlock asBlock() { return new DoubleVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index c5553f6a102f9..1d71575b33316 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -81,6 +81,7 @@ static DoubleVector readFrom(BlockFactory blockFactory, StreamInput in) throws I case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantDoubleVector(in.readDouble(), positions); case SERIALIZE_VECTOR_ARRAY -> DoubleArrayVector.readArrayVector(positions, in, blockFactory); + case SERIALIZE_VECTOR_BIG_ARRAY -> DoubleBigArrayVector.readArrayVector(positions, in, blockFactory); default -> { assert false : "invalid vector serialization type [" + serializationType + "]"; throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); @@ -99,6 +100,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof DoubleArrayVector v) { out.writeByte(SERIALIZE_VECTOR_ARRAY); v.writeArrayVector(positions, out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_VECTOR) && this instanceof DoubleBigArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY); + v.writeArrayVector(positions, out); } else { out.writeByte(SERIALIZE_VECTOR_VALUES); writeValues(this, positions, out); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index b553c8aab8761..cab2baa9b00b1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -8,9 +8,13 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.IntArray; import org.elasticsearch.core.Releasable; +import java.io.IOException; + /** * Vector implementation that defers to an enclosed {@link IntArray}. * Does not take ownership of the array and does not adjust circuit breakers to account for it. @@ -27,6 +31,26 @@ public IntBigArrayVector(IntArray values, int positionCount, BlockFactory blockF this.values = values; } + static IntBigArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + IntArray values = blockFactory.bigArrays().newIntArray(positions, false); + boolean success = false; + try { + values.fillWith(in); + IntBigArrayVector vector = new IntBigArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(vector.ramBytesUsed() - RamUsageEstimator.sizeOf(values)); + success = true; + return vector; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public IntBlock asBlock() { return new IntVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index 1d4fb0741cab0..2b1562860db15 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -80,6 +80,7 @@ static IntVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOEx case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantIntVector(in.readInt(), positions); case SERIALIZE_VECTOR_ARRAY -> IntArrayVector.readArrayVector(positions, in, blockFactory); + case SERIALIZE_VECTOR_BIG_ARRAY -> IntBigArrayVector.readArrayVector(positions, in, blockFactory); default -> { assert false : "invalid vector serialization type [" + serializationType + "]"; throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); @@ -98,6 +99,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof IntArrayVector v) { out.writeByte(SERIALIZE_VECTOR_ARRAY); v.writeArrayVector(positions, out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_VECTOR) && this instanceof IntBigArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY); + v.writeArrayVector(positions, out); } else { out.writeByte(SERIALIZE_VECTOR_VALUES); writeValues(this, positions, out); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index d5ea5c9e2a453..d30dedd4cce16 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -8,9 +8,13 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasable; +import java.io.IOException; + /** * Vector implementation that defers to an enclosed {@link LongArray}. * Does not take ownership of the array and does not adjust circuit breakers to account for it. @@ -27,6 +31,26 @@ public LongBigArrayVector(LongArray values, int positionCount, BlockFactory bloc this.values = values; } + static LongBigArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { + LongArray values = blockFactory.bigArrays().newLongArray(positions, false); + boolean success = false; + try { + values.fillWith(in); + LongBigArrayVector vector = new LongBigArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(vector.ramBytesUsed() - RamUsageEstimator.sizeOf(values)); + success = true; + return vector; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public LongBlock asBlock() { return new LongVectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index 60592469f0ea1..2ebdb89a31262 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -81,6 +81,7 @@ static LongVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOE case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantLongVector(in.readLong(), positions); case SERIALIZE_VECTOR_ARRAY -> LongArrayVector.readArrayVector(positions, in, blockFactory); + case SERIALIZE_VECTOR_BIG_ARRAY -> LongBigArrayVector.readArrayVector(positions, in, blockFactory); default -> { assert false : "invalid vector serialization type [" + serializationType + "]"; throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]"); @@ -99,6 +100,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof LongArrayVector v) { out.writeByte(SERIALIZE_VECTOR_ARRAY); v.writeArrayVector(positions, out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_VECTOR) && this instanceof LongBigArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY); + v.writeArrayVector(positions, out); } else { out.writeByte(SERIALIZE_VECTOR_VALUES); writeValues(this, positions, out); diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index c309a7a0b8827..757e2a5b22145 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -82,4 +82,5 @@ interface Builder extends Releasable { byte SERIALIZE_VECTOR_VALUES = 0; byte SERIALIZE_VECTOR_CONSTANT = 1; byte SERIALIZE_VECTOR_ARRAY = 2; + byte SERIALIZE_VECTOR_BIG_ARRAY = 3; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index addca35643dd6..6a20385604aa0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -8,9 +8,13 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.$Array$; import org.elasticsearch.core.Releasable; +import java.io.IOException; + /** * Vector implementation that defers to an enclosed {@link $if(boolean)$Bit$else$$Type$$endif$Array}. * Does not take ownership of the array and does not adjust circuit breakers to account for it. @@ -27,6 +31,32 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ this.values = values; } + static $Type$BigArrayVector readArrayVector(int positions, StreamInput in, BlockFactory blockFactory) throws IOException { +$if(boolean)$ + $Array$ values = new BitArray(blockFactory.bigArrays(), true, in); +$else$ + $Array$ values = blockFactory.bigArrays().new$Type$Array(positions, false); +$endif$ + boolean success = false; + try { +$if(boolean)$$else$ + values.fillWith(in); +$endif$ + $Type$BigArrayVector vector = new $Type$BigArrayVector(values, positions, blockFactory); + blockFactory.adjustBreaker(vector.ramBytesUsed() - RamUsageEstimator.sizeOf(values)); + success = true; + return vector; + } finally { + if (success == false) { + values.close(); + } + } + } + + void writeArrayVector(int positions, StreamOutput out) throws IOException { + values.writeTo(out); + } + @Override public $Type$Block asBlock() { return new $Type$VectorBlock(this); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 0796801c55d40..01090b6cab18a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -114,6 +114,9 @@ $endif$ case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory); case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstant$Type$Vector(in.read$Type$(), positions); case SERIALIZE_VECTOR_ARRAY -> $Type$ArrayVector.readArrayVector(positions, in, blockFactory); +$if(BytesRef)$$else$ + case SERIALIZE_VECTOR_BIG_ARRAY -> $Type$BigArrayVector.readArrayVector(positions, in, blockFactory); +$endif$ default -> { assert false : "invalid vector serialization type [" + serializationType + "]"; throw new 
IllegalStateException("invalid vector serialization type [" + serializationType + "]"); @@ -136,6 +139,11 @@ $endif$ } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_VECTOR) && this instanceof $Type$ArrayVector v) { out.writeByte(SERIALIZE_VECTOR_ARRAY); v.writeArrayVector(positions, out); +$if(BytesRef)$$else$ + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_VECTOR) && this instanceof $Type$BigArrayVector v) { + out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY); + v.writeArrayVector(positions, out); +$endif$ } else { out.writeByte(SERIALIZE_VECTOR_VALUES); writeValues(this, positions, out); From 5f6b3ff7e6075a4de7d8cb4b6e0eb95ce843bbae Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 14 Mar 2024 17:33:31 +0100 Subject: [PATCH 217/248] [DOCS][ESQL] Add link to getting started notebook (#106345) --- docs/reference/esql/esql-get-started.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 4dae9ffcddd7f..29f61299cec30 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -9,6 +9,11 @@ preview::["Do not use {esql} on production environments. This functionality is i This guide shows how you can use {esql} to query and aggregate your data. +[TIP] +==== +This getting started is also available as an https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/esql/esql-getting-started.ipynb[interactive Python notebook] in the `elasticsearch-labs` GitHub repository. +==== + [discrete] [[esql-getting-started-prerequisites]] === Prerequisites From 1d7a0159d36a03a00c3d4df3495070c60474982a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 14 Mar 2024 10:30:14 -0700 Subject: [PATCH 218/248] Support jdk22 in zstd bindings (#106360) The foreign memory API changed between Java 21 and 22 in how to decode a string from native memory. This commit adds an multi-release class to handle the two different methods on MemorySegment to decode a string. 
--- libs/native/build.gradle | 4 ++++ .../nativeaccess/jdk/JdkZstdLibrary.java | 2 +- .../nativeaccess/jdk/MemorySegmentUtil.java | 23 +++++++++++++++++++ .../nativeaccess/jdk/MemorySegmentUtil.java | 20 ++++++++++++++++ .../elasticsearch/nativeaccess/ZstdTests.java | 2 -- 5 files changed, 48 insertions(+), 3 deletions(-) create mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java create mode 100644 libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java diff --git a/libs/native/build.gradle b/libs/native/build.gradle index dbe546619c7aa..150ca9c8e80f3 100644 --- a/libs/native/build.gradle +++ b/libs/native/build.gradle @@ -32,3 +32,7 @@ tasks.withType(CheckForbiddenApisTask).configureEach { tasks.named('forbiddenApisMain21').configure { ignoreMissingClasses = true } + +tasks.named('forbiddenApisMain22').configure { + ignoreMissingClasses = true +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java index 632240a844255..d193750939b23 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -72,7 +72,7 @@ public boolean isError(long code) { public String getErrorName(long code) { try { MemorySegment str = (MemorySegment) getErrorName$mh.invokeExact(code); - return str.reinterpret(Long.MAX_VALUE).getUtf8String(0); + return MemorySegmentUtil.getString(str.reinterpret(Long.MAX_VALUE), 0); } catch (Throwable t) { throw new AssertionError(t); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java new file mode 100644 index 0000000000000..53e4c06bf0435 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import java.lang.foreign.MemorySegment; + +/** + * Utility methods to act on MemorySegment apis which have changed in subsequent JDK releases. + */ +class MemorySegmentUtil { + + static String getString(MemorySegment segment, long offset) { + return segment.getUtf8String(offset); + } + + private MemorySegmentUtil() {} +} diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java new file mode 100644 index 0000000000000..c155647a3ccd4 --- /dev/null +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jdk; + +import java.lang.foreign.MemorySegment; + +public class MemorySegmentUtil { + + static String getString(MemorySegment segment, long offset) { + return segment.getString(offset); + } + + private MemorySegmentUtil() {} +} diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java index 26ae1ecb8a8b8..d051961b06c5f 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/ZstdTests.java @@ -36,7 +36,6 @@ public void testCompressBound() { expectThrows(IllegalArgumentException.class, () -> zstd.compressBound(Integer.MIN_VALUE)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106347") public void testCompressValidation() { try (var src = nativeAccess.newBuffer(1000); var dst = nativeAccess.newBuffer(500)) { var srcBuf = src.buffer(); @@ -56,7 +55,6 @@ public void testCompressValidation() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106347") public void testDecompressValidation() { try ( var original = nativeAccess.newBuffer(1000); From 75e67f07642e7091ba71f542e43b1b636a569190 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 14 Mar 2024 12:55:18 -0500 Subject: [PATCH 219/248] cross check livedocs for terms aggs when index access control list is non-null (#105714) --- docs/changelog/105714.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../aggregations/AggregatorFactories.java | 41 +++++ .../GlobalOrdinalsStringTermsAggregator.java | 153 +++++++++++++++--- .../terms/MapStringTermsAggregator.java | 16 +- .../bucket/terms/NumericTermsAggregator.java | 16 +- .../SignificantTermsAggregatorFactory.java | 9 +- .../SignificantTextAggregatorFactory.java | 3 +- .../bucket/terms/TermsAggregationBuilder.java | 30 +++- .../bucket/terms/TermsAggregatorFactory.java | 41 +++-- .../bucket/terms/TermsAggregatorSupplier.java | 3 +- .../AggregatorFactoriesBuilderTests.java | 37 +++++ .../DocumentLevelSecurityTests.java | 129 ++++++++++++++- .../interceptor/SearchRequestInterceptor.java | 13 +- .../SearchRequestInterceptorTests.java | 105 ++++++++++++ 15 files changed, 547 insertions(+), 55 deletions(-) create mode 100644 docs/changelog/105714.yaml create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptorTests.java diff --git a/docs/changelog/105714.yaml b/docs/changelog/105714.yaml new file mode 100644 index 0000000000000..20301a4c03e83 --- /dev/null +++ b/docs/changelog/105714.yaml @@ -0,0 +1,5 @@ +pr: 105714 +summary: Cross check livedocs for terms aggs when index access control list is non-null +area: "Aggregations" +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index f4ec58a7e2839..87120e6a2f8d3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -146,6 +146,7 @@ static TransportVersion def(int id) { public static final TransportVersion KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING = def(8_606_00_0); public static final TransportVersion ESQL_EXTENDED_ENRICH_INPUT_TYPE = def(8_607_00_0); public static final TransportVersion ESQL_SERIALIZE_BIG_VECTOR = def(8_608_00_0); + public static final TransportVersion 
AGGS_EXCLUDED_DELETED_DOCS = def(8_609_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 7b7c41165b51e..3dd3698d4f1bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -41,6 +41,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Queue; import java.util.Set; import java.util.function.ToLongFunction; import java.util.regex.Matcher; @@ -334,6 +335,46 @@ public boolean isInSortOrderExecutionRequired() { return false; } + /** + * Return true if any of the builders is a terms aggregation with min_doc_count=0 + */ + public boolean hasZeroMinDocTermsAggregation() { + final Queue queue = new LinkedList<>(aggregationBuilders); + while (queue.isEmpty() == false) { + final AggregationBuilder current = queue.poll(); + if (current == null) { + continue; + } + if (current instanceof TermsAggregationBuilder termsBuilder) { + if (termsBuilder.minDocCount() == 0) { + return true; + } + } + queue.addAll(current.getSubAggregations()); + } + return false; + } + + /** + * Force all min_doc_count=0 terms aggregations to exclude deleted docs. + */ + public void forceTermsAggsToExcludeDeletedDocs() { + assert hasZeroMinDocTermsAggregation(); + final Queue queue = new LinkedList<>(aggregationBuilders); + while (queue.isEmpty() == false) { + final AggregationBuilder current = queue.poll(); + if (current == null) { + continue; + } + if (current instanceof TermsAggregationBuilder termsBuilder) { + if (termsBuilder.minDocCount() == 0) { + termsBuilder.excludeDeletedDocs(true); + } + } + queue.addAll(current.getSubAggregations()); + } + } + /** * Return false if this aggregation or any of the child aggregations does not support parallel collection. 
* As a result, a request including such aggregation is always executed sequentially despite concurrency is enabled for the query diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 317c66654a203..d184bb6c4c145 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -9,15 +9,20 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; @@ -85,7 +90,8 @@ public GlobalOrdinalsStringTermsAggregator( SubAggCollectionMode collectionMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata); this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. @@ -94,14 +100,14 @@ public GlobalOrdinalsStringTermsAggregator( this.valueCount = valuesSupplier.get().getValueCount(); this.acceptedGlobalOrdinals = acceptedOrds; if (remapGlobalOrds) { - this.collectionStrategy = new RemapGlobalOrds(cardinality); + this.collectionStrategy = new RemapGlobalOrds(cardinality, excludeDeletedDocs); } else { this.collectionStrategy = cardinality.map(estimate -> { if (estimate > 1) { // This is a 500 class error, because we should never be able to reach it. throw new AggregationExecutionException("Dense ords don't know how to collect from many buckets"); } - return new DenseGlobalOrds(); + return new DenseGlobalOrds(excludeDeletedDocs); }); } } @@ -278,7 +284,8 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { boolean remapGlobalOrds, SubAggCollectionMode collectionMode, boolean showTermDocCountError, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { super( name, @@ -296,7 +303,8 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { collectionMode, showTermDocCountError, CardinalityUpperBound.ONE, - metadata + metadata, + excludeDeletedDocs ); assert factories == null || factories.countAggregators() == 0; this.segmentDocCounts = context.bigArrays().newLongArray(1, true); @@ -445,6 +453,13 @@ interface BucketInfoConsumer { * bucket ordinal. 
*/ class DenseGlobalOrds extends CollectionStrategy { + + private final boolean excludeDeletedDocs; + + DenseGlobalOrds(boolean excludeDeletedDocs) { + this.excludeDeletedDocs = excludeDeletedDocs; + } + @Override String describe() { return "dense"; @@ -475,6 +490,14 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) { @Override void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { assert owningBucketOrd == 0; + if (excludeDeletedDocs) { + forEachExcludeDeletedDocs(consumer); + } else { + forEachAllowDeletedDocs(consumer); + } + } + + private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOException { for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) { if (false == acceptedGlobalOrdinals.test(globalOrd)) { continue; @@ -486,6 +509,39 @@ void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOExcepti } } + /** + * Excludes deleted docs in the results by cross-checking with liveDocs. + */ + private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOException { + try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) { + for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { + LeafReader reader = ctx.reader(); + Bits liveDocs = reader.getLiveDocs(); + SortedSetDocValues globalOrds = null; + for (int docId = 0; docId < reader.maxDoc(); ++docId) { + if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted + globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds; + if (globalOrds.advanceExact(docId)) { + for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) { + if (accepted.find(globalOrd) >= 0) { + continue; + } + if (false == acceptedGlobalOrdinals.test(globalOrd)) { + continue; + } + long docCount = bucketDocCount(globalOrd); + if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) { + consumer.accept(globalOrd, globalOrd, docCount); + accepted.add(globalOrd); + } + } + } + } + } + } + } + } + @Override public void close() {} } @@ -498,9 +554,11 @@ public void close() {} */ private class RemapGlobalOrds extends CollectionStrategy { private final LongKeyedBucketOrds bucketOrds; + private final boolean excludeDeletedDocs; - private RemapGlobalOrds(CardinalityUpperBound cardinality) { + private RemapGlobalOrds(CardinalityUpperBound cardinality, boolean excludeDeletedDocs) { bucketOrds = LongKeyedBucketOrds.buildForValueRange(bigArrays(), cardinality, 0, valueCount - 1); + this.excludeDeletedDocs = excludeDeletedDocs; } @Override @@ -534,27 +592,20 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) { @Override void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { + if (excludeDeletedDocs) { + forEachExcludeDeletedDocs(owningBucketOrd, consumer); + } else { + forEachAllowDeletedDocs(owningBucketOrd, consumer); + } + } + + void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { if (bucketCountThresholds.getMinDocCount() == 0) { for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) { if (false == acceptedGlobalOrdinals.test(globalOrd)) { continue; } - /* - * Use `add` instead of `find` here to assign an ordinal - * even if the global ord wasn't found so we can build - * sub-aggregations without trouble even though we haven't - * hit any documents for them. This is wasteful, but - * settings minDocCount == 0 is wasteful in general..... 
- */ - long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd); - long docCount; - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; - docCount = bucketDocCount(bucketOrd); - } else { - docCount = 0; - } - consumer.accept(globalOrd, bucketOrd, docCount); + addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, null); } } else { LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); @@ -567,6 +618,64 @@ void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOExcepti } } + /** + * Excludes deleted docs in the results by cross-checking with liveDocs. + */ + void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { + assert bucketCountThresholds.getMinDocCount() == 0; + try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) { + for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { + LeafReader reader = ctx.reader(); + Bits liveDocs = reader.getLiveDocs(); + SortedSetDocValues globalOrds = null; + for (int docId = 0; docId < reader.maxDoc(); ++docId) { + if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted + globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds; + if (globalOrds.advanceExact(docId)) { + for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) { + if (accepted.find(globalOrd) >= 0) { + continue; + } + if (false == acceptedGlobalOrdinals.test(globalOrd)) { + continue; + } + addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, accepted); + } + } + } + } + } + } + } + + private void addBucketForMinDocCountZero( + long owningBucketOrd, + long globalOrd, + BucketInfoConsumer consumer, + @Nullable LongHash accepted + ) throws IOException { + /* + * Use `add` instead of `find` here to assign an ordinal + * even if the global ord wasn't found so we can build + * sub-aggregations without trouble even though we haven't + * hit any documents for them. This is wasteful, but + * settings minDocCount == 0 is wasteful in general..... 
+ */ + long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd); + long docCount; + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + docCount = bucketDocCount(bucketOrd); + } else { + docCount = 0; + } + assert globalOrd >= 0; + consumer.accept(globalOrd, bucketOrd, docCount); + if (accepted != null) { + accepted.add(globalOrd); + } + } + @Override public void close() { bucketOrds.close(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index e4650ad9fdddf..5a9cc767fab17 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -52,6 +52,7 @@ public final class MapStringTermsAggregator extends AbstractStringTermsAggregato private final ResultStrategy resultStrategy; private final BytesKeyedBucketOrds bucketOrds; private final IncludeExclude.StringFilter includeExclude; + private final boolean excludeDeletedDocs; public MapStringTermsAggregator( String name, @@ -67,7 +68,8 @@ public MapStringTermsAggregator( SubAggCollectionMode collectionMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata); this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. @@ -75,6 +77,7 @@ public MapStringTermsAggregator( bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality); // set last because if there is an error during construction the collector gets release outside the constructor. this.collectorSource = collectorSource; + this.excludeDeletedDocs = excludeDeletedDocs; } @Override @@ -244,7 +247,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length); long[] otherDocCounts = new long[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); + collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs); int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); PriorityQueue ordered = buildPriorityQueue(size); @@ -296,7 +299,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws * Collect extra entries for "zero" hit documents if they were requested * and required. */ - abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException; + abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException; /** * Build an empty temporary bucket. 
@@ -371,7 +374,7 @@ LeafBucketCollector wrapCollector(LeafBucketCollector primary) { } @Override - void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException { + void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException { if (bucketCountThresholds.getMinDocCount() != 0) { return; } @@ -383,6 +386,9 @@ void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException { SortedBinaryDocValues values = valuesSource.bytesValues(ctx); // brute force for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) { + if (excludeDeletedDocs && ctx.reader().getLiveDocs() != null && ctx.reader().getLiveDocs().get(docId) == false) { + continue; + } if (values.advanceExact(docId)) { int valueCount = values.docValueCount(); for (int i = 0; i < valueCount; ++i) { @@ -519,7 +525,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { } @Override - void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {} + void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {} @Override Supplier emptyBucketBuilder(long owningBucketOrd) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 96d81aad86c4a..80da463001a07 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -50,6 +50,7 @@ public final class NumericTermsAggregator extends TermsAggregator { private final ValuesSource.Numeric valuesSource; private final LongKeyedBucketOrds bucketOrds; private final LongFilter longFilter; + private final boolean excludeDeletedDocs; public NumericTermsAggregator( String name, @@ -64,13 +65,15 @@ public NumericTermsAggregator( SubAggCollectionMode subAggCollectMode, IncludeExclude.LongFilter longFilter, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { super(name, factories, context, parent, bucketCountThresholds, order, format, subAggCollectMode, metadata); this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. this.valuesSource = valuesSource; this.longFilter = longFilter; bucketOrds = LongKeyedBucketOrds.build(bigArrays(), cardinality); + this.excludeDeletedDocs = excludeDeletedDocs; } @Override @@ -144,7 +147,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length); long[] otherDocCounts = new long[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); + collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs); long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); @@ -240,7 +243,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws * Collect extra entries for "zero" hit documents if they were requested * and required. 
*/ - abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException; + abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException; /** * Turn the buckets into an aggregation result. @@ -285,7 +288,7 @@ Supplier emptyBucketBuilder(long owningBucketOrd) { abstract B buildEmptyBucket(); @Override - final void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException { + final void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException { if (bucketCountThresholds.getMinDocCount() != 0) { return; } @@ -296,6 +299,9 @@ final void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOExceptio for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { SortedNumericDocValues values = getValues(ctx); for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) { + if (excludeDeletedDocs && ctx.reader().getLiveDocs() != null && ctx.reader().getLiveDocs().get(docId) == false) { + continue; + } if (values.advanceExact(docId)) { int valueCount = values.docValueCount(); for (int v = 0; v < valueCount; ++v) { @@ -561,7 +567,7 @@ void buildSubAggs(SignificantLongTerms.Bucket[][] topBucketsPerOrd) throws IOExc } @Override - void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {} + void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {} @Override SignificantLongTerms buildResult(long owningBucketOrd, long otherDocCoun, SignificantLongTerms.Bucket[] topBuckets) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index f47e28bbc6dbd..1608b38b142e6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -197,7 +197,8 @@ private static SignificantTermsAggregatorSupplier numericSupplier() { SubAggCollectionMode.BREADTH_FIRST, longFilter, cardinality, - metadata + metadata, + false ); }; } @@ -356,7 +357,8 @@ Aggregator create( SubAggCollectionMode.BREADTH_FIRST, false, cardinality, - metadata + metadata, + false ); } @@ -409,7 +411,8 @@ Aggregator create( SubAggCollectionMode.BREADTH_FIRST, false, cardinality, - metadata + metadata, + false ); } }; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 13f3ce3e2020e..a4ff03b7b29cf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -190,7 +190,8 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car SubAggCollectionMode.BREADTH_FIRST, false, cardinality, - metadata + metadata, + false ); success = true; return mapStringTermsAggregator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 
13e5fe3dbd11f..7190589de38c4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -37,6 +37,8 @@ import java.util.Objects; import java.util.function.ToLongFunction; +import static org.elasticsearch.TransportVersions.AGGS_EXCLUDED_DELETED_DOCS; + public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final int KEY_ORDER_CONCURRENCY_THRESHOLD = 50; @@ -112,6 +114,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private final TermsAggregator.BucketCountThresholds bucketCountThresholds; private boolean showTermDocCountError = false; + private boolean excludeDeletedDocs = false; public TermsAggregationBuilder(String name) { super(name); @@ -195,6 +198,9 @@ public TermsAggregationBuilder(StreamInput in) throws IOException { includeExclude = in.readOptionalWriteable(IncludeExclude::new); order = InternalOrder.Streams.readOrder(in); showTermDocCountError = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(AGGS_EXCLUDED_DELETED_DOCS)) { + excludeDeletedDocs = in.readBoolean(); + } } @Override @@ -210,6 +216,9 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(includeExclude); order.writeTo(out); out.writeBoolean(showTermDocCountError); + if (out.getTransportVersion().onOrAfter(AGGS_EXCLUDED_DELETED_DOCS)) { + out.writeBoolean(excludeDeletedDocs); + } } /** @@ -391,6 +400,18 @@ public TermsAggregationBuilder showTermDocCountError(boolean showTermDocCountErr return this; } + /** + * Set whether deleted documents should be explicitly excluded from the aggregation results + */ + public TermsAggregationBuilder excludeDeletedDocs(boolean excludeDeletedDocs) { + this.excludeDeletedDocs = excludeDeletedDocs; + return this; + } + + public boolean excludeDeletedDocs() { + return excludeDeletedDocs; + } + @Override public BucketCardinality bucketCardinality() { return BucketCardinality.MANY; @@ -417,7 +438,8 @@ protected ValuesSourceAggregatorFactory innerBuild( parent, subFactoriesBuilder, metadata, - aggregatorSupplier + aggregatorSupplier, + excludeDeletedDocs ); } @@ -448,7 +470,8 @@ public int hashCode() { executionHint, includeExclude, order, - showTermDocCountError + showTermDocCountError, + excludeDeletedDocs ); } @@ -463,7 +486,8 @@ public boolean equals(Object obj) { && Objects.equals(executionHint, other.executionHint) && Objects.equals(includeExclude, other.includeExclude) && Objects.equals(order, other.order) - && Objects.equals(showTermDocCountError, other.showTermDocCountError); + && Objects.equals(showTermDocCountError, other.showTermDocCountError) + && Objects.equals(excludeDeletedDocs, other.excludeDeletedDocs); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 68a4ffca22b51..fdc1c4e4651c9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -102,7 +102,8 @@ private static TermsAggregatorSupplier bytesSupplier() { subAggCollectMode, showTermDocCountError, cardinality, - metadata) -> { + metadata, + excludeDeletedDocs) -> { ValuesSource valuesSource = 
valuesSourceConfig.getValuesSource(); ExecutionMode execution = null; if (executionHint != null) { @@ -145,7 +146,8 @@ private static TermsAggregatorSupplier bytesSupplier() { subAggCollectMode, showTermDocCountError, cardinality, - metadata + metadata, + excludeDeletedDocs ); }; } @@ -168,7 +170,8 @@ private static TermsAggregatorSupplier numericSupplier() { subAggCollectMode, showTermDocCountError, cardinality, - metadata) -> { + metadata, + excludeDeletedDocs) -> { if ((includeExclude != null) && (includeExclude.isRegexBased())) { throw new IllegalArgumentException( @@ -211,7 +214,8 @@ private static TermsAggregatorSupplier numericSupplier() { subAggCollectMode, longFilter, cardinality, - metadata + metadata, + excludeDeletedDocs ); }; } @@ -223,6 +227,7 @@ private static TermsAggregatorSupplier numericSupplier() { private final SubAggCollectionMode collectMode; private final TermsAggregator.BucketCountThresholds bucketCountThresholds; private final boolean showTermDocCountError; + private final boolean excludeDeletedDocs; TermsAggregatorFactory( String name, @@ -237,7 +242,8 @@ private static TermsAggregatorSupplier numericSupplier() { AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metadata, - TermsAggregatorSupplier aggregatorSupplier + TermsAggregatorSupplier aggregatorSupplier, + boolean excludeDeletedDocs ) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metadata); this.aggregatorSupplier = aggregatorSupplier; @@ -247,6 +253,7 @@ private static TermsAggregatorSupplier numericSupplier() { this.collectMode = collectMode; this.bucketCountThresholds = bucketCountThresholds; this.showTermDocCountError = showTermDocCountError; + this.excludeDeletedDocs = excludeDeletedDocs; } @Override @@ -325,7 +332,8 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c collectMode, showTermDocCountError, cardinality, - metadata + metadata, + excludeDeletedDocs ); } @@ -384,7 +392,8 @@ Aggregator create( SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { IncludeExclude.StringFilter filter = includeExclude == null ? 
null @@ -403,7 +412,8 @@ Aggregator create( subAggCollectMode, showTermDocCountError, cardinality, - metadata + metadata, + excludeDeletedDocs ); } }, @@ -422,7 +432,8 @@ Aggregator create( SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException { assert valuesSourceConfig.getValuesSource() instanceof ValuesSource.Bytes.WithOrdinals; @@ -433,7 +444,8 @@ Aggregator create( if (maxOrd > 0 && maxOrd <= MAX_ORDS_TO_TRY_FILTERS && context.enableRewriteToFilterByFilter() - && false == context.isInSortOrderExecutionRequired()) { + && false == context.isInSortOrderExecutionRequired() + && false == excludeDeletedDocs) { StringTermsAggregatorFromFilters adapted = StringTermsAggregatorFromFilters.adaptIntoFiltersOrNull( name, factories, @@ -501,7 +513,8 @@ Aggregator create( false, subAggCollectMode, showTermDocCountError, - metadata + metadata, + excludeDeletedDocs ); } @@ -547,7 +560,8 @@ Aggregator create( subAggCollectMode, showTermDocCountError, cardinality, - metadata + metadata, + excludeDeletedDocs ); } }; @@ -580,7 +594,8 @@ abstract Aggregator create( SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException; @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java index 0d8d1d7a19045..c4284c30e889f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java @@ -31,6 +31,7 @@ Aggregator build( Aggregator.SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, CardinalityUpperBound cardinality, - Map metadata + Map metadata, + boolean excludeDeletedDocs ) throws IOException; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java index 6c4ecf2388151..3f07ce5d50d23 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesBuilderTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -128,6 +129,42 @@ public void testUnorderedEqualsSubSet() { assertNotEquals(builder1.hashCode(), builder2.hashCode()); } + public void testForceExcludedDocs() { + // simple + AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); + TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("myterms"); + builder.addAggregator(termsAggregationBuilder); + assertFalse(termsAggregationBuilder.excludeDeletedDocs()); + 
assertFalse(builder.hasZeroMinDocTermsAggregation()); + termsAggregationBuilder.minDocCount(0); + assertTrue(builder.hasZeroMinDocTermsAggregation()); + builder.forceTermsAggsToExcludeDeletedDocs(); + assertTrue(termsAggregationBuilder.excludeDeletedDocs()); + + // nested + AggregatorFactories.Builder nested = new AggregatorFactories.Builder(); + boolean hasZeroMinDocTermsAggregation = false; + for (int i = 0; i <= randomIntBetween(1, 10); i++) { + AggregationBuilder agg = getRandomAggregation(); + nested.addAggregator(agg); + if (randomBoolean()) { + hasZeroMinDocTermsAggregation = true; + agg.subAggregation(termsAggregationBuilder); + } + } + if (hasZeroMinDocTermsAggregation) { + assertTrue(nested.hasZeroMinDocTermsAggregation()); + nested.forceTermsAggsToExcludeDeletedDocs(); + for (AggregationBuilder agg : nested.getAggregatorFactories()) { + if (agg instanceof TermsAggregationBuilder) { + assertTrue(((TermsAggregationBuilder) agg).excludeDeletedDocs()); + } + } + } else { + assertFalse(nested.hasZeroMinDocTermsAggregation()); + } + } + private static AggregationBuilder getRandomAggregation() { // just a couple of aggregations, sufficient for the purpose of this test final int randomAggregatorPoolSize = 4; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index c10dc7f1da25c..d105b616c57f1 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -48,6 +48,9 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.global.Global; +import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; +import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.profile.ProfileResult; @@ -101,9 +104,12 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @LuceneTestCase.SuppressCodecs("*") // suppress test codecs otherwise test using completion suggester fails @@ -134,7 +140,8 @@ protected String configUsers() { user3:%s user4:%s user5:%s - """, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed); + user6:%s + """, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed, usersPasswdHashed); } @Override @@ -145,6 +152,7 @@ protected String configUsersRoles() { role3:user2,user3 role4:user4 role5:user5 + role6:user6 """; } @@ -192,6 +200,12 @@ 
protected String configRoles() { privileges: [ read ] field_security: grant: [ 'field1', 'other_field', 'suggest_field2' ] + role6: + cluster: [ all ] + indices: + - names: '*' + privileges: [ ALL ] + query: '{"term" : {"color" : "red"}}' """; } @@ -988,6 +1002,119 @@ public void testGlobalAggregation() throws Exception { ); } + public void testZeroMinDocAggregation() throws Exception { + assertAcked( + indicesAdmin().prepareCreate("test") + .setMapping("color", "type=keyword", "fruit", "type=keyword", "count", "type=integer") + .setSettings(Map.of("index.number_of_shards", 1)) + ); + prepareIndex("test").setId("1").setSource("color", "red", "fruit", "apple", "count", -1).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("color", "yellow", "fruit", "banana", "count", -2).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("3").setSource("color", "green", "fruit", "grape", "count", -3).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("4").setSource("color", "red", "fruit", "grape", "count", -4).setRefreshPolicy(IMMEDIATE).get(); + indicesAdmin().prepareForceMerge("test").get(); + + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(termQuery("fruit", "apple")) + // global ordinal + .addAggregation(AggregationBuilders.terms("colors1").field("color").minDocCount(0)) + .addAggregation(AggregationBuilders.terms("fruits").field("fruit").minDocCount(0)) + // global ordinal remapped + .addAggregation( + AggregationBuilders.terms("colors2") + .field("color") + .minDocCount(0) + .includeExclude(new IncludeExclude(".*", null, null, null)) + ) + // mapped + .addAggregation(AggregationBuilders.terms("colors3").field("color").minDocCount(0).executionHint("map")) + // numeric + .addAggregation(AggregationBuilders.terms("counts").field("count").minDocCount(0)) + // nested + .addAggregation( + AggregationBuilders.terms("nested") + .field("color") + .minDocCount(0) + .subAggregation( + AggregationBuilders.terms("fruits") + .field("fruit") + .minDocCount(0) + .executionHint("map") + .subAggregation(AggregationBuilders.terms("counts").field("count").minDocCount(0)) + ) + .minDocCount(0) + ), + response -> { + assertThat( + response.toString(), + allOf( + containsString("apple"), + containsString("grape"), + containsString("red"), + containsString("-1"), + containsString("-4") + ) + ); + assertThat( + response.toString(), + allOf( + not(containsString("banana")), + not(containsString("yellow")), + not(containsString("green")), + not(containsString("-2")), + not(containsString("-3")) + ) + ); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + // fruits + StringTerms fruits = response.getAggregations().get("fruits"); + assertThat(fruits.getBuckets().size(), equalTo(2)); + List fruitBuckets = fruits.getBuckets(); + assertTrue(fruitBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("apple") && bucket.getDocCount() == 1)); + assertTrue(fruitBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("grape") && bucket.getDocCount() == 0)); + // counts + LongTerms counts = response.getAggregations().get("counts"); + assertThat(counts.getBuckets().size(), equalTo(2)); + List countsBuckets = counts.getBuckets(); + assertTrue(countsBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 1)); + assertTrue(countsBuckets.stream().anyMatch(bucket -> 
bucket.getKeyAsString().equals("-4") && bucket.getDocCount() == 0)); + // colors + for (int i = 1; i <= 3; i++) { + StringTerms colors = response.getAggregations().get("colors" + i); + assertThat(colors.getBuckets().size(), equalTo(1)); + assertThat(colors.getBuckets().get(0).getKeyAsString(), equalTo("red")); + assertThat(colors.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + // nested + StringTerms nested = response.getAggregations().get("nested"); + assertThat(nested.getBuckets().size(), equalTo(1)); + assertThat(nested.getBuckets().get(0).getKeyAsString(), equalTo("red")); + assertThat(nested.getBuckets().get(0).getDocCount(), equalTo(1L)); + StringTerms innerFruits = nested.getBuckets().get(0).getAggregations().get("fruits"); + List innerFruitsBuckets = innerFruits.getBuckets(); + assertTrue(innerFruitsBuckets.stream().anyMatch(b -> b.getKeyAsString().equals("apple") && b.getDocCount() == 1)); + assertTrue(innerFruitsBuckets.stream().anyMatch(b -> b.getKeyAsString().equals("grape") && b.getDocCount() == 0)); + assertThat(innerFruitsBuckets.size(), equalTo(2)); + + for (int i = 0; i <= 1; i++) { + String parentBucketKey = innerFruitsBuckets.get(i).getKeyAsString(); + LongTerms innerCounts = innerFruitsBuckets.get(i).getAggregations().get("counts"); + assertThat(innerCounts.getBuckets().size(), equalTo(2)); + List icb = innerCounts.getBuckets(); + if ("apple".equals(parentBucketKey)) { + assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 1)); + } else { + assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 0)); + } + assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-4") && bucket.getDocCount() == 0)); + } + } + ); + } + public void testParentChild() throws Exception { XContentBuilder mapping = jsonBuilder().startObject() .startObject("properties") diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptor.java index 37839ae45670b..f9468b53c9d0c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptor.java @@ -50,6 +50,11 @@ void disableFeatures( ) ); } else { + if (hasZeroMinDocTermsAggregation(request)) { + assert request.source() != null && request.source().aggregations() != null; + request.source().aggregations().forceTermsAggsToExcludeDeletedDocs(); + } + listener.onResponse(null); } } else { @@ -60,7 +65,7 @@ void disableFeatures( @Override public boolean supports(IndicesRequest request) { if (request instanceof SearchRequest searchRequest) { - return hasSuggest(searchRequest) || hasProfile(searchRequest); + return hasSuggest(searchRequest) || hasProfile(searchRequest) || hasZeroMinDocTermsAggregation(searchRequest); } else { return false; } @@ -74,4 +79,10 @@ private static boolean hasProfile(SearchRequest searchRequest) { return searchRequest.source() != null && searchRequest.source().profile(); } + private static boolean hasZeroMinDocTermsAggregation(SearchRequest searchRequest) { + return searchRequest.source() != null + && searchRequest.source().aggregations() != null + && searchRequest.source().aggregations().hasZeroMinDocTermsAggregation(); + } + } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptorTests.java new file mode 100644 index 0000000000000..6bb9d6087cdaf --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestInterceptorTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authz.interceptor; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestInterceptorTests extends ESTestCase { + + private ClusterService clusterService; + private ThreadPool threadPool; + private MockLicenseState licenseState; + private SearchRequestInterceptor interceptor; + + @Before + public void init() { + threadPool = new TestThreadPool("search request interceptor tests"); + licenseState = mock(MockLicenseState.class); + when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true); + clusterService = mock(ClusterService.class); + interceptor = new SearchRequestInterceptor(threadPool, licenseState, clusterService); + } + + @After + public void stopThreadPool() { + terminate(threadPool); + } + + public void testForceExcludeDeletedDocs() { + SearchRequest searchRequest = new SearchRequest(); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("myterms"); + termsAggregationBuilder.minDocCount(0); + searchSourceBuilder.aggregation(termsAggregationBuilder); + searchRequest.source(searchSourceBuilder); + + final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(Set.of(new BytesArray(""" + {"term":{"username":"foo"}}"""))); + final String index = randomAlphaOfLengthBetween(3, 8); + final PlainActionFuture listener = new PlainActionFuture<>(); + assertFalse(termsAggregationBuilder.excludeDeletedDocs()); + interceptor.disableFeatures( + searchRequest, + Map.of(index, new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, documentPermissions)), + listener + ); + 
assertTrue(termsAggregationBuilder.excludeDeletedDocs()); // changed value + } + + public void testNoForceExcludeDeletedDocs() { + SearchRequest searchRequest = new SearchRequest(); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + TermsAggregationBuilder termsAggregationBuilder = new TermsAggregationBuilder("myterms"); + termsAggregationBuilder.minDocCount(1); + searchSourceBuilder.aggregation(termsAggregationBuilder); + searchRequest.source(searchSourceBuilder); + + final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(Set.of(new BytesArray(""" + {"term":{"username":"foo"}}"""))); + final String index = randomAlphaOfLengthBetween(3, 8); + final PlainActionFuture listener = new PlainActionFuture<>(); + assertFalse(termsAggregationBuilder.excludeDeletedDocs()); + interceptor.disableFeatures( + searchRequest, + Map.of(index, new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, documentPermissions)), + listener + ); + assertFalse(termsAggregationBuilder.excludeDeletedDocs()); // did not change value + + termsAggregationBuilder.minDocCount(0); + interceptor.disableFeatures( + searchRequest, + Map.of(), // no DLS + listener + ); + assertFalse(termsAggregationBuilder.excludeDeletedDocs()); // did not change value + } + +} From ed33fdd24b5fe276d095cc883a8b0099be50e0bc Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Thu, 14 Mar 2024 13:00:01 -0600 Subject: [PATCH 220/248] Adjust interception of requests for specific shard IDs (#101656) Some index requests target shard IDs specifically, which may not match the indices that the request targets as given by `IndicesRequest#indices()`. This requires a different interception strategy to make sure those requests are handled correctly in all cases and that any malformed messages are caught early to aid in troubleshooting. This PR adds an interface allowing requests to report the shard IDs they target as well as the index names, and adjusts the interception of those requests as appropriate to handle those shard IDs in the cases where they are relevant.
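For illustration, a request type that targets a single shard directly could adopt the new interface roughly as follows. This is a minimal sketch rather than code from this patch; the MyShardLevelRequest class name is hypothetical, and the sketch only assumes the IndicesRequest.RemoteClusterShardRequest interface and the ShardId accessor shown in the diff below.

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.index.shard.ShardId;

import java.util.Collection;
import java.util.List;

// Hypothetical request that operates on one shard directly. It reports the index name via
// indices() and the concrete target shard via shards(), so interception and authorization
// code can cross-check that the shard actually belongs to an authorized index.
public class MyShardLevelRequest extends ActionRequest implements IndicesRequest.RemoteClusterShardRequest {

    private final ShardId shardId;

    public MyShardLevelRequest(ShardId shardId) {
        this.shardId = shardId;
    }

    @Override
    public String[] indices() {
        return new String[] { shardId.getIndexName() };
    }

    @Override
    public IndicesOptions indicesOptions() {
        return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
    }

    @Override
    public Collection<ShardId> shards() {
        return List.of(shardId);
    }

    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }
}

On the authorization side, the RBACEngine change below walks shards() for cross-cluster requests and denies the request when a reported shard ID belongs to an index that is not covered by the resolved index permissions, which is what catches the malformed messages mentioned above.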
--- docs/changelog/101656.yaml | 5 + .../elasticsearch/action/IndicesRequest.java | 18 ++ .../elasticsearch/action/get/GetRequest.java | 2 +- .../elasticsearch/action/get/GetResponse.java | 2 +- .../single/shard/SingleShardRequest.java | 11 +- .../core/downsample/DownsampleShardTask.java | 4 + ...DownsampleShardPersistentTaskExecutor.java | 8 +- .../qa/consistency-checks/build.gradle | 26 +++ .../security/CrossClusterShardTests.java | 165 ++++++++++++++++++ ...lusterSecurityFcActionAuthorizationIT.java | 134 ++++++++++++++ .../xpack/security/authz/RBACEngine.java | 33 +++- 11 files changed, 403 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/101656.yaml create mode 100644 x-pack/plugin/security/qa/consistency-checks/build.gradle create mode 100644 x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java diff --git a/docs/changelog/101656.yaml b/docs/changelog/101656.yaml new file mode 100644 index 0000000000000..7cd4f30cae849 --- /dev/null +++ b/docs/changelog/101656.yaml @@ -0,0 +1,5 @@ +pr: 101656 +summary: Adjust interception of requests for specific shard IDs +area: Authorization +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/IndicesRequest.java b/server/src/main/java/org/elasticsearch/action/IndicesRequest.java index 346b300349161..20913148fd9b3 100644 --- a/server/src/main/java/org/elasticsearch/action/IndicesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/IndicesRequest.java @@ -9,6 +9,9 @@ package org.elasticsearch.action; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.index.shard.ShardId; + +import java.util.Collection; /** * Needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that relate to @@ -72,4 +75,19 @@ default boolean allowsRemoteIndices() { return true; } } + + /** + * This subtype of request is for requests which may travel to remote clusters. These requests may need to provide additional + * information to the system on top of the indices the action relates to in order to be handled correctly in all cases. + */ + interface RemoteClusterShardRequest extends IndicesRequest { + /** + * Returns the shards this action is targeting directly, which may not obviously align with the indices returned by + * {@code indices()}. This is mostly used by requests which fan out to a number of shards for those fan-out requests. + * + * A default is intentionally not provided for this method.
It is critical that this method be implemented correctly for all + * remote cluster requests. + */ + Collection shards(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index f15bb2ef448df..9f7ed8fd56474 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -64,7 +64,7 @@ public class GetRequest extends SingleShardRequest implements Realti public GetRequest() {} - GetRequest(StreamInput in) throws IOException { + public GetRequest(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readString(); diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index 5b407d0ebceb0..4c48cbc44e207 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -34,7 +34,7 @@ public class GetResponse extends ActionResponse implements Iterable> extends ActionRequest implements IndicesRequest { +public abstract class SingleShardRequest> extends ActionRequest + implements + IndicesRequest.RemoteClusterShardRequest { public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); @@ -85,6 +89,11 @@ public String[] indices() { return new String[] { index }; } + @Override + public List shards() { + return Collections.singletonList(this.internalShardId); + } + @Override public IndicesOptions indicesOptions() { return INDICES_OPTIONS; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardTask.java index 2cb0da5d315a5..04accd7dd1b20 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardTask.java @@ -98,6 +98,10 @@ public DownsampleConfig config() { return config; } + public ShardId shardId() { + return shardId; + } + public long getTotalShardDocCount() { return totalShardDocCount; } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index 883986887fc3d..b4116d42d25ca 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -49,6 +49,7 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.Executor; @@ -260,7 +261,7 @@ private DelegatingAction() { super(NAME); } - public static class Request extends ActionRequest implements IndicesRequest { + public static class Request extends ActionRequest implements IndicesRequest.RemoteClusterShardRequest { private final DownsampleShardTask task; private final BytesRef lastDownsampleTsid; @@ -291,6 +292,11 @@ public IndicesOptions indicesOptions() { public void writeTo(StreamOutput out) { throw
new IllegalStateException("request should stay local"); } + + @Override + public Collection shards() { + return Collections.singletonList(task.shardId()); + } } public static class TA extends TransportAction { diff --git a/x-pack/plugin/security/qa/consistency-checks/build.gradle b/x-pack/plugin/security/qa/consistency-checks/build.gradle new file mode 100644 index 0000000000000..807bd19bdc343 --- /dev/null +++ b/x-pack/plugin/security/qa/consistency-checks/build.gradle @@ -0,0 +1,26 @@ +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + + testImplementation(testArtifact(project(xpackModule('core')))) + testImplementation project(path: ':modules:ingest-common') + testImplementation project(path: ':modules:data-streams') + testImplementation project(path: ':modules:lang-mustache') + testImplementation project(path: ':modules:rank-eval') + testImplementation project(path: ':modules:reindex') + testImplementation project(path: xpackModule('analytics')) + testImplementation project(path: xpackModule('async-search')) + testImplementation project(path: xpackModule('autoscaling')) + testImplementation project(path: xpackModule('ccr')) + testImplementation project(path: xpackModule('downsample')) + testImplementation project(path: xpackModule('eql')) + testImplementation project(path: xpackModule('esql')) + testImplementation project(path: xpackModule('frozen-indices')) + testImplementation project(path: xpackModule('graph')) + testImplementation project(path: xpackModule('ilm')) + testImplementation project(path: xpackModule('inference')) + testImplementation project(path: xpackModule('profiling')) + testImplementation project(path: xpackModule('rollup')) + testImplementation project(path: xpackModule('slm')) + testImplementation project(path: xpackModule('sql')) +} diff --git a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java new file mode 100644 index 0000000000000..bb76056e065f3 --- /dev/null +++ b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security; + +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.common.inject.Binding; +import org.elasticsearch.common.inject.TypeLiteral; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.index.rankeval.RankEvalPlugin; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.script.mustache.MustachePlugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.analytics.AnalyticsPlugin; +import org.elasticsearch.xpack.autoscaling.Autoscaling; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.downsample.Downsample; +import org.elasticsearch.xpack.downsample.DownsampleShardPersistentTaskExecutor; +import org.elasticsearch.xpack.eql.plugin.EqlPlugin; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.frozen.FrozenIndices; +import org.elasticsearch.xpack.graph.Graph; +import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; +import org.elasticsearch.xpack.profiling.ProfilingPlugin; +import org.elasticsearch.xpack.rollup.Rollup; +import org.elasticsearch.xpack.search.AsyncSearch; +import org.elasticsearch.xpack.slm.SnapshotLifecycle; +import org.elasticsearch.xpack.sql.plugin.SqlPlugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.function.Predicate; + +public class CrossClusterShardTests extends ESSingleNodeTestCase { + + Set MANUALLY_CHECKED_SHARD_ACTIONS = Set.of( + // The request types for these actions are all subtypes of SingleShardRequest, and have been evaluated to make sure their + // `shards()` methods return the correct thing. + TransportSearchShardsAction.NAME, + + // These types have had the interface implemented manually. + DownsampleShardPersistentTaskExecutor.DelegatingAction.NAME, + + // These actions do not have any references to shard IDs in their requests. + ClusterSearchShardsAction.NAME + ); + + Set> CHECKED_ABSTRACT_CLASSES = Set.of( + // This abstract class implements the interface so we can assume all of its subtypes do so properly as well. 
+ TransportSingleShardAction.class + ); + + @Override + protected Collection> getPlugins() { + final ArrayList> plugins = new ArrayList<>(super.getPlugins()); + plugins.addAll( + List.of( + LocalStateCompositeXPackPlugin.class, + AnalyticsPlugin.class, + AsyncSearch.class, + Autoscaling.class, + Ccr.class, + DataStreamsPlugin.class, + Downsample.class, + EqlPlugin.class, + EsqlPlugin.class, + FrozenIndices.class, + Graph.class, + IndexLifecycle.class, + InferencePlugin.class, + IngestCommonPlugin.class, + IngestTestPlugin.class, + MustachePlugin.class, + ProfilingPlugin.class, + RankEvalPlugin.class, + ReindexPlugin.class, + Rollup.class, + SnapshotLifecycle.class, + SqlPlugin.class + ) + ); + return plugins; + } + + @SuppressWarnings("rawtypes") + public void testCheckForNewShardLevelTransportActions() throws Exception { + Node node = node(); + List> transportActionBindings = node.injector().findBindingsByType(TypeLiteral.get(TransportAction.class)); + Set crossClusterPrivilegeNames = new HashSet<>(); + crossClusterPrivilegeNames.addAll(List.of(CrossClusterApiKeyRoleDescriptorBuilder.CCS_INDICES_PRIVILEGE_NAMES)); + crossClusterPrivilegeNames.addAll(List.of(CrossClusterApiKeyRoleDescriptorBuilder.CCR_INDICES_PRIVILEGE_NAMES)); + + List shardActions = transportActionBindings.stream() + .map(binding -> binding.getProvider().get()) + .filter(action -> IndexPrivilege.get(crossClusterPrivilegeNames).predicate().test(action.actionName)) + .filter(this::actionIsLikelyShardAction) + .map(action -> action.actionName) + .toList(); + + List actionsNotOnAllowlist = shardActions.stream().filter(Predicate.not(MANUALLY_CHECKED_SHARD_ACTIONS::contains)).toList(); + if (actionsNotOnAllowlist.isEmpty() == false) { + fail(""" + If this test fails, you likely just added a transport action, probably with `shard` in the name. Transport actions which + operate on shards directly and can be used across clusters must meet some additional requirements in order to be + handled correctly by all Elasticsearch infrastructure, so please make sure you have read the javadoc on the + IndicesRequest.RemoteClusterShardRequest interface and implemented it if appropriate and not already appropriately + implemented by a supertype, then add the name (as in "indices:data/read/get") of your new transport action to + MANUALLY_CHECKED_SHARD_ACTIONS above. Found actions not in allowlist: + """ + actionsNotOnAllowlist); + } + + // Also make sure the allowlist stays up to date and doesn't have any unnecessary entries. + List actionsOnAllowlistNotFound = MANUALLY_CHECKED_SHARD_ACTIONS.stream() + .filter(Predicate.not(shardActions::contains)) + .toList(); + if (actionsOnAllowlistNotFound.isEmpty() == false) { + fail( + "Some actions were on the allowlist but not found in the list of cross-cluster capable transport actions, please remove " + + "these from MANUALLY_CHECKED_SHARD_ACTIONS if they have been removed from Elasticsearch: " + + actionsOnAllowlistNotFound + ); + } + } + + /** + * Getting to the actual request classes themselves is made difficult by the design of Elasticsearch's transport + * protocol infrastructure combined with JVM type erasure. Therefore, we resort to a crude heuristic here. + * @param transportAction The transport action to be checked. + * @return True if the action is suspected of being an action which may operate on shards directly.
+ */ + private boolean actionIsLikelyShardAction(TransportAction transportAction) { + Class clazz = transportAction.getClass(); + Set> classHeirarchy = new HashSet<>(); + while (clazz != TransportAction.class) { + classHeirarchy.add(clazz); + clazz = clazz.getSuperclass(); + } + boolean hasCheckedSuperclass = classHeirarchy.stream().anyMatch(clz -> CHECKED_ABSTRACT_CLASSES.contains(clz)); + boolean shardInClassName = classHeirarchy.stream().anyMatch(clz -> clz.getName().toLowerCase(Locale.ROOT).contains("shard")); + return hasCheckedSuperclass == false + && (shardInClassName + || transportAction.actionName.toLowerCase(Locale.ROOT).contains("shard") + || transportAction.actionName.toLowerCase(Locale.ROOT).contains("[s]")); + } + +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index 2dbb716e8cfa6..f02c1fd61a77e 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -11,24 +11,36 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.RemoteClusterActionType; import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.client.internal.RemoteClusterClient; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; @@ -52,6 +64,8 @@ import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; import org.junit.ClassRule; +import java.io.ByteArrayInputStream; 
+import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.List; import java.util.Map; @@ -454,6 +468,78 @@ public void testUpdateCrossClusterApiKey() throws Exception { } } + public void testMalformedShardLevelActionIsRejected() throws Exception { + final Map crossClusterApiKeyMap = createCrossClusterAccessApiKey(adminClient(), """ + { + "search": [ + { + "names": ["idx-a", "idx-b"] + } + ] + }"""); + + final Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(Strings.format(""" + { "index": { "_index": "idx-a", "_id": "1" } } + { "name": "doc-1" } + { "index": { "_index": "idx-b", "_id": "1" } } + { "name": "doc-1" } + """)); + assertOK(adminClient().performRequest(bulkRequest)); + + final Request getIndexSettingsRequest = new Request("GET", "/idx-b/_settings"); + getIndexSettingsRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); + final ObjectPath indexSettings = assertOKAndCreateObjectPath(adminClient().performRequest(getIndexSettingsRequest)); + String otherIndexId = indexSettings.evaluate(".idx-b.settings.index.uuid"); + + // Create the malformed request with a mismatch between with the request index and shard ID + String indexA = "idx-a"; + + try ( + MockTransportService service = startTransport( + "node", + threadPool, + (String) crossClusterApiKeyMap.get("encoded"), + Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)) + ) + ) { + final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + final List remoteConnectionInfos = remoteClusterService.getRemoteConnectionInfos().toList(); + assertThat(remoteConnectionInfos, hasSize(1)); + assertThat(remoteConnectionInfos.get(0).isConnected(), is(true)); + + MalformedGetRequest malformedGetRequest = new MalformedGetRequest(otherIndexId); + malformedGetRequest.assertParsesAsGetRequest(); + final ElasticsearchSecurityException e = expectThrows( + ElasticsearchSecurityException.class, + () -> executeRemote( + remoteClusterService.getRemoteClusterClient("my_remote_cluster", threadPool.generic()), + new RemoteClusterActionType(TransportGetAction.TYPE.name() + "[s]", GetResponse::new), + malformedGetRequest + ) + ); + assertThat(e.getMessage(), containsString("is unauthorized")); + } + + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + } + + private static CrossClusterAccessSubjectInfo buildCrossClusterAccessSubjectInfo(String... 
indices) throws IOException { + return new CrossClusterAccessSubjectInfo( + Authentication.newRealmAuthentication(new User("query-user", "role"), new Authentication.RealmRef("file", "file", "node")), + new RoleDescriptorsIntersection( + new RoleDescriptor( + "cross_cluster", + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(indices).privileges("read", "read_cross_cluster").build() }, + null + ) + ) + ); + } + private static MockTransportService startTransport(final String nodeName, final ThreadPool threadPool, String encodedApiKey) { return startTransport(nodeName, threadPool, encodedApiKey, Map.of()); } @@ -512,4 +598,52 @@ private static MockTransportService startTransport( } return service; } + + private static class MalformedGetRequest extends ActionRequest { + private final String otherIndexId; + + MalformedGetRequest(String otherIndexId) { + this.otherIndexId = otherIndexId; + } + + @Override + public ActionRequestValidationException validate() { + return null; // this space intentionally left blank + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // This is a manually-written malformed get request, since it's intentionally difficult to form this kind of + // request with production code. + TaskId.EMPTY_TASK_ID.writeTo(out); + out.writeOptionalWriteable(new ShardId("idx-b", otherIndexId, 0)); // InternalShardId + out.writeOptionalString("idx-a"); // index name + out.writeString("1"); // doc id + out.writeOptionalString(null); // routing + out.writeOptionalString(null); // preference + + out.writeBoolean(true); // refresh + out.writeOptionalStringArray(null); // stored fields + out.writeBoolean(true); // realtime + out.writeByte(VersionType.INTERNAL.getValue()); // version type + out.writeLong(Versions.MATCH_ANY); // version + out.writeOptionalWriteable(null); // fetch source context + out.writeBoolean(false); // force synthetic source + } + + /** + * Checks that this fake request can actually be parsed as a get request. If this assertion fails, + * check that the above writeTo method matches GetRequest's streaming methods. 
+ */ + public void assertParsesAsGetRequest() throws Exception { + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + this.writeTo(out); + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + GetRequest parsedRequest = new GetRequest(inputStreamStreamInput); + assertEquals("idx-a", parsedRequest.index()); + assertEquals("1", parsedRequest.id()); + assertEquals("idx-b", parsedRequest.shards().get(0).getIndexName()); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 05608187fcc68..4e2df77a15f9b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; @@ -399,7 +400,19 @@ public void authorizeIndexAction( .stream() .allMatch(IndicesAliasesRequest.AliasActions::expandAliasesWildcards)) : "expanded wildcards for local indices OR the request should not expand wildcards at all"; - delegateListener.onResponse(buildIndicesAccessControl(action, role, resolvedIndices, aliasOrIndexLookup)); + + IndexAuthorizationResult result = buildIndicesAccessControl(action, role, resolvedIndices, aliasOrIndexLookup); + if (requestInfo.getAuthentication().isCrossClusterAccess() + && request instanceof IndicesRequest.RemoteClusterShardRequest shardsRequest + && shardsRequest.shards() != null) { + for (ShardId shardId : shardsRequest.shards()) { + if (shardId != null && shardIdAuthorized(shardsRequest, shardId, result.getIndicesAccessControl()) == false) { + listener.onResponse(IndexAuthorizationResult.DENIED); + return; + } + } + } + delegateListener.onResponse(result); } })); } else { @@ -407,6 +420,24 @@ public void authorizeIndexAction( } } + private static boolean shardIdAuthorized(IndicesRequest request, ShardId shardId, IndicesAccessControl accessControl) { + var shardIdAccessPermissions = accessControl.getIndexPermissions(shardId.getIndexName()); + if (shardIdAccessPermissions != null) { + return true; + } + + logger.warn( + Strings.format( + "bad request of type [%s], request's stated indices %s are authorized but specified internal shard " + + "ID %s is not authorized", + request.getClass().getCanonicalName(), + request.indices(), + shardId + ) + ); + return false; + } + private static boolean allowsRemoteIndices(TransportRequest transportRequest) { // TODO this may need to change. See https://github.com/elastic/elasticsearch/issues/105598 if (transportRequest instanceof IndicesRequest.SingleIndexNoWildcards single) { From 925a9a373e5231bf4a3180bbb9435e33f44426c4 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 14 Mar 2024 12:49:59 -0700 Subject: [PATCH 221/248] Fix byte array iterator (#106372) If ByteArray has been resized, the byte iterator can access null pages. 
When resizing a big array, we overly allocate the pages array and assign null to the extra pages. Relates #106270 --- .../org/elasticsearch/common/util/BigByteArray.java | 9 ++++++--- .../elasticsearch/common/util/BigArraysTests.java | 13 +++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 0dce5ca21ffb5..379c714b2d355 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -146,14 +146,17 @@ public byte[] array() { public BytesRefIterator iterator() { return new BytesRefIterator() { int i = 0; + long remained = size; @Override public BytesRef next() { - if (i >= pages.length) { + if (remained == 0) { return null; } - int len = i == pages.length - 1 ? Math.toIntExact(size - (pages.length - 1L) * PAGE_SIZE_IN_BYTES) : PAGE_SIZE_IN_BYTES; - return new BytesRef(pages[i++], 0, len); + byte[] page = pages[i++]; + int len = Math.toIntExact(Math.min(page.length, remained)); + remained -= len; + return new BytesRef(page, 0, len); } }; } diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 7b759975cfaaa..1fc2ed2a72ec7 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -371,6 +371,19 @@ public void testByteIterator() throws Exception { } } assertThat(offset, equalTo(bytes.length)); + int newLen = randomIntBetween(bytes.length, bytes.length + 100_000); + array = bigArrays.resize(array, newLen); + it = array.iterator(); + offset = 0; + while ((ref = it.next()) != null) { + for (int i = 0; i < ref.length; i++) { + if (offset < bytes.length) { + assertEquals(bytes[offset], ref.bytes[ref.offset + i]); + } + offset++; + } + } + assertThat(offset, equalTo(newLen)); array.close(); } From b6f876f32a270e7240117bdc6cd46d01c563f549 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 14 Mar 2024 12:50:53 -0700 Subject: [PATCH 222/248] Serialize big array blocks (#106373) Similar to ArrayBlocks, this change serializes the underlying structure of BigArrayBlocks. 
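Concretely, the effect on the wire format is that a big-array-backed block can now be shipped as its underlying pages instead of value-by-value, but only when the receiving node is new enough to understand the new leading type byte. The snippet below is a condensed sketch of the dispatch this patch adds to the generated LongBlock (the Boolean/Int/Double variants are analogous); the wrapper class name is illustrative only, and it assumes package-level access to the writeArrayBlock/writeValues helpers.

import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.LongBigArrayBlock;
import org.elasticsearch.compute.data.LongBlock;

import java.io.IOException;

// Condensed view of the version-gated write-side dispatch; not the generated source itself.
final class BigArrayBlockWireSketch {
    static void writeLongBlock(LongBlock block, StreamOutput out) throws IOException {
        TransportVersion version = out.getTransportVersion();
        if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && block instanceof LongBigArrayBlock b) {
            // New path: a leading type byte tells the reader to rebuild the block from its
            // BigArray-backed pages via LongBigArrayBlock.readArrayBlock.
            out.writeByte(Block.SERIALIZE_BLOCK_BIG_ARRAY);
            b.writeArrayBlock(out);
        } else {
            // Older receivers fall back to plain value-by-value serialization.
            out.writeByte(Block.SERIALIZE_BLOCK_VALUES);
            LongBlock.writeValues(block, out);
        }
    }
}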
--- docs/changelog/106373.yaml | 5 ++++ .../org/elasticsearch/TransportVersions.java | 1 + .../compute/data/BooleanBigArrayBlock.java | 25 +++++++++++++++++++ .../compute/data/BooleanBlock.java | 4 +++ .../compute/data/DoubleBigArrayBlock.java | 25 +++++++++++++++++++ .../compute/data/DoubleBlock.java | 4 +++ .../compute/data/IntBigArrayBlock.java | 25 +++++++++++++++++++ .../elasticsearch/compute/data/IntBlock.java | 4 +++ .../compute/data/LongBigArrayBlock.java | 25 +++++++++++++++++++ .../elasticsearch/compute/data/LongBlock.java | 4 +++ .../org/elasticsearch/compute/data/Block.java | 1 + .../compute/data/X-BigArrayBlock.java.st | 25 +++++++++++++++++++ .../compute/data/X-Block.java.st | 8 ++++++ 13 files changed, 156 insertions(+) create mode 100644 docs/changelog/106373.yaml diff --git a/docs/changelog/106373.yaml b/docs/changelog/106373.yaml new file mode 100644 index 0000000000000..e838c7b1a660d --- /dev/null +++ b/docs/changelog/106373.yaml @@ -0,0 +1,5 @@ +pr: 106373 +summary: Serialize big array blocks +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 87120e6a2f8d3..5c09a5464171d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -147,6 +147,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_EXTENDED_ENRICH_INPUT_TYPE = def(8_607_00_0); public static final TransportVersion ESQL_SERIALIZE_BIG_VECTOR = def(8_608_00_0); public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_609_00_0); + public static final TransportVersion ESQL_SERIALIZE_BIG_ARRAY = def(8_610_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index 890e6b6a59acd..51418445713b0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -8,9 +8,11 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -54,6 +56,29 @@ private BooleanBigArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static BooleanBigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + BooleanBigArrayVector vector = null; + boolean success = false; + try { + vector = BooleanBigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new BooleanBigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public BooleanVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 1dd231c129a2d..ecc2d03105998 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -55,6 +55,7 @@ private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { case SERIALIZE_BLOCK_VALUES -> BooleanBlock.readValues(in); case SERIALIZE_BLOCK_VECTOR -> BooleanVector.readFrom(in.blockFactory(), in).asBlock(); case SERIALIZE_BLOCK_ARRAY -> BooleanArrayBlock.readArrayBlock(in.blockFactory(), in); + case SERIALIZE_BLOCK_BIG_ARRAY -> BooleanBigArrayBlock.readArrayBlock(in.blockFactory(), in); default -> { assert false : "invalid block serialization type " + serializationType; throw new IllegalStateException("invalid serialization type " + serializationType); @@ -91,6 +92,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof BooleanArrayBlock b) { out.writeByte(SERIALIZE_BLOCK_ARRAY); b.writeArrayBlock(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && this instanceof BooleanBigArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_BIG_ARRAY); + b.writeArrayBlock(out); } else { out.writeByte(SERIALIZE_BLOCK_VALUES); BooleanBlock.writeValues(this, out); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 702499513a0c3..5698f40b530b7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -8,9 +8,11 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -54,6 +56,29 @@ private DoubleBigArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static DoubleBigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + DoubleBigArrayVector vector = null; + boolean success = false; + try { + vector = DoubleBigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new DoubleBigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public DoubleVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 20be6402ba097..3a539ebd00d27 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -55,6 +55,7 @@ private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { case SERIALIZE_BLOCK_VALUES -> DoubleBlock.readValues(in); case SERIALIZE_BLOCK_VECTOR -> DoubleVector.readFrom(in.blockFactory(), in).asBlock(); case SERIALIZE_BLOCK_ARRAY -> DoubleArrayBlock.readArrayBlock(in.blockFactory(), in); + case SERIALIZE_BLOCK_BIG_ARRAY -> DoubleBigArrayBlock.readArrayBlock(in.blockFactory(), in); default -> { assert false : "invalid block serialization type " + serializationType; throw new IllegalStateException("invalid serialization type " + serializationType); @@ -91,6 +92,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof DoubleArrayBlock b) { out.writeByte(SERIALIZE_BLOCK_ARRAY); b.writeArrayBlock(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && this instanceof DoubleBigArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_BIG_ARRAY); + b.writeArrayBlock(out); } else { out.writeByte(SERIALIZE_BLOCK_VALUES); DoubleBlock.writeValues(this, out); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index 5e29dace7449c..66c0b15415418 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -8,9 +8,11 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.IntArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -54,6 +56,29 @@ private IntBigArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static IntBigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + IntBigArrayVector vector = null; + boolean success = false; + try { + vector = IntBigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new IntBigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public IntVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 296d9378323a5..81c4dffa50ded 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -55,6 +55,7 @@ private static IntBlock readFrom(BlockStreamInput in) throws IOException { case SERIALIZE_BLOCK_VALUES -> IntBlock.readValues(in); case SERIALIZE_BLOCK_VECTOR -> IntVector.readFrom(in.blockFactory(), in).asBlock(); case SERIALIZE_BLOCK_ARRAY -> IntArrayBlock.readArrayBlock(in.blockFactory(), in); + case SERIALIZE_BLOCK_BIG_ARRAY -> IntBigArrayBlock.readArrayBlock(in.blockFactory(), in); default -> { assert false : "invalid block serialization type " + serializationType; throw new IllegalStateException("invalid serialization type " + serializationType); @@ -91,6 +92,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof IntArrayBlock b) { out.writeByte(SERIALIZE_BLOCK_ARRAY); b.writeArrayBlock(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && this instanceof IntBigArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_BIG_ARRAY); + b.writeArrayBlock(out); } else { out.writeByte(SERIALIZE_BLOCK_VALUES); IntBlock.writeValues(this, out); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index f4b1f16566d24..e3b17cc7be5d4 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -8,9 +8,11 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -54,6 +56,29 @@ private LongBigArrayBlock( : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static LongBigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + LongBigArrayVector vector = null; + boolean success = false; + try { + vector = LongBigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new LongBigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public LongVector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 820600bda87f9..1504f6f7d9100 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -55,6 +55,7 @@ private static LongBlock readFrom(BlockStreamInput in) throws IOException { case SERIALIZE_BLOCK_VALUES -> LongBlock.readValues(in); case SERIALIZE_BLOCK_VECTOR -> LongVector.readFrom(in.blockFactory(), in).asBlock(); case SERIALIZE_BLOCK_ARRAY -> LongArrayBlock.readArrayBlock(in.blockFactory(), in); + case SERIALIZE_BLOCK_BIG_ARRAY -> LongBigArrayBlock.readArrayBlock(in.blockFactory(), in); default -> { assert false : "invalid block serialization type " + serializationType; throw new IllegalStateException("invalid serialization type " + serializationType); @@ -91,6 +92,9 @@ default void writeTo(StreamOutput out) throws IOException { } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof LongArrayBlock b) { out.writeByte(SERIALIZE_BLOCK_ARRAY); b.writeArrayBlock(out); + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && this instanceof LongBigArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_BIG_ARRAY); + b.writeArrayBlock(out); } else { out.writeByte(SERIALIZE_BLOCK_VALUES); LongBlock.writeValues(this, out); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index b14a27fa01930..7c04ef57f9e2e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -246,4 +246,5 @@ static List 
getNamedWriteables() { byte SERIALIZE_BLOCK_VALUES = 0; byte SERIALIZE_BLOCK_VECTOR = 1; byte SERIALIZE_BLOCK_ARRAY = 2; + byte SERIALIZE_BLOCK_BIG_ARRAY = 3; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index 14ec5382f282c..53f0bb09640c5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -8,9 +8,11 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.$Array$; import org.elasticsearch.core.Releasables; +import java.io.IOException; import java.util.BitSet; /** @@ -54,6 +56,29 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } + static $Type$BigArrayBlock readArrayBlock(BlockFactory blockFactory, BlockStreamInput in) throws IOException { + final SubFields sub = new SubFields(blockFactory, in); + $Type$BigArrayVector vector = null; + boolean success = false; + try { + vector = $Type$BigArrayVector.readArrayVector(sub.vectorPositions(), in, blockFactory); + var block = new $Type$BigArrayBlock(vector, sub.positionCount, sub.firstValueIndexes, sub.nullsMask, sub.mvOrdering); + blockFactory.adjustBreaker(block.ramBytesUsed() - vector.ramBytesUsed() - sub.bytesReserved); + success = true; + return block; + } finally { + if (success == false) { + Releasables.close(vector); + blockFactory.adjustBreaker(-sub.bytesReserved); + } + } + } + + void writeArrayBlock(StreamOutput out) throws IOException { + writeSubFields(out); + vector.writeArrayVector(vector.getPositionCount(), out); + } + @Override public $Type$Vector asVector() { return null; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 06aed6f7b0fad..04f91f166b9d2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -70,6 +70,9 @@ $endif$ case SERIALIZE_BLOCK_VALUES -> $Type$Block.readValues(in); case SERIALIZE_BLOCK_VECTOR -> $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); case SERIALIZE_BLOCK_ARRAY -> $Type$ArrayBlock.readArrayBlock(in.blockFactory(), in); +$if(BytesRef)$$else$ + case SERIALIZE_BLOCK_BIG_ARRAY -> $Type$BigArrayBlock.readArrayBlock(in.blockFactory(), in); +$endif$ default -> { assert false : "invalid block serialization type " + serializationType; throw new IllegalStateException("invalid serialization type " + serializationType); @@ -106,6 +109,11 @@ $endif$ } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_ARRAY_BLOCK) && this instanceof $Type$ArrayBlock b) { out.writeByte(SERIALIZE_BLOCK_ARRAY); b.writeArrayBlock(out); +$if(BytesRef)$$else$ + } else if (version.onOrAfter(TransportVersions.ESQL_SERIALIZE_BIG_ARRAY) && this instanceof $Type$BigArrayBlock b) { + out.writeByte(SERIALIZE_BLOCK_BIG_ARRAY); + b.writeArrayBlock(out); +$endif$ } else { out.writeByte(SERIALIZE_BLOCK_VALUES); $Type$Block.writeValues(this, out); From b418c9422c73596dbf2c8f2dfe6f478f1b86220a Mon 
Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 14 Mar 2024 16:36:03 -0500 Subject: [PATCH 223/248] Updating the tika version to 2.9.1 in the ingest attachment plugin (#106315) --- docs/changelog/106315.yaml | 5 ++ gradle/verification-metadata.xml | 100 +++++++++++++++++++++++++ modules/ingest-attachment/build.gradle | 30 +++++--- 3 files changed, 125 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/106315.yaml diff --git a/docs/changelog/106315.yaml b/docs/changelog/106315.yaml new file mode 100644 index 0000000000000..57c41c8024d20 --- /dev/null +++ b/docs/changelog/106315.yaml @@ -0,0 +1,5 @@ +pr: 106315 +summary: Updating the tika version to 2.9.1 in the ingest attachment plugin +area: Ingest Node +type: upgrade +issues: [] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index a9d24d4d50b17..8978274e6df95 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1151,6 +1151,11 @@ + + + + + @@ -1845,6 +1850,11 @@ + + + + + @@ -2433,11 +2443,21 @@ + + + + + + + + + + @@ -2863,6 +2883,11 @@ + + + + + @@ -2873,6 +2898,11 @@ + + + + + @@ -2923,56 +2953,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -4101,6 +4186,11 @@ + + + + + @@ -4136,6 +4226,11 @@ + + + + + @@ -4151,6 +4246,11 @@ + + + + + diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index b10f3ac31958a..2c15ea076e11a 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -18,19 +18,29 @@ esplugin { // this overwrites the 'versions' map from Elasticsearch itself, but indeed we want that -- we're interested in managing our dependencies // as we (and tika) demand, and are not interested in, say, having the same version of commons-codec as elasticsearch itself def versions = [ - 'tika' : '2.7.0', - 'pdfbox': '2.0.27', + 'tika' : '2.9.1', + 'pdfbox': '2.0.29', 'poi' : '5.2.3', - 'mime4j': '0.8.9' + 'mime4j': '0.8.10', + 'commonsCodec': '1.16.0' ] // exclude commons-logging from test dependencies to avoid jar-hell, we use jcl-over-slf4j here -configurations.testRuntimeClasspath { exclude module: 'commons-logging' } +configurations.testRuntimeClasspath { + exclude module: 'commons-logging' + // The version used by POI potentially conflicts with the one pulled in by :test:framework: + resolutionStrategy.force "commons-codec:commons-codec:${versions.commonsCodec}" +} + +configurations.testCompileClasspath { + // The version used by POI potentially conflicts with the one pulled in by :test:framework: + resolutionStrategy.force "commons-codec:commons-codec:${versions.commonsCodec}" +} dependencies { // take over logging for all dependencies - api "org.slf4j:slf4j-api:2.0.6" - api "org.slf4j:jcl-over-slf4j:2.0.6" + api "org.slf4j:slf4j-api:2.0.9" + api "org.slf4j:jcl-over-slf4j:2.0.9" // route slf4j over log4j // TODO blocked on https://github.com/elastic/elasticsearch/issues/93714 @@ -38,7 +48,7 @@ dependencies { // nop all slf4j logging // workaround for https://github.com/elastic/elasticsearch/issues/93714 - api "org.slf4j:slf4j-nop:2.0.6" + api "org.slf4j:slf4j-nop:2.0.9" // mandatory for tika api "org.apache.tika:tika-core:${versions.tika}" @@ -54,7 +64,7 @@ dependencies { api "org.apache.tika:tika-parser-xmp-commons:${versions.tika}" api "org.apache.tika:tika-parser-zip-commons:${versions.tika}" api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.11.0' + api 'commons-io:commons-io:2.14.0' // 
character set detection api 'com.googlecode.juniversalchardet:juniversalchardet:1.0.3' @@ -70,7 +80,7 @@ dependencies { api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" - api "commons-codec:commons-codec:1.15" + api "commons-codec:commons-codec:${versions.commonsCodec}" api 'org.apache.xmlbeans:xmlbeans:5.1.1' api 'org.apache.commons:commons-collections4:4.4' // MS Office @@ -81,7 +91,7 @@ dependencies { api "org.apache.james:apache-mime4j-core:${versions.mime4j}" api "org.apache.james:apache-mime4j-dom:${versions.mime4j}" // EPUB books - api "org.apache.commons:commons-lang3:3.12.0" + api "org.apache.commons:commons-lang3:3.13.0" // Microsoft Word files with visio diagrams api 'org.apache.commons:commons-math3:3.6.1' // POIs dependency From 002ed8d49d4287fd0d54d8ce3e9521d4553ff890 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 14 Mar 2024 23:16:53 +0100 Subject: [PATCH 224/248] ESQL: Fix error on sorting unsortable geo_point and cartesian_point (#106351) * Fix error on sorting unsortable geo_point and cartesian_point Without a LIMIT the correct error worked, but with LIMIT it did not. This fix mimics the same error with LIMIT and adds tests for all three scenarios: * Without limit * With Limit * From row with limit * Update docs/changelog/106351.yaml * Add tests for geo_shape and cartesian_shape also * Updated changelog * Separate point and shape error messages * Move error to later so we get it only if geo field is actually used in sort. * Implemented planner check in Verifier instead This is a much better solution. * Revert previous solution * Also check non-field attributes so the same error is provided for ROW * Changed "can't" to "cannot" * Add unit tests for verifier error * Added sort limitations to documentation * Added unit tests for spatial fields in VerifierTests * Don't run the new yaml tests on older versions These tests mostly test the validation errors which were changed only in 8.14.0, so should not be tested in earlier versions. * Simplify check based on code review, skip duplicate forEachDown --- docs/changelog/106351.yaml | 6 + docs/reference/esql/esql-limitations.asciidoc | 12 + .../xpack/esql/analysis/Verifier.java | 17 ++ .../xpack/esql/analysis/VerifierTests.java | 23 ++ .../rest-api-spec/test/esql/130_spatial.yml | 234 ++++++++++++++++++ 5 files changed, 292 insertions(+) create mode 100644 docs/changelog/106351.yaml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml diff --git a/docs/changelog/106351.yaml b/docs/changelog/106351.yaml new file mode 100644 index 0000000000000..45868acc3a284 --- /dev/null +++ b/docs/changelog/106351.yaml @@ -0,0 +1,6 @@ +pr: 106351 +summary: "Fix error on sorting unsortable `geo_point` and `cartesian_point`" +area: ES|QL +type: bug +issues: + - 106007 diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index d007a7c3cfd06..11e3fd7ae9883 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -73,6 +73,18 @@ unsupported type is not explicitly used in a query, it is returned with `null` values, with the exception of nested fields. Nested fields are not returned at all. +[discrete] +==== Limitations on supported types + +Some <> are not supported in all contexts: + +* Spatial types are not supported in the <> processing command. 
+ Specifying a column of one of these types as a sort parameter will result in an error: +** `geo_point` +** `geo_shape` +** `cartesian_point` +** `cartesian_shape` + [discrete] [[esql-_source-availability]] === _source availability diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index dd4efdc0694c1..6492743c8548b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.ql.capabilities.Unresolvable; import org.elasticsearch.xpack.ql.common.Failure; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.AttributeMap; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.NamedExpression; @@ -32,6 +33,7 @@ import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.Limit; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.ql.plan.logical.OrderBy; import org.elasticsearch.xpack.ql.plan.logical.Project; import org.elasticsearch.xpack.ql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.ql.type.DataType; @@ -139,6 +141,7 @@ else if (p.resolved()) { checkOperationsOnUnsignedLong(p, failures); checkBinaryComparison(p, failures); + checkForSortOnSpatialTypes(p, failures); }); checkRemoteEnrich(plan, failures); @@ -381,6 +384,20 @@ private static Failure validateUnsignedLongNegation(Neg neg) { return null; } + /** + * Makes sure that spatial types do not appear in sorting contexts. + */ + private static void checkForSortOnSpatialTypes(LogicalPlan p, Set localFailures) { + if (p instanceof OrderBy ob) { + ob.forEachExpression(Attribute.class, attr -> { + DataType dataType = attr.dataType(); + if (EsqlDataTypes.isSpatial(dataType)) { + localFailures.add(fail(attr, "cannot sort on " + dataType.typeName())); + } + }); + } + } + /** * Ensure that no remote enrich is allowed after a reduction or an enrich with coordinator mode. *

        diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 0e075af55fd8f..866a4c458c424 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.hamcrest.Matchers.containsString; @@ -355,6 +356,28 @@ public void testUnfinishedAggFunction() { assertEquals("1:23: invalid stats declaration; [avg] is not an aggregate function", error("from test | stats c = avg")); } + public void testSpatialSort() { + String prefix = "ROW wkt = [\"POINT(42.9711 -14.7553)\", \"POINT(75.8093 22.7277)\"] | MV_EXPAND wkt "; + assertEquals("1:130: cannot sort on geo_point", error(prefix + "| EVAL shape = TO_GEOPOINT(wkt) | limit 5 | sort shape")); + assertEquals( + "1:136: cannot sort on cartesian_point", + error(prefix + "| EVAL shape = TO_CARTESIANPOINT(wkt) | limit 5 | sort shape") + ); + assertEquals("1:130: cannot sort on geo_shape", error(prefix + "| EVAL shape = TO_GEOSHAPE(wkt) | limit 5 | sort shape")); + assertEquals( + "1:136: cannot sort on cartesian_shape", + error(prefix + "| EVAL shape = TO_CARTESIANSHAPE(wkt) | limit 5 | sort shape") + ); + var airports = AnalyzerTestUtils.analyzer(loadMapping("mapping-airports.json", "airports")); + var airportsWeb = AnalyzerTestUtils.analyzer(loadMapping("mapping-airports_web.json", "airports_web")); + var countriesBbox = AnalyzerTestUtils.analyzer(loadMapping("mapping-countries_bbox.json", "countries_bbox")); + var countriesBboxWeb = AnalyzerTestUtils.analyzer(loadMapping("mapping-countries_bbox_web.json", "countries_bbox_web")); + assertEquals("1:32: cannot sort on geo_point", error("FROM airports | LIMIT 5 | sort location", airports)); + assertEquals("1:36: cannot sort on cartesian_point", error("FROM airports_web | LIMIT 5 | sort location", airportsWeb)); + assertEquals("1:38: cannot sort on geo_shape", error("FROM countries_bbox | LIMIT 5 | sort shape", countriesBbox)); + assertEquals("1:42: cannot sort on cartesian_shape", error("FROM countries_bbox_web | LIMIT 5 | sort shape", countriesBboxWeb)); + } + private String error(String query) { return error(query, defaultAnalyzer); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml new file mode 100644 index 0000000000000..9368df6c81604 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml @@ -0,0 +1,234 @@ +--- +setup: + - skip: + version: " - 8.13.99" + reason: "Mixed cluster tests don't work with the changed error message from sort" + features: allowed_warnings_regex + + - do: + indices.create: + index: geo_points + body: + mappings: + properties: + location: + type: geo_point + + - do: + bulk: + index: geo_points + refresh: true + body: + - { "index": { } } + - { "location": "POINT(1 -1)" } + - { "index": { } } + - { "location": "POINT(-1 1)" } + + - do: + indices.create: + index: cartesian_points + body: + mappings: + properties: + location: + type: point + + 
- do: + bulk: + index: cartesian_points + refresh: true + body: + - { "index": { } } + - { "location": "POINT(4321 -1234)" } + - { "index": { } } + - { "location": "POINT(-4321 1234)" } + + - do: + indices.create: + index: geo_shapes + body: + mappings: + properties: + shape: + type: geo_shape + + - do: + bulk: + index: geo_shapes + refresh: true + body: + - { "index": { } } + - { "shape": "POINT(0 0)" } + - { "index": { } } + - { "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } + + - do: + indices.create: + index: cartesian_shapes + body: + mappings: + properties: + shape: + type: shape + + - do: + bulk: + index: cartesian_shapes + refresh: true + body: + - { "index": { } } + - { "shape": "POINT(0 0)" } + - { "index": { } } + - { "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } + +--- +geo_point: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from geo_points' + - match: { columns.0.name: location } + - match: { columns.0.type: geo_point } + - length: { values: 2 } + - match: { values.0.0: "POINT (1.0 -1.0)" } + - match: { values.1.0: "POINT (-1.0 1.0)" } + +--- +geo_point unsortable: + - do: + catch: /cannot sort on geo_point/ + esql.query: + body: + query: 'from geo_points | sort location' + +--- +geo_point unsortable with limit: + - do: + catch: /cannot sort on geo_point/ + esql.query: + body: + query: 'from geo_points | LIMIT 10 | sort location' + +--- +geo_point unsortable with limit from row: + - do: + catch: /cannot sort on geo_point/ + esql.query: + body: + query: 'ROW wkt = ["POINT(42.9711 -14.7553)", "POINT(75.8093 22.7277)"] | MV_EXPAND wkt | EVAL pt = TO_GEOPOINT(wkt) | limit 5 | sort pt' + +--- +cartesian_point: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from cartesian_points' + - match: { columns.0.name: location } + - match: { columns.0.type: cartesian_point } + - length: { values: 2 } + - match: { values.0.0: "POINT (4321.0 -1234.0)" } + - match: { values.1.0: "POINT (-4321.0 1234.0)" } + +--- +cartesian_point unsortable: + - do: + catch: /cannot sort on cartesian_point/ + esql.query: + body: + query: 'from cartesian_points | sort location' + +--- +cartesian_point unsortable with limit: + - do: + catch: /cannot sort on cartesian_point/ + esql.query: + body: + query: 'from cartesian_points | LIMIT 10 | sort location' + +--- +cartesian_point unsortable with limit from row: + - do: + catch: /cannot sort on cartesian_point/ + esql.query: + body: + query: 'ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] | MV_EXPAND wkt | EVAL pt = TO_CARTESIANPOINT(wkt) | limit 5 | sort pt' + +--- +geo_shape: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from geo_shapes' + - match: { columns.0.name: shape } + - match: { columns.0.type: geo_shape } + - length: { values: 2 } + - match: { values.0.0: "POINT (0.0 0.0)" } + - match: { values.1.0: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } + +--- +geo_shape unsortable: + - do: + catch: /cannot sort on geo_shape/ + esql.query: + body: + query: 'from geo_shapes | sort shape' + +--- +geo_shape unsortable with limit: + - do: + catch: /cannot sort on geo_shape/ + esql.query: + body: + query: 'from geo_shapes | LIMIT 10 | sort shape' + +--- +geo_shape unsortable with limit from row: + - do: + catch: /cannot sort on geo_shape/ + esql.query: + body: + query: 'ROW wkt = 
["POINT(42.9711 -14.7553)", "POINT(75.8093 22.7277)"] | MV_EXPAND wkt | EVAL shape = TO_GEOSHAPE(wkt) | limit 5 | sort shape' + +--- +cartesian_shape: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from cartesian_shapes' + - match: { columns.0.name: shape } + - match: { columns.0.type: cartesian_shape } + - length: { values: 2 } + - match: { values.0.0: "POINT (0.0 0.0)" } + - match: { values.1.0: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } + +--- +cartesian_shape unsortable: + - do: + catch: /cannot sort on cartesian_shape/ + esql.query: + body: + query: 'from cartesian_shapes | sort shape' + +--- +cartesian_shape unsortable with limit: + - do: + catch: /cannot sort on cartesian_shape/ + esql.query: + body: + query: 'from cartesian_shapes | LIMIT 10 | sort shape' + +--- +cartesian_shape unsortable with limit from row: + - do: + catch: /cannot sort on cartesian_shape/ + esql.query: + body: + query: 'ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] | MV_EXPAND wkt | EVAL shape = TO_CARTESIANSHAPE(wkt) | limit 5 | sort shape' From 387eb38bfd83d23a2c6592d7253a302f9e37fa81 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Fri, 15 Mar 2024 13:28:28 +0000 Subject: [PATCH 225/248] Autoshard data streams on rollover (#106076) This enhances our rollover logic to use the data stream autosharding service to retrieve an autosharding recommendation. If the recommendation is an INCREASE_SHARDS or an COOLDOWN_PREVENTED_INCREASE_SHARDS we'll create a rollover condition that'll capture this recommendation, such that rollover will be triggered in ourder to increase the number of shards even if other "regular" conditions are not met (or in the case where cooldown prevented rollover, display the information as to why in the rollover response). All other recommednations are passed to the `MetadataRolloverService` that'll do the needful to ensure the new write index of the data stream receives the correct number of shards. Note that a DECREASE_SHARDS recommendation will reduce the number of shards for a data stream when one of the other "regular" rollover conditions match. It will not trigger a rollover itself, only the INCREASE_SHARDS recommendation will. Some notes on the `NOT_APPLICABLE` recommendation: N/A results are switching back a data stream to the sharding configured in the index template. A data stream can be using auto sharding and later be excluded from the functionality using the `data_streams.auto_sharding.excludes` setting. After a data stream is excluded it needs to start using the number of shards configured in the backing index template. 
The new autosharding_condition will look like this in the rollover response: ``` "acknowledged": true, "shards_acknowledged": true, "old_index": ".ds-logs-nginx-2024.03.13-000003", "new_index": ".ds-logs-nginx-2024.03.13-000004", "rolled_over": true, "dry_run": false, "lazy": false, "conditions": { "[optimal_shard_count : 3]": true } ``` and like so in the `met_conditions` field, part of rollover info in the cluster state : ``` "rollover_info" : { "logs-nginx" : { "met_conditions" : { "max_docs" : 20000000, "optimal_shard_count" : 3 }, "time" : 1710421491138 } }, ``` --- .../datastreams/DataStreamAutoshardingIT.java | 615 ++++++++++++++++++ .../datastreams/DataStreamFeatures.java | 4 +- .../DataStreamGetWriteIndexTests.java | 2 +- ...etadataDataStreamRolloverServiceTests.java | 11 +- .../org/elasticsearch/TransportVersions.java | 1 + .../admin/indices/rollover/Condition.java | 8 +- .../indices/rollover/LazyRolloverAction.java | 6 +- .../rollover/MetadataRolloverService.java | 71 +- .../rollover/OptimalShardCountCondition.java | 70 ++ .../indices/rollover/RolloverConditions.java | 24 +- .../rollover/TransportRolloverAction.java | 82 ++- .../DataStreamAutoShardingService.java | 22 +- .../node/TransportBroadcastByNodeAction.java | 6 +- .../cluster/metadata/DataStream.java | 16 +- .../common/settings/ClusterSettings.java | 6 + .../elasticsearch/indices/IndicesModule.java | 9 +- .../elasticsearch/node/NodeConstruction.java | 10 + .../indices/rollover/ConditionTests.java | 35 + ...adataRolloverServiceAutoShardingTests.java | 489 ++++++++++++++ .../MetadataRolloverServiceTests.java | 4 + .../OptimalShardCountConditionTests.java | 33 + .../rollover/RolloverConditionsTests.java | 34 + .../TransportRolloverActionTests.java | 18 +- .../DataStreamAutoShardingServiceTests.java | 72 +- .../cluster/metadata/DataStreamTests.java | 68 +- .../cluster/metadata/IndexMetadataTests.java | 5 +- .../metadata/DataStreamTestHelper.java | 26 + .../ReactiveStorageDeciderService.java | 7 +- 28 files changed, 1706 insertions(+), 48 deletions(-) create mode 100644 modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceAutoShardingTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountConditionTests.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java new file mode 100644 index 0000000000000..332622cc98db8 --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -0,0 +1,615 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.rollover.Condition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.OptimalShardCountCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static 
org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; + +public class DataStreamAutoshardingIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class, TestAutoshardingPlugin.class); + } + + @Before + public void configureClusterSettings() { + updateClusterSettings( + Settings.builder() + .putList(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), List.of()) + // we want to manually trigger the rollovers in this test suite to be able to assert incrementally the changes in shard + // configurations + .put(DataStreamLifecycleService.DATA_STREAM_LIFECYCLE_POLL_INTERVAL, "30d") + ); + } + + @After + public void resetClusterSetting() { + updateClusterSettings( + Settings.builder() + .putNull(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey()) + .putNull(DataStreamLifecycleService.DATA_STREAM_LIFECYCLE_POLL_INTERVAL) + ); + } + + public void testRolloverOnAutoShardCondition() throws Exception { + final String dataStreamName = "logs-es"; + + putComposableIndexTemplate( + "my-template", + List.of("logs-*"), + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + + indexDocs(dataStreamName, randomIntBetween(100, 200)); + + { + ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + String assignedShardNodeId = clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId(); + + Index firstGenerationIndex = clusterStateBeforeRollover.metadata().dataStreams().get(dataStreamName).getWriteIndex(); + IndexMetadata firstGenerationMeta = clusterStateBeforeRollover.getMetadata().index(firstGenerationIndex); + + List shards = new ArrayList<>(firstGenerationMeta.getNumberOfShards()); + for (int i = 0; i < firstGenerationMeta.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 75.0 which will make the auto sharding service recommend an optimal number + // of 5 shards + shards.add(getShardStats(firstGenerationMeta, i, 75, assignedShardNodeId)); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), firstGenerationMeta.getNumberOfShards(), shards, List.of()) + ); + }); + } + + assertAcked(indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata secondGenerationMeta = 
clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we auto sharded up to 5 shards + assertThat(secondGenerationMeta.getNumberOfShards(), is(5)); + + IndexMetadata index = clusterStateAfterRollover.metadata().index(firstGenerationIndex); + Map rolloverInfos = index.getRolloverInfos(); + assertThat(rolloverInfos.size(), is(1)); + List> metConditions = rolloverInfos.get(dataStreamName).getMetConditions(); + assertThat(metConditions.size(), is(1)); + assertThat(metConditions.get(0).value(), instanceOf(Integer.class)); + int autoShardingRolloverInfo = (int) metConditions.get(0).value(); + assertThat(autoShardingRolloverInfo, is(5)); + } + + // let's do another rollover now that will not increase the number of shards because the increase shards cooldown has not lapsed, + // however the rollover will use the existing/previous auto shard configuration and the new generation index will have 5 shards + { + ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + String assignedShardNodeId = clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId(); + + IndexMetadata secondGenerationMeta = clusterStateBeforeRollover.metadata().index(dataStreamBeforeRollover.getIndices().get(1)); + List shards = new ArrayList<>(secondGenerationMeta.getNumberOfShards()); + for (int i = 0; i < secondGenerationMeta.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 100.0 which will make the auto sharding service recommend an optimal number of + // 7 shards + shards.add(getShardStats(secondGenerationMeta, i, 100, assignedShardNodeId)); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), secondGenerationMeta.getNumberOfShards(), shards, List.of()) + ); + }); + } + + RolloverResponse response = indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet(); + assertAcked(response); + Map conditionStatus = response.getConditionStatus(); + // empty rollover executed + assertThat(conditionStatus.size(), is(0)); + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata thirdGenerationMeta = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we remained on 5 shards due to the increase shards cooldown + assertThat(thirdGenerationMeta.getNumberOfShards(), is(5)); + } + + { + try { + // eliminate the increase shards cooldown and re-do the rollover should configure the data stream to 7 shards + // this time also add a rollover condition that does NOT match so that we test that it's the auto sharding that triggers + // indeed the rollover + updateClusterSettings( + Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.getKey(), "0s") + ); + + ClusterState 
clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + String assignedShardNodeId = clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId(); + + IndexMetadata thirdGenIndex = clusterStateBeforeRollover.metadata().index(dataStreamBeforeRollover.getIndices().get(2)); + List shards = new ArrayList<>(thirdGenIndex.getNumberOfShards()); + for (int i = 0; i < thirdGenIndex.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 100.0 which will make the auto sharding service recommend an optimal + // number of 7 shards + shards.add(getShardStats(thirdGenIndex, i, 100, assignedShardNodeId)); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), thirdGenIndex.getNumberOfShards(), shards, List.of()) + ); + }); + } + + RolloverRequest request = new RolloverRequest(dataStreamName, null); + request.setConditions(RolloverConditions.newBuilder().addMaxIndexDocsCondition(1_000_000L).build()); + RolloverResponse response = indicesAdmin().rolloverIndex(request).actionGet(); + assertAcked(response); + Map conditionStatus = response.getConditionStatus(); + assertThat(conditionStatus.size(), is(2)); + for (Map.Entry entry : conditionStatus.entrySet()) { + if (entry.getKey().equals(new MaxDocsCondition(1_000_000L).toString())) { + assertThat(entry.getValue(), is(false)); + } else { + assertThat(entry.getKey(), is(new OptimalShardCountCondition(7).toString())); + assertThat(entry.getValue(), is(true)); + } + } + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata fourthGenerationMeta = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we auto-sharded up to 7 shards as there was no cooldown period + assertThat(fourthGenerationMeta.getNumberOfShards(), is(7)); + } finally { + // reset increase shards cooldown value + updateClusterSettings( + Settings.builder().putNull(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.getKey()) + ); + } + } + } + + public void testReduceShardsOnRollover() throws IOException { + final String dataStreamName = "logs-es"; + + // start with 3 shards + putComposableIndexTemplate( + "my-template", + List.of("logs-*"), + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + + indexDocs(dataStreamName, randomIntBetween(100, 200)); + + { + // rollover executes but the reduction in shard number will not be executed due to the reduce shards cooldown + ClusterState clusterStateBeforeRollover = 
internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + String assignedShardNodeId = clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId(); + + Index firstGenerationIndex = clusterStateBeforeRollover.metadata().dataStreams().get(dataStreamName).getWriteIndex(); + IndexMetadata firstGenerationMeta = clusterStateBeforeRollover.getMetadata().index(firstGenerationIndex); + + List shards = new ArrayList<>(firstGenerationMeta.getNumberOfShards()); + for (int i = 0; i < firstGenerationMeta.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 2.0 which will make the auto sharding service recommend an optimal number + // of 2 shards + shards.add(getShardStats(firstGenerationMeta, i, 2, assignedShardNodeId)); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), firstGenerationMeta.getNumberOfShards(), shards, List.of()) + ); + }); + } + + assertAcked(indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata secondGenerationMeta = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we kept the number of shards to 3 as the reduce shards cooldown prevented us reducing the number of shards + assertThat(secondGenerationMeta.getNumberOfShards(), is(3)); + } + + { + // temporarily disable reduce shards cooldown and test that a rollover that doesn't match ANOTHER condition will not be + // executed just because we need to reduce the number of shards, and then that rollover when a different condition does + // indeed match will execute the rollover and the number of shards will be reduced to 2 + try { + updateClusterSettings( + Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN.getKey(), "0s") + ); + + ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + String assignedShardNodeId = clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId(); + + IndexMetadata secondGenerationIndex = clusterStateBeforeRollover.metadata() + .index(dataStreamBeforeRollover.getIndices().get(1)); + List shards = new ArrayList<>(secondGenerationIndex.getNumberOfShards()); + for (int i = 0; i < secondGenerationIndex.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 2.0 which will make the auto sharding service recommend an optimal + // number of 2 shards + shards.add(getShardStats(secondGenerationIndex, i, 2, assignedShardNodeId)); + } + + for (DiscoveryNode node : 
clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), secondGenerationIndex.getNumberOfShards(), shards, List.of()) + ); + }); + } + + RolloverRequest request = new RolloverRequest(dataStreamName, null); + // adding condition that does NOT match + request.setConditions(RolloverConditions.newBuilder().addMaxIndexDocsCondition(1_000_000L).build()); + RolloverResponse response = indicesAdmin().rolloverIndex(request).actionGet(); + assertThat(response.isRolledOver(), is(false)); + Map conditionStatus = response.getConditionStatus(); + assertThat(conditionStatus.size(), is(1)); + assertThat(conditionStatus.get(new MaxDocsCondition(1_000_000L).toString()), is(false)); + + // let's rollover with a condition that does match and test that the number of shards is reduced to 2 + indexDocs(dataStreamName, 100); + request = new RolloverRequest(dataStreamName, null); + // adding condition that does NOT match + request.setConditions(RolloverConditions.newBuilder().addMaxIndexDocsCondition(1L).build()); + response = indicesAdmin().rolloverIndex(request).actionGet(); + assertThat(response.isRolledOver(), is(true)); + conditionStatus = response.getConditionStatus(); + assertThat(conditionStatus.size(), is(2)); + for (Map.Entry entry : conditionStatus.entrySet()) { + if (entry.getKey().equals(new MaxDocsCondition(1L).toString())) { + assertThat(conditionStatus.get(new MaxDocsCondition(1L).toString()), is(true)); + } else { + assertThat(conditionStatus.get(new OptimalShardCountCondition(2).toString()), is(true)); + } + } + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata thirdGenerationMeta = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + assertThat(thirdGenerationMeta.getNumberOfShards(), is(2)); + } finally { + // reset increase shards cooldown value + updateClusterSettings( + Settings.builder().putNull(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN.getKey()) + ); + + } + + } + + } + + public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException { + final String dataStreamName = "logs-es"; + + putComposableIndexTemplate( + "my-template", + List.of("logs-*"), + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + + indexDocs(dataStreamName, randomIntBetween(100, 200)); + + { + ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + + Index firstGenerationIndex = clusterStateBeforeRollover.metadata().dataStreams().get(dataStreamName).getWriteIndex(); + IndexMetadata firstGenerationMeta = 
clusterStateBeforeRollover.getMetadata().index(firstGenerationIndex); + + List shards = new ArrayList<>(firstGenerationMeta.getNumberOfShards()); + for (int i = 0; i < firstGenerationMeta.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 75.0 which will make the auto sharding service recommend an optimal number + // of 5 shards + shards.add( + getShardStats( + firstGenerationMeta, + i, + 75, + clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(0) + .primaryShard() + .currentNodeId() + ) + ); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), firstGenerationMeta.getNumberOfShards(), shards, List.of()) + ); + }); + } + + assertAcked(indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); + + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata secondGenerationMeta = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we auto sharded up to 5 shards + assertThat(secondGenerationMeta.getNumberOfShards(), is(5)); + } + + { + try { + // eliminate the increase shards cooldown so there are no potential barriers to another increase shards option (we'll + // actually also simulate the stats such that an increase to 7 is warranted) and execute a lazy rollover that should not + // indeed auto shard up, but just keep the existing auto sharding event and create a new index with 5 shards (as dictated + // by the existing auto sharding event) + updateClusterSettings( + Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.getKey(), "0s") + ); + + ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); + + IndexMetadata secondGenIndex = clusterStateBeforeRollover.metadata().index(dataStreamBeforeRollover.getIndices().get(1)); + List shards = new ArrayList<>(secondGenIndex.getNumberOfShards()); + for (int i = 0; i < secondGenIndex.getNumberOfShards(); i++) { + // the shard stats will yield a write load of 100.0 which will make the auto sharding service recommend an optimal + // number of 7 shards + shards.add( + getShardStats( + secondGenIndex, + i, + 100, + clusterStateBeforeRollover.routingTable() + .index(dataStreamBeforeRollover.getWriteIndex()) + .shard(i) + .primaryShard() + .currentNodeId() + ) + ); + } + + for (DiscoveryNode node : clusterStateBeforeRollover.nodes().getAllNodes()) { + MockTransportService.getInstance(node.getName()) + .addRequestHandlingBehavior(IndicesStatsAction.NAME + "[n]", (handler, request, channel, task) -> { + TransportIndicesStatsAction instance = internalCluster().getInstance( + TransportIndicesStatsAction.class, + node.getName() + ); + channel.sendResponse( + instance.new NodeResponse(node.getId(), secondGenIndex.getNumberOfShards(), shards, 
List.of()) + ); + }); + } + + RolloverRequest request = new RolloverRequest(dataStreamName, null); + request.lazy(true); + assertAcked(indicesAdmin().rolloverIndex(request).actionGet()); + + // index some docs so the rollover is executed + indexDocs(dataStreamName, 10); + ClusterState clusterStateAfterRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + DataStream dataStream = clusterStateAfterRollover.getMetadata().dataStreams().get(dataStreamName); + IndexMetadata thirdGenerationIndex = clusterStateAfterRollover.metadata().getIndexSafe(dataStream.getWriteIndex()); + + // we kept the number of shards to 5 as we did a lazy rollover + assertThat(thirdGenerationIndex.getNumberOfShards(), is(5)); + } finally { + // reset increase shards cooldown value + updateClusterSettings( + Settings.builder().putNull(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.getKey()) + ); + } + } + } + + private static ShardStats getShardStats(IndexMetadata indexMeta, int shardIndex, long targetWriteLoad, String assignedShardNodeId) { + ShardId shardId = new ShardId(indexMeta.getIndex(), shardIndex); + Path path = createTempDir().resolve("indices").resolve(indexMeta.getIndexUUID()).resolve(String.valueOf(shardIndex)); + ShardRouting shardRouting = ShardRouting.newUnassigned( + shardId, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null), + ShardRouting.Role.DEFAULT + ); + shardRouting = shardRouting.initialize(assignedShardNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + shardRouting = shardRouting.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + CommonStats stats = new CommonStats(); + stats.docs = new DocsStats(100, 0, randomByteSizeValue().getBytes()); + stats.store = new StoreStats(); + stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); + return new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, false, 0); + } + + static void putComposableIndexTemplate(String id, List patterns, @Nullable Settings settings) throws IOException { + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, null, null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + } + + static void indexDocs(String dataStream, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; + for (BulkItemResponse itemResponse : bulkResponse) { + assertThat(itemResponse.getFailureMessage(), nullValue()); + assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); + assertThat(itemResponse.getIndex(), 
startsWith(backingIndexPrefix)); + } + indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); + } + + /** + * Test plugin that registers an additional setting. + */ + public static class TestAutoshardingPlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.boolSetting(DATA_STREAMS_AUTO_SHARDING_ENABLED, false, Setting.Property.Dynamic, Setting.Property.NodeScope) + ); + } + + @Override + public Settings additionalSettings() { + return Settings.builder().put(DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(); + } + } + +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index 734c10570ab2b..06dc8919360f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -9,6 +9,7 @@ package org.elasticsearch.datastreams; import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -24,7 +25,8 @@ public class DataStreamFeatures implements FeatureSpecification { public Set getFeatures() { return Set.of( DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12 - LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER // Added in 8.13 + LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13 + DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE ); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index d0b41c847a61d..b61cbdc837010 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -318,7 +318,7 @@ private MetadataRolloverService.RolloverResult rolloverOver(ClusterState state, MaxDocsCondition condition = new MaxDocsCondition(randomNonNegativeLong()); List> metConditions = Collections.singletonList(condition); CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); - return rolloverService.rolloverClusterState(state, name, null, createIndexRequest, metConditions, time, false, false, null); + return rolloverService.rolloverClusterState(state, name, null, createIndexRequest, metConditions, time, false, false, null, null); } private Index getWriteIndex(ClusterState state, String name, String timestamp) { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 4f2df2c690bc8..2bfbeb8e37aaf 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -119,7 +119,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { now, 
randomBoolean(), false, - indexStats + indexStats, + null ); long after = testThreadPool.absoluteTimeInMillis(); @@ -218,6 +219,7 @@ public void testRolloverAndMigrateDataStream() throws Exception { now, randomBoolean(), false, + null, null ); @@ -310,6 +312,7 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting now, randomBoolean(), false, + null, null ); @@ -375,7 +378,8 @@ public void testRolloverClusterStateWithBrokenOlderTsdbDataStream() throws Excep now, randomBoolean(), false, - indexStats + indexStats, + null ); long after = testThreadPool.absoluteTimeInMillis(); @@ -455,7 +459,8 @@ public void testRolloverClusterStateWithBrokenTsdbDataStream() throws Exception now, randomBoolean(), false, - indexStats + indexStats, + null ) ); assertThat(e.getMessage(), containsString("is overlapping with backing index")); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5c09a5464171d..536a5db51e8a8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -148,6 +148,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_SERIALIZE_BIG_VECTOR = def(8_608_00_0); public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_609_00_0); public static final TransportVersion ESQL_SERIALIZE_BIG_ARRAY = def(8_610_00_0); + public static final TransportVersion AUTO_SHARDING_ROLLOVER_CONDITION = def(8_611_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index ba7d6b03043c5..b4a466dc9aa1e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -20,12 +20,14 @@ */ public abstract class Condition implements NamedWriteable, ToXContentFragment { - /** - * Describes the type of condition - a min_* condition (MIN) or max_* condition (MAX). 
+ /* + * Describes the type of condition - a min_* condition (MIN), max_* condition (MAX), or an automatic condition (automatic conditions + * are something that the platform configures and manages) */ public enum Type { MIN, - MAX + MAX, + AUTOMATIC } protected T value; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index 9266a320f598c..623186e052eb7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -59,6 +60,7 @@ public TransportLazyRolloverAction( MetadataRolloverService rolloverService, AllocationService allocationService, MetadataDataStreamsService metadataDataStreamsService, + DataStreamAutoShardingService dataStreamAutoShardingService, Client client ) { super( @@ -71,7 +73,8 @@ public TransportLazyRolloverAction( rolloverService, client, allocationService, - metadataDataStreamsService + metadataDataStreamsService, + dataStreamAutoShardingService ); } @@ -121,6 +124,7 @@ protected void masterOperation( new RolloverRequest(rolloverRequest.getRolloverTarget(), null), null, trialRolloverResponse, + null, listener ); submitRolloverTask(rolloverRequest, source, rolloverTask); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 4972a784cc2bd..6645de880ad86 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -8,14 +8,18 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingResult; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; @@ -61,6 +65,7 @@ * Service responsible for handling rollover requests for write aliases and data streams */ public class MetadataRolloverService { + private static final Logger logger = LogManager.getLogger(MetadataRolloverService.class); private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-\\d+$"); private static final List 
VALID_ROLLOVER_TARGETS = List.of(ALIAS, DATA_STREAM); @@ -110,7 +115,8 @@ public RolloverResult rolloverClusterState( Instant now, boolean silent, boolean onlyValidate, - @Nullable IndexMetadataStats sourceIndexStats + @Nullable IndexMetadataStats sourceIndexStats, + @Nullable AutoShardingResult autoShardingResult ) throws Exception { validate(currentState.metadata(), rolloverTarget, newIndexName, createIndexRequest); final IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(rolloverTarget); @@ -134,7 +140,8 @@ public RolloverResult rolloverClusterState( now, silent, onlyValidate, - sourceIndexStats + sourceIndexStats, + autoShardingResult ); default -> // the validate method above prevents this case @@ -244,7 +251,8 @@ private RolloverResult rolloverDataStream( Instant now, boolean silent, boolean onlyValidate, - @Nullable IndexMetadataStats sourceIndexStats + @Nullable IndexMetadataStats sourceIndexStats, + @Nullable AutoShardingResult autoShardingResult ) throws Exception { if (SnapshotsService.snapshottingDataStreams(currentState, Collections.singleton(dataStream.getName())).isEmpty() == false) { @@ -281,6 +289,54 @@ private RolloverResult rolloverDataStream( return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), currentState); } + DataStreamAutoShardingEvent dataStreamAutoShardingEvent = autoShardingResult == null + ? dataStream.getAutoShardingEvent() + : switch (autoShardingResult.type()) { + case NO_CHANGE_REQUIRED -> { + logger.info( + "Rolling over data stream [{}] using existing auto-sharding recommendation [{}]", + dataStreamName, + dataStream.getAutoShardingEvent() + ); + yield dataStream.getAutoShardingEvent(); + } + case INCREASE_SHARDS, DECREASE_SHARDS -> { + logger.info("Auto sharding data stream [{}] to [{}]", dataStreamName, autoShardingResult); + yield new DataStreamAutoShardingEvent( + dataStream.getWriteIndex().getName(), + autoShardingResult.targetNumberOfShards(), + now.toEpochMilli() + ); + } + case COOLDOWN_PREVENTED_INCREASE, COOLDOWN_PREVENTED_DECREASE -> { + // we're in the cooldown period for this particular recommendation so perhaps use a previous autosharding + // recommendation (or the value configured in the backing index template otherwise) + if (dataStream.getAutoShardingEvent() != null) { + logger.info( + "Rolling over data stream [{}] using existing auto-sharding recommendation [{}]", + dataStreamName, + dataStream.getAutoShardingEvent() + ); + } + yield dataStream.getAutoShardingEvent(); + } + // data sharding might not be available due to the feature not being available/enabled or due to cluster level excludes + // being configured. 
the index template will dictate the number of shards as usual + case NOT_APPLICABLE -> { + logger.debug("auto sharding is not applicable for data stream [{}]", dataStreamName); + yield null; + } + }; + + // configure the number of shards using an auto sharding event (new, or existing) if we have one + if (dataStreamAutoShardingEvent != null) { + Settings settingsWithAutoSharding = Settings.builder() + .put(createIndexRequest.settings()) + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), dataStreamAutoShardingEvent.targetNumberOfShards()) + .build(); + createIndexRequest.settings(settingsWithAutoSharding); + } + var createIndexClusterStateRequest = prepareDataStreamCreateIndexRequest( dataStreamName, newWriteIndexName, @@ -298,7 +354,14 @@ private RolloverResult rolloverDataStream( silent, (builder, indexMetadata) -> { downgradeBrokenTsdbBackingIndices(dataStream, builder); - builder.put(dataStream.rollover(indexMetadata.getIndex(), newGeneration, metadata.isTimeSeriesTemplate(templateV2))); + builder.put( + dataStream.rollover( + indexMetadata.getIndex(), + newGeneration, + metadata.isTimeSeriesTemplate(templateV2), + dataStreamAutoShardingEvent + ) + ); }, rerouteCompletionIsNotRequired() ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java new file mode 100644 index 0000000000000..93a11b8fe0855 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Condition for automatically increasing the number of shards for a data stream. This indicates the optimum number of shards that was + * configured for the index abstraction as part of rollover. + * It's more of a marker condition, when present the condition is met, more than a condition we evaluate against stats. 
+ */ +public class OptimalShardCountCondition extends Condition { + public static final String NAME = "optimal_shard_count"; + + public OptimalShardCountCondition(int optimalShards) { + super(NAME, Type.AUTOMATIC); + this.value = optimalShards; + } + + public OptimalShardCountCondition(StreamInput in) throws IOException { + super(NAME, Type.AUTOMATIC); + this.value = in.readVInt(); + } + + @Override + public Result evaluate(final Stats stats) { + return new Result(this, true); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(NAME, value); + } + + public static OptimalShardCountCondition fromXContent(XContentParser parser) throws IOException { + if (parser.nextToken() == XContentParser.Token.VALUE_NUMBER) { + return new OptimalShardCountCondition(parser.intValue()); + } else { + throw new IllegalArgumentException("invalid token when parsing " + NAME + " condition: " + parser.currentToken()); + } + } + + @Override + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersions.AUTO_SHARDING_ROLLOVER_CONDITION); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java index 24f93ccb45348..d7cd7459d4821 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingResult; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,6 +28,9 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.DECREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.INCREASE_SHARDS; + /** * Contains the conditions that determine if an index can be rolled over or not. It is used by the {@link RolloverRequest}, * the Index Lifecycle Management and the Data Stream Lifecycle. @@ -243,7 +247,12 @@ public boolean areConditionsMet(Map conditionResults) { .filter(c -> Condition.Type.MAX == c.type()) .anyMatch(c -> conditionResults.getOrDefault(c.toString(), false)); - return conditionResults.size() == 0 || (allMinConditionsMet && anyMaxConditionsMet); + boolean anyAutomaticConditionsMet = conditions.values() + .stream() + .filter(c -> Condition.Type.AUTOMATIC == c.type()) + .anyMatch(c -> conditionResults.getOrDefault(c.toString(), false)); + + return conditionResults.size() == 0 || (allMinConditionsMet && anyMaxConditionsMet) || anyAutomaticConditionsMet; } public static RolloverConditions fromXContent(XContentParser parser) throws IOException { @@ -408,6 +417,19 @@ public Builder addMinPrimaryShardDocsCondition(Long numDocs) { return this; } + /** + * Adds an optimal shard count condition if the autosharding result is of type INCREASE or DECREASE_SHARDS, ignores it otherwise. 
+ */ + public Builder addOptimalShardCountCondition(AutoShardingResult autoShardingResult) { + if (autoShardingResult.type().equals(INCREASE_SHARDS) || autoShardingResult.type().equals(DECREASE_SHARDS)) { + OptimalShardCountCondition optimalShardCountCondition = new OptimalShardCountCondition( + autoShardingResult.targetNumberOfShards() + ); + this.conditions.put(optimalShardCountCondition.name, optimalShardCountCondition); + } + return this; + } + public RolloverConditions build() { return new RolloverConditions(conditions); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 481eda825b047..c295ccde01623 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -17,6 +17,9 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingResult; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingType; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardsObserver; import org.elasticsearch.action.support.IndicesOptions; @@ -27,6 +30,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; @@ -54,6 +58,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -70,6 +75,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction rolloverTaskQueue; private final MetadataDataStreamsService metadataDataStreamsService; + private final DataStreamAutoShardingService dataStreamAutoShardingService; @Inject public TransportRolloverAction( @@ -81,7 +87,8 @@ public TransportRolloverAction( MetadataRolloverService rolloverService, Client client, AllocationService allocationService, - MetadataDataStreamsService metadataDataStreamsService + MetadataDataStreamsService metadataDataStreamsService, + DataStreamAutoShardingService dataStreamAutoShardingService ) { this( RolloverAction.INSTANCE, @@ -93,7 +100,8 @@ public TransportRolloverAction( rolloverService, client, allocationService, - metadataDataStreamsService + metadataDataStreamsService, + dataStreamAutoShardingService ); } @@ -107,7 +115,8 @@ public TransportRolloverAction( MetadataRolloverService rolloverService, Client client, AllocationService allocationService, - MetadataDataStreamsService metadataDataStreamsService + MetadataDataStreamsService metadataDataStreamsService, + DataStreamAutoShardingService dataStreamAutoShardingService ) { super( actionType.name(), @@ -127,6 +136,7 @@ public TransportRolloverAction( new RolloverExecutor(clusterService, 
allocationService, rolloverService, threadPool) ); this.metadataDataStreamsService = metadataDataStreamsService; + this.dataStreamAutoShardingService = dataStreamAutoShardingService; } @Override @@ -221,6 +231,40 @@ protected void masterOperation( listener.delegateFailureAndWrap((delegate, statsResponse) -> { + AutoShardingResult rolloverAutoSharding = null; + final IndexAbstraction indexAbstraction = clusterState.metadata() + .getIndicesLookup() + .get(rolloverRequest.getRolloverTarget()); + if (indexAbstraction.getType().equals(IndexAbstraction.Type.DATA_STREAM)) { + DataStream dataStream = (DataStream) indexAbstraction; + final Optional indexStats = Optional.ofNullable(statsResponse) + .map(stats -> stats.getIndex(dataStream.getWriteIndex().getName())); + + Double writeLoad = indexStats.map(stats -> stats.getTotal().getIndexing()) + .map(indexing -> indexing.getTotal().getWriteLoad()) + .orElse(null); + + rolloverAutoSharding = dataStreamAutoShardingService.calculate(clusterState, dataStream, writeLoad); + logger.debug("auto sharding result for data stream [{}] is [{}]", dataStream.getName(), rolloverAutoSharding); + + // if auto sharding recommends increasing the number of shards we want to trigger a rollover even if there are no + // other "regular" conditions matching (we want to aggressively increase the number of shards) so we're adding the + // automatic {@link OptimalShardCountCondition} to the rollover request conditions so it gets evaluated and triggers + // the rollover operation (having this condition met will also provide a useful paper trail as it'll get stored in + // the {@link org.elasticsearch.action.admin.indices.rollover.RolloverInfo#metConditions} ) + + // NOTE that the {@link AutoShardingType#DECREASE_SHARDS} recommendation is treated differently (i.e. added to the + // conditions later only if other "regular" rollover conditions match: see {@link RolloverTask#executeTask}) because we + // do NOT want to trigger a rollover **just** to reduce the number of shards, but we will reduce the number of shards + // when the rollover naturally occurs. + if (rolloverAutoSharding.type().equals(AutoShardingType.INCREASE_SHARDS)) { + RolloverConditions conditionsIncludingImplicit = RolloverConditions.newBuilder(rolloverRequest.getConditions()) + .addOptimalShardCountCondition(rolloverAutoSharding) + .build(); + rolloverRequest.setConditions(conditionsIncludingImplicit); + } + } + // Evaluate the conditions, so that we can tell without a cluster state update whether a rollover would occur.
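Condensed, the two recommendation types enter the rollover flow at different points; the following is an illustrative pseudo-flow of the behaviour the comments above describe, not the literal implementation, and withOptimalShardCountCondition is a made-up helper.

    // Illustrative pseudo-flow only.
    switch (autoShardingResult.type()) {
        case INCREASE_SHARDS ->
            // injected BEFORE condition evaluation: the always-met marker condition can trigger the rollover on its own
            rolloverRequest.setConditions(withOptimalShardCountCondition(rolloverRequest.getConditions(), autoShardingResult));
        case DECREASE_SHARDS -> {
            // recorded only AFTER the regular conditions have already triggered the rollover (see RolloverTask#executeTask);
            // a shrink recommendation on its own never forces a rollover
        }
        default -> {
            // other recommendation types leave the request untouched at this point
        }
    }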
final Map trialConditionResults = evaluateConditions( rolloverRequest.getConditionValues(), @@ -247,7 +291,13 @@ protected void masterOperation( // Pre-check the conditions to see whether we should submit a new cluster state task if (rolloverRequest.areConditionsMet(trialConditionResults)) { String source = "rollover_index source [" + trialRolloverIndexName + "] to target [" + trialRolloverIndexName + "]"; - RolloverTask rolloverTask = new RolloverTask(rolloverRequest, statsResponse, trialRolloverResponse, delegate); + RolloverTask rolloverTask = new RolloverTask( + rolloverRequest, + statsResponse, + trialRolloverResponse, + rolloverAutoSharding, + delegate + ); submitRolloverTask(rolloverRequest, source, rolloverTask); } else { // conditions not met @@ -317,8 +367,10 @@ record RolloverTask( RolloverRequest rolloverRequest, IndicesStatsResponse statsResponse, RolloverResponse trialRolloverResponse, + @Nullable AutoShardingResult autoShardingResult, ActionListener listener ) implements ClusterStateTaskListener { + @Override public void onFailure(Exception e) { listener.onFailure(e); @@ -388,9 +440,24 @@ public ClusterState executeTask( ); if (rolloverRequest.getConditions().areConditionsMet(postConditionResults)) { + Map resultsIncludingDecreaseShards = new HashMap<>(postConditionResults); + if (rolloverTask.autoShardingResult != null + && rolloverTask.autoShardingResult.type().equals(AutoShardingType.DECREASE_SHARDS)) { + // if we're executing a rollover ("regular" conditions are met) and we're also decreasing the number of shards we'll + // include the decrease_shards optimal shard count condition in the response and {@link RolloverInfo#metConditions} + RolloverConditions conditionsIncludingDecreaseShards = RolloverConditions.newBuilder(rolloverRequest.getConditions()) + .addOptimalShardCountCondition(rolloverTask.autoShardingResult) + .build(); + rolloverRequest.setConditions(conditionsIncludingDecreaseShards); + resultsIncludingDecreaseShards.put( + new OptimalShardCountCondition(rolloverTask.autoShardingResult.targetNumberOfShards()).toString(), + true + ); + } + final List> metConditions = rolloverRequest.getConditionValues() .stream() - .filter(condition -> postConditionResults.get(condition.toString())) + .filter(condition -> resultsIncludingDecreaseShards.get(condition.toString())) .toList(); final IndexAbstraction rolloverTargetAbstraction = currentState.metadata() @@ -411,7 +478,8 @@ public ClusterState executeTask( Instant.now(), false, false, - sourceIndexStats + sourceIndexStats, + rolloverTask.autoShardingResult() ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); @@ -435,7 +503,7 @@ public ClusterState executeTask( // things like date resolution sourceIndexName, rolloverIndexName, - postConditionResults, + resultsIncludingDecreaseShards, false, true, true, diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java index e830f538d222f..06aec69bc97da 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Objects; import java.util.OptionalDouble; +import java.util.OptionalLong; import java.util.function.Function; import 
java.util.function.LongSupplier; @@ -380,12 +381,27 @@ static double getMaxIndexLoadWithinCoolingPeriod( // assume the current write index load is the highest observed and look back to find the actual maximum double maxIndexLoadWithinCoolingPeriod = writeIndexLoad; for (IndexWriteLoad writeLoad : writeLoadsWithinCoolingPeriod) { - double totalIndexLoad = 0; + // the IndexWriteLoad stores _for each shard_ a shard average write load (calculated as: shard indexing time / shard + // uptime) and its corresponding shard uptime + // + // to reconstruct the average _index_ write load we recalculate the shard indexing time by multiplying the shard write load + // by its uptime, and then, having the indexing time and uptime for each shard, we calculate the average _index_ write load using + // (indexingTime_shard0 + indexingTime_shard1) / (uptime_shard0 + uptime_shard1) + // as {@link org.elasticsearch.index.shard.IndexingStats#add} does + double totalShardIndexingTime = 0; + long totalShardUptime = 0; for (int shardId = 0; shardId < writeLoad.numberOfShards(); shardId++) { final OptionalDouble writeLoadForShard = writeLoad.getWriteLoadForShard(shardId); - totalIndexLoad += writeLoadForShard.orElse(0); + final OptionalLong uptimeInMillisForShard = writeLoad.getUptimeInMillisForShard(shardId); + if (writeLoadForShard.isPresent()) { + assert uptimeInMillisForShard.isPresent(); + double shardIndexingTime = writeLoadForShard.getAsDouble() * uptimeInMillisForShard.getAsLong(); + long shardUptimeInMillis = uptimeInMillisForShard.getAsLong(); + totalShardIndexingTime += shardIndexingTime; + totalShardUptime += shardUptimeInMillis; + } } - + double totalIndexLoad = totalShardUptime == 0 ? 0.0 : (totalShardIndexingTime / totalShardUptime); if (totalIndexLoad > maxIndexLoadWithinCoolingPeriod) { maxIndexLoadWithinCoolingPeriod = totalIndexLoad; } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 19c7561ccdb15..45c13dde29d06 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -542,7 +542,8 @@ public String toString() { } } - class NodeResponse extends TransportResponse { + // visible for testing + public class NodeResponse extends TransportResponse { protected String nodeId; protected int totalShards; protected List exceptions; @@ -560,7 +561,8 @@ class NodeResponse extends TransportResponse { } } - NodeResponse( + // visible for testing + public NodeResponse( String nodeId, int totalShards, List results, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 073ba460a4698..a5f424f875eb7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -462,19 +462,25 @@ public DataStreamAutoShardingEvent getAutoShardingEvent() { * @param writeIndex new write index * @param generation new generation * @param timeSeries whether the template that created this data stream is in time series mode + * @param autoShardingEvent the auto sharding event this rollover operation is applying * * @return new {@code DataStream} instance with the rollover
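To make the uptime weighting in getMaxIndexLoadWithinCoolingPeriod concrete, a small worked example with invented numbers:

    // shard0: average write load 2.0 over 100ms of uptime -> indexing time 2.0 * 100 = 200
    // shard1: average write load 4.0 over 300ms of uptime -> indexing time 4.0 * 300 = 1200
    double indexWriteLoad = (2.0 * 100 + 4.0 * 300) / (100 + 300);   // = 3.5
    // the previous plain sum of the per-shard loads would have reported 6.0, regardless of how long each shard was indexing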
operation applied */ - public DataStream rollover(Index writeIndex, long generation, boolean timeSeries) { + public DataStream rollover( + Index writeIndex, + long generation, + boolean timeSeries, + @Nullable DataStreamAutoShardingEvent autoShardingEvent + ) { ensureNotReplicated(); - return unsafeRollover(writeIndex, generation, timeSeries); + return unsafeRollover(writeIndex, generation, timeSeries, autoShardingEvent); } /** - * Like {@link #rollover(Index, long, boolean)}, but does no validation, use with care only. + * Like {@link #rollover(Index, long, boolean, DataStreamAutoShardingEvent)}, but does no validation, use with care only. */ - public DataStream unsafeRollover(Index writeIndex, long generation, boolean timeSeries) { + public DataStream unsafeRollover(Index writeIndex, long generation, boolean timeSeries, DataStreamAutoShardingEvent autoShardingEvent) { IndexMode indexMode = this.indexMode; if ((indexMode == null || indexMode == IndexMode.STANDARD) && timeSeries) { // This allows for migrating a data stream to be a tsdb data stream: @@ -506,7 +512,7 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time /** * Performs a dummy rollover on a {@code DataStream} instance and returns the tuple of the next write index name and next generation - * that this {@code DataStream} should roll over to using {@link #rollover(Index, long, boolean)}. + * that this {@code DataStream} should roll over to using {@link #rollover(Index, long, boolean, DataStreamAutoShardingEvent)}. * * @param clusterMetadata Cluster metadata * diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index a2416fe6a6a15..21801dee844b0 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.bulk.WriteAckDelay; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.AutoCreateIndex; @@ -209,6 +210,11 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.WRITE_LOAD_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN, + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN, + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING, + DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS, + DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS, DesiredBalanceComputer.PROGRESS_LOG_INTERVAL_SETTING, DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index b94c95834f65a..048d9adb8e7e3 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -19,6 +19,7 @@ import org.elasticsearch.action.admin.indices.rollover.MinPrimaryShardDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MinPrimaryShardSizeCondition; import org.elasticsearch.action.admin.indices.rollover.MinSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.OptimalShardCountCondition; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -110,7 +111,8 @@ public static List getNamedWriteables() { new NamedWriteableRegistry.Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new), new NamedWriteableRegistry.Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new), new NamedWriteableRegistry.Entry(Condition.class, MaxPrimaryShardSizeCondition.NAME, MaxPrimaryShardSizeCondition::new), - new NamedWriteableRegistry.Entry(Condition.class, MaxPrimaryShardDocsCondition.NAME, MaxPrimaryShardDocsCondition::new) + new NamedWriteableRegistry.Entry(Condition.class, MaxPrimaryShardDocsCondition.NAME, MaxPrimaryShardDocsCondition::new), + new NamedWriteableRegistry.Entry(Condition.class, OptimalShardCountCondition.NAME, OptimalShardCountCondition::new) ); } @@ -165,6 +167,11 @@ public static List getNamedXContents() { Condition.class, new ParseField(MaxPrimaryShardDocsCondition.NAME), (p, c) -> MaxPrimaryShardDocsCondition.fromXContent(p) + ), + new NamedXContentRegistry.Entry( + Condition.class, + new ParseField(OptimalShardCountCondition.NAME), + (p, c) -> OptimalShardCountCondition.fromXContent(p) ) ); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 19a6d200189f2..15ebe2752451d 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; @@ -1061,6 +1062,14 @@ record PluginServiceInstances( modules.add(loadPluginComponents(pluginComponents)); + DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService( + settings, + clusterService, + featureService, + threadPool::absoluteTimeInMillis + ); + dataStreamAutoShardingService.init(); + modules.add(b -> { b.bind(NodeService.class).toInstance(nodeService); b.bind(BigArrays.class).toInstance(bigArrays); @@ -1095,6 +1104,7 @@ record PluginServiceInstances( b.bind(IndexSettingProviders.class).toInstance(indexSettingProviders); b.bind(FileSettingsService.class).toInstance(fileSettingsService); b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); + b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); }); if (ReadinessService.enabled(environment)) { diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java index 56e2d63307103..a6a9bf14325d3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java @@ -12,8 +12,13 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ConditionTests extends ESTestCase { @@ -347,6 +352,36 @@ public void testEqualsAndHashCode() { condition -> new MinPrimaryShardDocsCondition(condition.value), condition -> new MinPrimaryShardDocsCondition(randomNonNegativeLong()) ); + OptimalShardCountCondition optimalShardCountCondition = new OptimalShardCountCondition(3); + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + optimalShardCountCondition, + condition -> new OptimalShardCountCondition(3), + condition -> new OptimalShardCountCondition(2) + ); + } + + public void testAutoShardCondition() { + OptimalShardCountCondition optimalShardCountCondition = new OptimalShardCountCondition(randomNonNegativeInt()); + assertThat( + optimalShardCountCondition.evaluate( + new Condition.Stats(1, randomNonNegativeLong(), randomByteSizeValue(), randomByteSizeValue(), 1) + ).matched(), + is(true) + ); + } + + public void testParseAutoShardConditionFromRolloverInfo() throws IOException { + long time = System.currentTimeMillis(); + RolloverInfo info = new RolloverInfo("logs-nginx", List.of(new OptimalShardCountCondition(3)), time); + + RolloverInfo parsedInfo = RolloverInfo.parse( + createParser( + JsonXContent.jsonXContent, + "{\n" + " \"met_conditions\": {\n" + " \"optimal_shard_count\": 3" + "\n},\n" + " \"time\": " + time + "\n" + " }" + ), + "logs-nginx" + ); + assertThat(parsedInfo, is(info)); } private static ByteSizeValue randomByteSize() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceAutoShardingTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceAutoShardingTests.java new file mode 100644 index 0000000000000..fd21e0c27099e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceAutoShardingTests.java @@ -0,0 +1,489 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingResult; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingType; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.metadata.IndexAbstraction; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.time.Instant; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_DECREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.COOLDOWN_PREVENTED_INCREASE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.DECREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.INCREASE_SHARDS; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.NOT_APPLICABLE; +import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.NO_CHANGE_REQUIRED; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class MetadataRolloverServiceAutoShardingTests extends ESTestCase { + + public void testRolloverDataStreamWithoutExistingAutosharding() throws Exception { + String dataStreamName = "no_preexising_autoshard_event_ds"; + DataStream dataStream = DataStreamTestHelper.newInstance( + dataStreamName, + List.of( + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 2), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 3), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 4), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 5), UUID.randomUUID().toString()) + ), + 5, + null, + false, + null, + (DataStreamAutoShardingEvent) null + ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + // all indices have, by default 3 shards (using a value GT 1 so we can test decreasing the number of shards) + .template(new 
Template(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).build(), null, null)) + .build(); + Metadata.Builder builder = Metadata.builder(); + builder.put("template", template); + for (Index index : dataStream.getIndices()) { + // all indices have, by default 3 shards (using a value GT 1 so we can test decreasing the number of shards) + builder.put(getIndexMetadataBuilderForIndex(index, 3)); + } + builder.put(dataStream); + final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(builder).build(); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(), + xContentRegistry() + ); + + // let's rollover the data stream using all the possible autosharding recommendations + for (AutoShardingType type : AutoShardingType.values()) { + long before = testThreadPool.absoluteTimeInMillis(); + switch (type) { + case INCREASE_SHARDS -> { + List> metConditions = List.of(new OptimalShardCountCondition(5)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(INCREASE_SHARDS, 3, 5, TimeValue.ZERO, 64.33) + ); + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 5); + } + case DECREASE_SHARDS -> { + { + // we have another condition that matched, so the rollover will be executed and the new number of shards + // will be 1 + List> metConditions = List.of(new MaxDocsCondition(2L), new OptimalShardCountCondition(1)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33) + ); + assertRolloverResult( + dataStream, + rolloverResult, + before, + testThreadPool.absoluteTimeInMillis(), + metConditions, + 1 + ); + } + + { + // even if the decrease shards recommendation is not a rollover condition, an empty POST _rollover request will + // configure the decrease shards recommendation + List> metConditions = List.of(new OptimalShardCountCondition(1)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33) + ); + assertRolloverResult( + dataStream, + rolloverResult, + before, + testThreadPool.absoluteTimeInMillis(), + metConditions, + 1 + ); + } + } + case COOLDOWN_PREVENTED_INCREASE -> { + AutoShardingResult autoShardingResult = new AutoShardingResult( + COOLDOWN_PREVENTED_INCREASE, + 3, + 5, + TimeValue.timeValueMinutes(10), + 64.33 + ); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + List.of(), + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(COOLDOWN_PREVENTED_INCREASE, 3, 5, TimeValue.timeValueMinutes(10), 64.33) + ); + // the expected number of shards remains 3 for 
the data stream due to the remaining cooldown + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3); + } + case COOLDOWN_PREVENTED_DECREASE -> { + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + List.of(), + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(COOLDOWN_PREVENTED_DECREASE, 3, 1, TimeValue.timeValueMinutes(10), 64.33) + ); + // the expected number of shards remains 3 for the data stream due to the remaining cooldown + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3); + } + case NO_CHANGE_REQUIRED -> { + List> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong())); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(NO_CHANGE_REQUIRED, 3, 3, TimeValue.ZERO, 2.33) + ); + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3); + } + case NOT_APPLICABLE -> { + List> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong())); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(NOT_APPLICABLE, 1, 1, TimeValue.MAX_VALUE, null) + ); + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3); + } + } + } + } finally { + testThreadPool.shutdown(); + } + } + + public void testRolloverDataStreamWithExistingAutoShardEvent() throws Exception { + String dataStreamName = "ds_with_existing_autoshard_event"; + String autoShardEventTriggerIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 3); + DataStream dataStream = DataStreamTestHelper.newInstance( + dataStreamName, + List.of( + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 2), UUID.randomUUID().toString()), + new Index(autoShardEventTriggerIndex, UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 4), UUID.randomUUID().toString()), + new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 5), UUID.randomUUID().toString()) + ), + 5, + null, + false, + null, + new DataStreamAutoShardingEvent(autoShardEventTriggerIndex, 3, System.currentTimeMillis()) + ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + // the index template does not configure any number of shards so we'll default to 1 + .build(); + Metadata.Builder builder = Metadata.builder(); + builder.put("template", template); + int numberOfShards = 1; + for (Index index : dataStream.getIndices()) { + if (index.getName().equals(autoShardEventTriggerIndex)) { + // we configure the indices to have 1 shard until the auto shard trigger index, after which we go to 3 shards + numberOfShards = 3; + } + 
builder.put(getIndexMetadataBuilderForIndex(index, numberOfShards)); + } + builder.put(dataStream); + final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(builder).build(); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(), + xContentRegistry() + ); + + // let's rollover the data stream using all the possible autosharding recommendations + for (AutoShardingType type : AutoShardingType.values()) { + long before = testThreadPool.absoluteTimeInMillis(); + switch (type) { + case INCREASE_SHARDS -> { + List> metConditions = List.of(new OptimalShardCountCondition(3)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(INCREASE_SHARDS, 3, 5, TimeValue.ZERO, 64.33) + ); + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 5); + } + case DECREASE_SHARDS -> { + { + // we have another condition that matched, so the rollover will be executed and the new number of shards + // will be 1 + List> metConditions = List.of(new MaxDocsCondition(2L), new OptimalShardCountCondition(1)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33) + ); + assertRolloverResult( + dataStream, + rolloverResult, + before, + testThreadPool.absoluteTimeInMillis(), + metConditions, + 1 + ); + } + + { + // even if the decrease shards recommendation is not a rollover condition, an empty POST _rollover request will + // configure the decrease shards recommendation + List> metConditions = List.of(new OptimalShardCountCondition(1)); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33) + ); + assertRolloverResult( + dataStream, + rolloverResult, + before, + testThreadPool.absoluteTimeInMillis(), + metConditions, + 1 + ); + } + } + case COOLDOWN_PREVENTED_INCREASE -> { + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + List.of(), + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(COOLDOWN_PREVENTED_INCREASE, 3, 5, TimeValue.timeValueMinutes(10), 64.33) + ); + // the expected number of shards remains 3 for the data stream due to the remaining cooldown + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3); + } + case COOLDOWN_PREVENTED_DECREASE -> { + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + List.of(), + Instant.now(), + randomBoolean(), + false, + null, + new 
AutoShardingResult(COOLDOWN_PREVENTED_DECREASE, 3, 1, TimeValue.timeValueMinutes(10), 64.33) + ); + // the expected number of shards remains 3 for the data stream due to the remaining cooldown + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3); + } + case NO_CHANGE_REQUIRED -> { + List> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong())); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(NO_CHANGE_REQUIRED, 3, 3, TimeValue.ZERO, 2.33) + ); + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3); + } + case NOT_APPLICABLE -> { + List> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong())); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + new CreateIndexRequest("_na_"), + metConditions, + Instant.now(), + randomBoolean(), + false, + null, + new AutoShardingResult(NOT_APPLICABLE, 1, 1, TimeValue.MAX_VALUE, null) + ); + // if the auto sharding is not applicable we just use whatever's in the index template (1 shard in this case) + assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1); + } + } + } + } finally { + testThreadPool.shutdown(); + } + } + + private static void assertRolloverResult( + DataStream preRolloverDataStream, + MetadataRolloverService.RolloverResult rolloverResult, + long before, + long after, + List> metConditions, + int expectedNumberOfShards + ) { + String sourceIndexName = DataStream.getDefaultBackingIndexName( + preRolloverDataStream.getName(), + preRolloverDataStream.getGeneration() + ); + String newIndexName = DataStream.getDefaultBackingIndexName( + preRolloverDataStream.getName(), + preRolloverDataStream.getGeneration() + 1 + ); + assertEquals(sourceIndexName, rolloverResult.sourceIndexName()); + assertEquals(newIndexName, rolloverResult.rolloverIndexName()); + Metadata rolloverMetadata = rolloverResult.clusterState().metadata(); + assertEquals(preRolloverDataStream.getIndices().size() + 1, rolloverMetadata.indices().size()); + IndexMetadata rolloverIndexMetadata = rolloverMetadata.index(newIndexName); + // number of shards remained the same + assertThat(rolloverIndexMetadata.getNumberOfShards(), is(expectedNumberOfShards)); + + IndexAbstraction ds = rolloverMetadata.getIndicesLookup().get(preRolloverDataStream.getName()); + assertThat(ds.getType(), equalTo(IndexAbstraction.Type.DATA_STREAM)); + assertThat(ds.getIndices(), hasSize(preRolloverDataStream.getIndices().size() + 1)); + assertThat(ds.getIndices(), hasItem(rolloverMetadata.index(sourceIndexName).getIndex())); + assertThat(ds.getIndices(), hasItem(rolloverIndexMetadata.getIndex())); + assertThat(ds.getWriteIndex(), equalTo(rolloverIndexMetadata.getIndex())); + + RolloverInfo info = rolloverMetadata.index(sourceIndexName).getRolloverInfos().get(preRolloverDataStream.getName()); + assertThat(info.getTime(), lessThanOrEqualTo(after)); + assertThat(info.getTime(), greaterThanOrEqualTo(before)); + assertThat(info.getMetConditions(), hasSize(metConditions.size())); + for (Condition rolloverInfoCondition : info.getMetConditions()) { + boolean foundMetCondition = false; + for (Condition 
metCondition : metConditions) { + if (metCondition.name.equals(rolloverInfoCondition.name)) { + foundMetCondition = true; + assertThat(rolloverInfoCondition.value, is(metCondition.value)); + break; + } + } + assertThat(foundMetCondition, is(true)); + } + } + + private static IndexMetadata.Builder getIndexMetadataBuilderForIndex(Index index, int numberOfShards) { + return IndexMetadata.builder(index.getName()) + .settings(ESTestCase.settings(IndexVersion.current()).put("index.hidden", true).put(SETTING_INDEX_UUID, index.getUUID())) + .numberOfShards(numberOfShards) + .numberOfReplicas(1); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 83bdc68d0b9c0..23905c9445d18 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -547,6 +547,7 @@ public void testRolloverClusterState() throws Exception { Instant.now(), randomBoolean(), false, + null, null ); long after = testThreadPool.absoluteTimeInMillis(); @@ -615,6 +616,7 @@ public void testRolloverClusterStateForDataStream() throws Exception { Instant.now(), randomBoolean(), false, + null, null ); long after = testThreadPool.absoluteTimeInMillis(); @@ -701,6 +703,7 @@ public void testValidation() throws Exception { Instant.now(), randomBoolean(), true, + null, null ); @@ -742,6 +745,7 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { Instant.now(), false, randomBoolean(), + null, null ) ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountConditionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountConditionTests.java new file mode 100644 index 0000000000000..b979a7f1ccd0e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountConditionTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class OptimalShardCountConditionTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return OptimalShardCountCondition::new; + } + + @Override + protected OptimalShardCountCondition createTestInstance() { + return new OptimalShardCountCondition(randomNonNegativeInt()); + } + + @Override + protected OptimalShardCountCondition mutateInstance(OptimalShardCountCondition instance) throws IOException { + return new OptimalShardCountCondition(randomValueOtherThan(instance.value, ESTestCase::randomNonNegativeInt)); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java index cda8a6086b53a..404c74d0854cf 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingResult; +import org.elasticsearch.action.datastreams.autosharding.AutoShardingType; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -21,6 +23,8 @@ import java.util.Collections; import java.util.Map; +import static org.hamcrest.Matchers.is; + public class RolloverConditionsTests extends AbstractXContentSerializingTestCase { @Override @@ -157,5 +161,35 @@ public void testConditionsAreMet() { String minAgeCondition = new MinAgeCondition(age).toString(); assertFalse(rolloverConditions.areConditionsMet(Map.of(maxAgeCondition, true, minDocsCondition, true))); assertTrue(rolloverConditions.areConditionsMet(Map.of(maxAgeCondition, true, minDocsCondition, true, minAgeCondition, true))); + + OptimalShardCountCondition optimalShardCountCondition = new OptimalShardCountCondition(3); + rolloverConditions = RolloverConditions.newBuilder() + .addOptimalShardCountCondition( + randomBoolean() + ? 
new AutoShardingResult(AutoShardingType.INCREASE_SHARDS, 1, 3, TimeValue.ZERO, 3.0) + : new AutoShardingResult(AutoShardingType.DECREASE_SHARDS, 7, 3, TimeValue.ZERO, 0.8) + ) + .build(); + assertThat(rolloverConditions.areConditionsMet(Map.of(optimalShardCountCondition.toString(), true)), is(true)); + assertThat(rolloverConditions.areConditionsMet(Map.of(optimalShardCountCondition.toString(), false)), is(false)); + + // the rollover condition must be INCREASE or DECREASE_SHARDS, any other type should be ignored + rolloverConditions = RolloverConditions.newBuilder() + .addOptimalShardCountCondition( + new AutoShardingResult( + randomFrom( + AutoShardingType.COOLDOWN_PREVENTED_INCREASE, + AutoShardingType.COOLDOWN_PREVENTED_DECREASE, + AutoShardingType.NO_CHANGE_REQUIRED, + AutoShardingType.NOT_APPLICABLE + ), + 1, + 3, + TimeValue.ZERO, + 3.0 + ) + ) + .build(); + assertThat(rolloverConditions.getConditions().size(), is(0)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 950d1a9f22f08..814cff37e0708 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsTests; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -41,6 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.cache.query.QueryCacheStats; @@ -111,6 +113,13 @@ public class TransportRolloverActionTests extends ESTestCase { WriteLoadForecaster.DEFAULT ); + final DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService( + Settings.EMPTY, + mockClusterService, + new FeatureService(List.of()), + System::currentTimeMillis + ); + @Before public void setUpMocks() { when(mockNode.getId()).thenReturn("mocknode"); @@ -374,7 +383,8 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr rolloverService, mockClient, mockAllocationService, - mockMetadataDataStreamService + mockMetadataDataStreamService, + dataStreamAutoShardingService ); // For given alias, verify that condition evaluation fails when the condition doc count is greater than the primaries doc count @@ -449,7 +459,8 @@ public void testLazyRollover() throws Exception { rolloverService, mockClient, mockAllocationService, - mockMetadataDataStreamService + mockMetadataDataStreamService, + dataStreamAutoShardingService ); final PlainActionFuture future = new PlainActionFuture<>(); RolloverRequest rolloverRequest = new RolloverRequest("logs-ds", null); @@ -501,7 +512,8 @@ public void testLazyRolloverFails() throws Exception { rolloverService, mockClient, mockAllocationService, 
- mockMetadataDataStreamService + mockMetadataDataStreamService, + dataStreamAutoShardingService ); // Lazy rollover fails on a concrete index diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java index 674b3e855e912..bc1ec6788eec6 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -51,7 +51,9 @@ import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.INCREASE_SHARDS; import static org.elasticsearch.action.datastreams.autosharding.AutoShardingType.NO_CHANGE_REQUIRED; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; public class DataStreamAutoShardingServiceTests extends ESTestCase { @@ -65,10 +67,6 @@ public class DataStreamAutoShardingServiceTests extends ESTestCase { public void setupService() { threadPool = new TestThreadPool(getTestName()); Set> builtInClusterSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS); - builtInClusterSettings.add(DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS); - builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN); - builtInClusterSettings.add(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN); builtInClusterSettings.add( Setting.boolSetting( DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, @@ -613,7 +611,7 @@ public void testGetMaxIndexLoadWithinCoolingPeriod() { indexMetadata = createIndexMetadata( DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), 3, - getWriteLoad(3, 3.0), // each backing index has a write load of 9.0 + getWriteLoad(3, 3.0), // each backing index has a write load of 3.0 createdAt ); } @@ -648,7 +646,67 @@ public void testGetMaxIndexLoadWithinCoolingPeriod() { () -> now ); // to cover the entire cooldown period, the last index before the cooling period is taken into account - assertThat(maxIndexLoadWithinCoolingPeriod, is(lastIndexBeforeCoolingPeriodHasLowWriteLoad ? 15.0 : 999.0)); + assertThat(maxIndexLoadWithinCoolingPeriod, is(lastIndexBeforeCoolingPeriodHasLowWriteLoad ? 
5.0 : 999.0)); + } + + public void testIndexLoadWithinCoolingPeriodIsShardLoadsAvg() { + final TimeValue coolingPeriod = TimeValue.timeValueDays(3); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesWithinCoolingPeriod = randomIntBetween(3, 10); + final List backingIndices = new ArrayList<>(); + final String dataStreamName = "logs"; + long now = System.currentTimeMillis(); + + for (int i = 0; i < numberOfBackingIndicesWithinCoolingPeriod; i++) { + final long createdAt = now - (coolingPeriod.getMillis() / 2); + IndexMetadata indexMetadata; + IndexWriteLoad.Builder builder = IndexWriteLoad.builder(3); + for (int shardId = 0; shardId < 3; shardId++) { + switch (shardId) { + case 0 -> builder.withShardWriteLoad(shardId, 0.5, 40); + case 1 -> builder.withShardWriteLoad(shardId, 3.0, 10); + case 2 -> builder.withShardWriteLoad(shardId, 0.3333, 150); + } + } + indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), createdAt), + 3, + builder.build(), // max write index within cooling period should be 0.5 (ish) + createdAt + ); + backingIndices.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata(writeIndexName, 3, getWriteLoad(3, 0.1), System.currentTimeMillis()); + backingIndices.add(writeIndexMetadata.getIndex()); + metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + + metadataBuilder.put(dataStream); + + double maxIndexLoadWithinCoolingPeriod = DataStreamAutoShardingService.getMaxIndexLoadWithinCoolingPeriod( + metadataBuilder.build(), + dataStream, + 0.1, + coolingPeriod, + () -> now + ); + assertThat(maxIndexLoadWithinCoolingPeriod, is(greaterThan(0.499))); + assertThat(maxIndexLoadWithinCoolingPeriod, is(lessThan(0.5))); } public void testAutoShardingResultValidation() { @@ -763,7 +821,7 @@ private IndexMetadata createIndexMetadata( private IndexWriteLoad getWriteLoad(int numberOfShards, double shardWriteLoad) { IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); for (int shardId = 0; shardId < numberOfShards; shardId++) { - builder.withShardWriteLoad(shardId, shardWriteLoad, randomLongBetween(1, 10)); + builder.withShardWriteLoad(shardId, shardWriteLoad, 1); } return builder.build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index a07cd8e60411a..8e1ce495fdf5c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -160,7 +160,7 @@ protected DataStream mutateInstance(DataStream instance) { public void testRollover() { DataStream ds = DataStreamTestHelper.randomInstance().promoteDataStream(); Tuple newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); - final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false); + final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, 
null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + 1)); assertThat(rolledDs.getIndices().size(), equalTo(ds.getIndices().size() + 1)); @@ -185,7 +185,7 @@ public void testRolloverWithConflictingBackingIndexName() { } final Tuple newCoordinates = ds.nextWriteIndexAndGeneration(builder.build()); - final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false); + final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + numConflictingIndices + 1)); assertThat(rolledDs.getIndices().size(), equalTo(ds.getIndices().size() + 1)); @@ -215,7 +215,7 @@ public void testRolloverUpgradeToTsdbDataStream() { ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); - var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), true); + var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), true, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + 1)); assertThat(rolledDs.getIndices().size(), equalTo(ds.getIndices().size() + 1)); @@ -243,7 +243,7 @@ public void testRolloverDowngradeToRegularDataStream() { ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); - var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false); + var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + 1)); assertThat(rolledDs.getIndices().size(), equalTo(ds.getIndices().size() + 1)); @@ -1752,6 +1752,66 @@ public void testGetIndicesWithinMaxAgeRange() { assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); } + public void testGetIndicesWithinMaxAgeRangeAllIndicesOutsideRange() { + final TimeValue maxIndexAge = TimeValue.timeValueDays(7); + + final Metadata.Builder metadataBuilder = Metadata.builder(); + final int numberOfBackingIndicesOlderThanMinAge = randomIntBetween(5, 10); + final int numberOfShards = 1; + final List backingIndices = new ArrayList<>(); + final String dataStreamName = "logs-es"; + final List backingIndicesOlderThanMinAge = new ArrayList<>(); + for (int i = 0; i < numberOfBackingIndicesOlderThanMinAge; i++) { + long creationDate = System.currentTimeMillis() - maxIndexAge.millis() * 2; + final IndexMetadata indexMetadata = createIndexMetadata( + DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size(), creationDate), + randomIndexWriteLoad(numberOfShards), + creationDate + ); + backingIndices.add(indexMetadata.getIndex()); + backingIndicesOlderThanMinAge.add(indexMetadata.getIndex()); + metadataBuilder.put(indexMetadata, false); + } + + final String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, backingIndices.size()); + final IndexMetadata writeIndexMetadata = createIndexMetadata( + writeIndexName, + null, + System.currentTimeMillis() - maxIndexAge.millis() * 2 + ); + backingIndices.add(writeIndexMetadata.getIndex()); + 
metadataBuilder.put(writeIndexMetadata, false); + + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + backingIndices.size(), + Collections.emptyMap(), + false, + false, + false, + false, + randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES + ); + + metadataBuilder.put(dataStream); + + final List indicesWithinMaxAgeRange = DataStream.getIndicesWithinMaxAgeRange( + dataStream, + metadataBuilder::getSafe, + maxIndexAge, + System::currentTimeMillis + ); + + final List expectedIndicesWithinMaxAgeRange = new ArrayList<>(); + if (numberOfBackingIndicesOlderThanMinAge > 0) { + expectedIndicesWithinMaxAgeRange.add(backingIndicesOlderThanMinAge.get(backingIndicesOlderThanMinAge.size() - 1)); + } + expectedIndicesWithinMaxAgeRange.add(writeIndexMetadata.getIndex()); + assertThat(indicesWithinMaxAgeRange, is(equalTo(expectedIndicesWithinMaxAgeRange))); + assertThat(indicesWithinMaxAgeRange.get(indicesWithinMaxAgeRange.size() - 1).getName(), is(writeIndexName)); + } + private IndexWriteLoad randomIndexWriteLoad(int numberOfShards) { IndexWriteLoad.Builder builder = IndexWriteLoad.builder(numberOfShards); for (int shardId = 0; shardId < numberOfShards; shardId++) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index b4c9f670f66b6..5cc1a7206e7e4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.OptimalShardCountCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.Strings; @@ -97,7 +98,8 @@ public void testIndexMetadataSerialization() throws IOException { new MaxDocsCondition(randomNonNegativeLong()), new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardDocsCondition(randomNonNegativeLong()) + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) ), randomNonNegativeLong() ) @@ -128,6 +130,7 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getCreationVersion(), fromXContentMeta.getCreationVersion()); assertEquals(metadata.getCompatibilityVersion(), fromXContentMeta.getCompatibilityVersion()); assertEquals(metadata.getRoutingNumShards(), fromXContentMeta.getRoutingNumShards()); + assertEquals(metadata.getRolloverInfos(), fromXContentMeta.getRolloverInfos()); assertEquals(metadata.getCreationDate(), fromXContentMeta.getCreationDate()); assertEquals(metadata.getRoutingFactor(), fromXContentMeta.getRoutingFactor()); assertEquals(metadata.primaryTerm(0), fromXContentMeta.primaryTerm(0)); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 8402b5756e915..5d6ba6c3a6d1d 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -120,6 +120,32 @@ public static DataStream newInstance( return newInstance(name, indices, generation, metadata, replicated, lifecycle, List.of()); } + public static DataStream newInstance( + String name, + List indices, + long generation, + Map metadata, + boolean replicated, + @Nullable DataStreamLifecycle lifecycle, + @Nullable DataStreamAutoShardingEvent autoShardingEvent + ) { + return new DataStream( + name, + indices, + generation, + metadata, + false, + replicated, + false, + false, + null, + lifecycle, + false, + List.of(), + autoShardingEvent + ); + } + public static DataStream newInstance( String name, List indices, diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 2379e5f8e9380..28983fe34df91 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -817,7 +817,12 @@ private SingleForecast forecast(Metadata metadata, DataStream stream, long forec for (int i = 0; i < numberNewIndices; ++i) { final String uuid = UUIDs.randomBase64UUID(); final Tuple rolledDataStreamInfo = stream.unsafeNextWriteIndexAndGeneration(state.metadata()); - stream = stream.unsafeRollover(new Index(rolledDataStreamInfo.v1(), uuid), rolledDataStreamInfo.v2(), false); + stream = stream.unsafeRollover( + new Index(rolledDataStreamInfo.v1(), uuid), + rolledDataStreamInfo.v2(), + false, + stream.getAutoShardingEvent() + ); // this unintentionally copies the in-sync allocation ids too. This has the fortunate effect of these indices // not being regarded new by the disk threshold decider, thereby respecting the low watermark threshold even for primaries. From aa6222a40817457ad03888ac8fea5663733e770a Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Sun, 17 Mar 2024 11:18:28 +0000 Subject: [PATCH 226/248] Support ES|QL requests through the NodeClient::execute (#106244) This commit adds support for executing ES|QL requests through the NodeClient::execute. A subset of ES|QL's transport request and response APIs has been added x-pack/core. This offers basic functionality to run ES|QL queries without depending upon the ES|QL plugin directly. The API is deliberately small so as to not expose any unnecessary parts of the ES|QL implementation. It can be expanded later if and when needed, e.g. adding an explicitly Page, and Block types. 
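Roughly, the intended consumer-side usage looks like the sketch below (illustrative only:
the client handle, index name, and query string are placeholders; the real API surface is
exercised in the new CoreEsqlActionIT test):

    import org.elasticsearch.client.internal.ElasticsearchClient;
    import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequestBuilder;
    import org.elasticsearch.xpack.core.esql.action.EsqlQueryResponse;
    import org.elasticsearch.xpack.core.esql.action.EsqlResponse;

    import static java.util.concurrent.TimeUnit.SECONDS;

    class EsqlCoreActionSketch {
        // Runs an ES|QL query through the x-pack core API only, with no
        // compile-time dependency on the ES|QL plugin itself.
        static void printQueryResults(ElasticsearchClient client) {
            var request = EsqlQueryRequestBuilder.newRequestBuilder(client)
                .query("from test | keep item, cost | sort item"); // placeholder query
            // the query response must be closed once its values have been consumed
            try (EsqlQueryResponse queryResponse = request.execute().actionGet(30, SECONDS)) {
                EsqlResponse response = queryResponse.response();
                response.columns().forEach(c -> System.out.println(c.name() + " : " + c.type()));
                for (Iterable<Object> row : response.rows()) {
                    row.forEach(value -> System.out.print(value + " "));
                    System.out.println();
                }
            }
        }
    }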
--- docs/changelog/106244.yaml | 5 + .../core/src/main/java/module-info.java | 2 + .../xpack/core}/esql/action/ColumnInfo.java | 2 +- .../core/esql/action/EsqlQueryRequest.java | 27 +++ .../esql/action/EsqlQueryRequestBuilder.java | 35 ++++ .../core/esql/action/EsqlQueryResponse.java | 39 ++++ .../xpack/core/esql/action/EsqlResponse.java | 47 +++++ .../esql/action/internal/SharedSecrets.java | 41 ++++ .../action/EsqlQueryRequestBuilderTests.java | 22 +++ x-pack/plugin/esql/qa/action/build.gradle | 16 ++ .../test/esql/qa/action/CoreEsqlActionIT.java | 178 ++++++++++++++++++ .../plugin/esql/qa/testFixtures/build.gradle | 1 + .../esql/action/AsyncEsqlQueryActionIT.java | 1 + .../xpack/esql/action/EnrichIT.java | 1 + .../xpack/esql/action/EsqlActionIT.java | 1 + .../xpack/esql/action/EsqlAsyncActionIT.java | 1 + .../xpack/esql/action/EsqlQueryRequest.java | 5 +- .../esql/action/EsqlQueryRequestBuilder.java | 12 +- .../xpack/esql/action/EsqlQueryResponse.java | 37 +++- .../xpack/esql/action/EsqlResponseImpl.java | 89 +++++++++ .../xpack/esql/action/PositionToXContent.java | 1 + .../xpack/esql/action/ResponseValueUtils.java | 80 +++++--- .../esql/action/ResponseXContentUtils.java | 1 + .../xpack/esql/formatter/TextFormat.java | 2 +- .../xpack/esql/plugin/EsqlPlugin.java | 12 ++ .../esql/plugin/TransportEsqlQueryAction.java | 2 +- .../esql/action/EsqlQueryResponseTests.java | 111 +++++++++++ .../xpack/esql/formatter/TextFormatTests.java | 2 +- .../esql/formatter/TextFormatterTests.java | 2 +- 29 files changed, 741 insertions(+), 34 deletions(-) create mode 100644 docs/changelog/106244.yaml rename x-pack/plugin/{esql/src/main/java/org/elasticsearch/xpack => core/src/main/java/org/elasticsearch/xpack/core}/esql/action/ColumnInfo.java (97%) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilder.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryResponse.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlResponse.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/internal/SharedSecrets.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilderTests.java create mode 100644 x-pack/plugin/esql/qa/action/build.gradle create mode 100644 x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseImpl.java diff --git a/docs/changelog/106244.yaml b/docs/changelog/106244.yaml new file mode 100644 index 0000000000000..fe03f575b9efb --- /dev/null +++ b/docs/changelog/106244.yaml @@ -0,0 +1,5 @@ +pr: 106244 +summary: Support ES|QL requests through the `NodeClient::execute` +area: ES|QL +type: feature +issues: [] diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index daa3a4db913ef..77def0fd12459 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -65,6 +65,8 @@ exports org.elasticsearch.xpack.core.enrich; exports org.elasticsearch.xpack.core.eql; exports org.elasticsearch.xpack.core.esql; + exports 
org.elasticsearch.xpack.core.esql.action; + exports org.elasticsearch.xpack.core.esql.action.internal; // TODO: qualify to esql when modularized exports org.elasticsearch.xpack.core.frozen.action; exports org.elasticsearch.xpack.core.frozen; exports org.elasticsearch.xpack.core.graph.action; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java index 3aa609e55b07c..b3248077397c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.action; +package org.elasticsearch.xpack.core.esql.action; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequest.java new file mode 100644 index 0000000000000..9faa78d3b34f9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequest.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.esql.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.query.QueryBuilder; + +import java.io.IOException; + +public abstract class EsqlQueryRequest extends ActionRequest { + + protected EsqlQueryRequest() {} + + protected EsqlQueryRequest(StreamInput in) throws IOException { + super(in); + } + + public abstract String query(); + + public abstract QueryBuilder filter(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilder.java new file mode 100644 index 0000000000000..c77d2613cd321 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilder.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.esql.action; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.core.esql.action.internal.SharedSecrets; + +public abstract class EsqlQueryRequestBuilder extends + ActionRequestBuilder { + + /** Creates a new ES|QL query request builder. 
*/ + public static EsqlQueryRequestBuilder newRequestBuilder( + ElasticsearchClient client + ) { + return SharedSecrets.getEsqlQueryRequestBuilderAccess().newEsqlQueryRequestBuilder(client); + } + + // not for direct use + protected EsqlQueryRequestBuilder(ElasticsearchClient client, ActionType action, Request request) { + super(client, action, request); + } + + public abstract EsqlQueryRequestBuilder query(String query); + + public abstract EsqlQueryRequestBuilder filter(QueryBuilder filter); + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryResponse.java new file mode 100644 index 0000000000000..113542a01edf3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryResponse.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.esql.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.core.Releasable; + +/** + * Response to an ES|QL query request. + * + * This query response must be closed when the consumer of its response + * object is finished. Closing the query response closes and invalidates + * the response object. Calling {@link #response()} on a closed query + * response results in an IllegalStateException. + */ +public abstract class EsqlQueryResponse extends ActionResponse implements Releasable { + + private boolean closed; + + /** Returns the response object. */ + public EsqlResponse response() { + if (closed) { + throw new IllegalStateException("closed"); + } + return responseInternal(); + } + + protected abstract EsqlResponse responseInternal(); + + @Override + public void close() { + closed = true; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlResponse.java new file mode 100644 index 0000000000000..a4b206d3ffd9c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/EsqlResponse.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.esql.action; + +import org.elasticsearch.core.Releasable; + +import java.util.List; + +/** + * An ES|QL Response object. + * + *

<p> Iterator based access to values of type T has the following properties:
 + * <ol>
 + *     <li> single-value is of type {@code T} </li>
 + *     <li> multi-value is of type {@code List<T>} </li>
 + *     <li> absent value is {@code null} </li>
 + * </ol>
 + *
 + * <p>
        This response object should be closed when the consumer of its values + * is finished. Closing the response object invalidates any iterators of its + * values. An invalidated iterator, if not already exhausted, will eventually + * throw an IllegalStateException. Once a response object is closed, calling + * {@link #rows()}, {@link #column(int)}, or operating on an Iterable return + * from the aforementioned value accessor methods, results in an + * IllegalStateException. + */ +public interface EsqlResponse extends Releasable { + + /** Returns the column info. */ + List columns(); + + /** + * Returns an iterable that allows to iterator over the values in all rows + * of the response, this is the rows-iterator. A further iterator can be + * retrieved from the rows-iterator, which iterates over the actual values + * in the row, one row at a time, column-wise from left to right. + */ + Iterable> rows(); + + /** Returns an iterable over the values in the given column. */ + Iterable column(int columnIndex); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/internal/SharedSecrets.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/internal/SharedSecrets.java new file mode 100644 index 0000000000000..7e635c4e89354 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/internal/SharedSecrets.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.esql.action.internal; + +import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequestBuilder; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryResponse; + +/** + * For secret access to ES|QL internals only. Do not use. + * TODO qualify export when ES|QL is modularized + */ +public class SharedSecrets { + + private static EsqlQueryRequestBuilderAccess esqlQueryRequestBuilderAccess; + + public static void setEsqlQueryRequestBuilderAccess(EsqlQueryRequestBuilderAccess access) { + esqlQueryRequestBuilderAccess = access; + } + + public static EsqlQueryRequestBuilderAccess getEsqlQueryRequestBuilderAccess() { + var access = esqlQueryRequestBuilderAccess; + if (access == null) { + throw new IllegalStateException("ESQL module not present or initialized"); + } + return access; + } + + public interface EsqlQueryRequestBuilderAccess { + + EsqlQueryRequestBuilder newEsqlQueryRequestBuilder( + ElasticsearchClient client + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilderTests.java new file mode 100644 index 0000000000000..d32c2d2c7d6d0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/esql/action/EsqlQueryRequestBuilderTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.esql.action; + +import org.elasticsearch.test.ESIntegTestCase; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class EsqlQueryRequestBuilderTests extends ESIntegTestCase { + + // This is a trivial test that asserts IAE when the ES|QL module is + // not present. + public void testIllegalStateException() { + var e = expectThrows(IllegalStateException.class, () -> EsqlQueryRequestBuilder.newRequestBuilder(client())); + assertThat(e.getMessage(), equalTo("ESQL module not present or initialized")); + } +} diff --git a/x-pack/plugin/esql/qa/action/build.gradle b/x-pack/plugin/esql/qa/action/build.gradle new file mode 100644 index 0000000000000..171f0c39df21e --- /dev/null +++ b/x-pack/plugin/esql/qa/action/build.gradle @@ -0,0 +1,16 @@ +apply plugin: 'elasticsearch.java' +apply plugin: 'elasticsearch.internal-cluster-test' + +description = 'Tests for requests made through the Node Client request API' + +dependencies { + api project(":test:framework") + api project(':server') + compileOnly project(path: xpackModule('core')) + + testImplementation(testArtifact(project(xpackModule('core')))) + // runtime only - since the test source should not explicitly depend + // upon any types from ES|QL (only xpack core) + testImplementation project(':x-pack:plugin:ql') + testImplementation project(':x-pack:plugin:esql') +} diff --git a/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java b/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java new file mode 100644 index 0000000000000..7431c5b8204d7 --- /dev/null +++ b/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.test.esql.qa.action; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequestBuilder; +import org.elasticsearch.xpack.core.esql.action.EsqlQueryResponse; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.contains; + +// A subset of test scenarios exercised through the xpack core ES|QL +// transport API (rather than through the ES|QL request API). +// Tests here have no static dependencies on types from the ES|QL plugin. 
+public class CoreEsqlActionIT extends ESIntegTestCase { + + @Before + public void setupIndex() { + createAndPopulateIndex("test"); + } + + protected Collection> nodePlugins() { + try { + @SuppressWarnings("unchecked") + var c = (Class) Class.forName("org.elasticsearch.xpack.esql.plugin.EsqlPlugin"); + return List.of(c); + } catch (ClassNotFoundException e) { + throw new AssertionError(e); // the ES|QL plugin must be present + } + } + + public void testRowTypesAndValues() { + var query = "row a = 1, b = \"x\", c = 1000000000000, d = 1.1"; + var request = EsqlQueryRequestBuilder.newRequestBuilder(client()).query(query); + try (var queryResp = run(request)) { + logger.info("response=" + queryResp); + var resp = queryResp.response(); + assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("a", "b", "c", "d")); + assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("integer", "keyword", "long", "double")); + assertThat(getValuesList(resp.rows()), contains(List.of(1, "x", 1000000000000L, 1.1d))); + } + } + + public void testRowStatsProjectGroupByInt() { + var query = "row a = 1, b = 2 | stats count(b) by a | keep a"; + var request = EsqlQueryRequestBuilder.newRequestBuilder(client()).query(query); + try (var queryResp = run(request)) { + logger.info("response=" + queryResp); + var resp = queryResp.response(); + assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("a")); + assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("integer")); + assertThat(getValuesList(resp.rows()), contains(List.of(1))); + } + } + + public void testFrom() { + var query = "from test | keep item, cost, color, sale | sort item"; + var request = EsqlQueryRequestBuilder.newRequestBuilder(client()).query(query); + try (var queryResp = run(request)) { + var resp = queryResp.response(); + logger.info("response=" + queryResp); + assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("item", "cost", "color", "sale")); + assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("long", "double", "keyword", "date")); + // columnar values + assertThat(columnValues(resp.column(0)), contains(1L, 2L, 3L, 4L)); + assertThat(columnValues(resp.column(1)), contains(1.1d, 2.1d, 3.1d, 4.1d)); + assertThat(columnValues(resp.column(2)), contains("red", "blue", "green", "red")); + var d = List.of("2004-03-02T00:00:00.000Z", "1992-06-01T00:00:00.000Z", "1965-06-01T00:00:00.000Z", "2000-03-15T00:00:00.000Z"); + assertThat(columnValues(resp.column(3)), contains(d.toArray())); + // row values + List> values = getValuesList(resp.rows()); + assertThat(values.get(0), contains(1L, 1.1d, "red", "2004-03-02T00:00:00.000Z")); + assertThat(values.get(1), contains(2L, 2.1d, "blue", "1992-06-01T00:00:00.000Z")); + assertThat(values.get(2), contains(3L, 3.1d, "green", "1965-06-01T00:00:00.000Z")); + assertThat(values.get(3), contains(4L, 4.1d, "red", "2000-03-15T00:00:00.000Z")); + } + } + + public void testAccessAfterClose() { + for (var closedQueryResp : new boolean[] { true, false }) { + var query = "row a = 1"; + var request = EsqlQueryRequestBuilder.newRequestBuilder(client()).query(query); + var queryResp = run(request); + var resp = queryResp.response(); + var rows = resp.rows(); + var rowItr = rows.iterator(); + var cols = resp.column(0); + var colItr = cols.iterator(); + + // must close at least one of them + if (closedQueryResp) queryResp.close(); + if (randomBoolean() || closedQueryResp == false) resp.close(); + + 
assertThrows(IllegalStateException.class, resp::rows); + assertThrows(IllegalStateException.class, () -> resp.column(0)); + assertThrows(IllegalStateException.class, () -> rows.iterator()); + assertThrows(IllegalStateException.class, () -> cols.iterator()); + assertThrows(IllegalStateException.class, () -> queryResp.response().rows()); + assertThrows(IllegalStateException.class, () -> queryResp.response().column(0)); + assertThrows(IllegalStateException.class, () -> rowItr.next().iterator().next()); + assertThrows(IllegalStateException.class, () -> colItr.next()); + if (closedQueryResp) { + assertThrows(IllegalStateException.class, () -> queryResp.response()); + } else { + queryResp.close(); // we must close the query response if not already closed + } + } + } + + protected EsqlQueryResponse run(EsqlQueryRequestBuilder request) { + try { + if (randomBoolean()) { + return request.execute().actionGet(30, SECONDS); + } else { + return ClientHelper.executeWithHeaders( + Map.of("Foo", "bar"), + "origin", + client(), + () -> request.execute().actionGet(30, SECONDS) + ); + } + } catch (ElasticsearchTimeoutException e) { + throw new AssertionError("timeout", e); + } + } + + static List> getValuesList(Iterable> values) { + var valuesList = new ArrayList>(); + values.forEach(row -> { + var rowValues = new ArrayList<>(); + row.forEach(rowValues::add); + valuesList.add(rowValues); + }); + return valuesList; + } + + static List columnValues(Iterable values) { + List l = new ArrayList<>(); + values.forEach(l::add); + return l; + } + + private void createAndPopulateIndex(String indexName) { + var client = client().admin().indices(); + var CreateRequest = client.prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) + .setMapping("item", "type=long", "cost", "type=double", "color", "type=keyword", "sale", "type=date"); + assertAcked(CreateRequest); + client().prepareBulk() + .add(new IndexRequest(indexName).id("1").source("item", 1, "cost", 1.1d, "color", "red", "sale", "2004-03-02T00:00:00.000Z")) + .add(new IndexRequest(indexName).id("2").source("item", 2, "cost", 2.1d, "color", "blue", "sale", "1992-06-01T00:00:00.000Z")) + .add(new IndexRequest(indexName).id("3").source("item", 3, "cost", 3.1d, "color", "green", "sale", "1965-06-01T00:00:00.000Z")) + .add(new IndexRequest(indexName).id("4").source("item", 4, "cost", 4.1d, "color", "red", "sale", "2000-03-15T00:00:00.000Z")) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow(indexName); + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/build.gradle b/x-pack/plugin/esql/qa/testFixtures/build.gradle index cf1057452344c..52a0df539e937 100644 --- a/x-pack/plugin/esql/qa/testFixtures/build.gradle +++ b/x-pack/plugin/esql/qa/testFixtures/build.gradle @@ -4,6 +4,7 @@ apply plugin: 'elasticsearch.java' dependencies { implementation project(':x-pack:plugin:esql:compute') compileOnly project(':x-pack:plugin:esql') + compileOnly project(path: xpackModule('core')) implementation project(":libs:elasticsearch-x-content") implementation project(':client:rest') implementation project(':libs:elasticsearch-logging') diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java index 6e3873f654778..c985c87fd88e0 100644 --- 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.hamcrest.core.IsEqual; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index 3bb6bb35b5210..9b3f61175c3f7 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.enrich.EnrichPlugin; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index b23c75df6fa4f..115ae54833e14 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.test.ListMatcher; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.ParsingException; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java index 6f6e41ee42b35..e884b67fb5d24 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.parser.ParsingException; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index df196a0703f7d..e5ff790619d14 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.Build; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.common.Strings; @@ -29,7 +28,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; -public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesRequest { +public class EsqlQueryRequest extends org.elasticsearch.xpack.core.esql.action.EsqlQueryRequest implements CompositeIndicesRequest { public static TimeValue DEFAULT_KEEP_ALIVE = TimeValue.timeValueDays(5); public static TimeValue DEFAULT_WAIT_FOR_COMPLETION = TimeValue.timeValueSeconds(1); @@ -81,6 +80,7 @@ public void query(String query) { this.query = query; } + @Override public String query() { return query; } @@ -124,6 +124,7 @@ public void filter(QueryBuilder filter) { this.filter = filter; } + @Override public QueryBuilder filter() { return filter; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java index 5b00208342296..7df5c95cbc953 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.esql.action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.core.esql.action.internal.SharedSecrets; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; -public class EsqlQueryRequestBuilder extends ActionRequestBuilder { +public class EsqlQueryRequestBuilder extends org.elasticsearch.xpack.core.esql.action.EsqlQueryRequestBuilder< + EsqlQueryRequest, + EsqlQueryResponse> { public static EsqlQueryRequestBuilder newAsyncEsqlQueryRequestBuilder(ElasticsearchClient client) { return new EsqlQueryRequestBuilder(client, EsqlQueryRequest.asyncEsqlQueryRequest()); @@ -27,6 +29,7 @@ private EsqlQueryRequestBuilder(ElasticsearchClient client, EsqlQueryRequest req super(client, EsqlQueryAction.INSTANCE, request); } + @Override public EsqlQueryRequestBuilder query(String query) { request.query(query); return this; @@ -37,6 +40,7 @@ public EsqlQueryRequestBuilder columnar(boolean columnar) { return this; } + @Override public EsqlQueryRequestBuilder filter(QueryBuilder filter) { request.filter(filter); return this; @@ -61,4 +65,8 @@ public EsqlQueryRequestBuilder keepOnCompletion(boolean keepOnCompletion) { request.keepOnCompletion(keepOnCompletion); return this; } + + static { // plumb access from x-pack core + SharedSecrets.setEsqlQueryRequestBuilderAccess(EsqlQueryRequestBuilder::newSyncEsqlQueryRequestBuilder); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index 254abb9719877..49a0307a6599e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -26,6 +25,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; +import org.elasticsearch.xpack.core.esql.action.EsqlResponse; import java.io.IOException; import java.util.Collections; @@ -34,7 +35,10 @@ import java.util.Objects; import java.util.Optional; -public class EsqlQueryResponse extends ActionResponse implements ChunkedToXContentObject, Releasable { +public class EsqlQueryResponse extends org.elasticsearch.xpack.core.esql.action.EsqlQueryResponse + implements + ChunkedToXContentObject, + Releasable { @SuppressWarnings("this-escape") private final AbstractRefCounted counted = AbstractRefCounted.of(this::closeInternal); @@ -130,6 +134,16 @@ public Iterator> values() { return ResponseValueUtils.pagesToValues(dataTypes, pages); } + public Iterable> rows() { + List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + return ResponseValueUtils.valuesForRowsInPages(dataTypes, pages); + } + + public Iterator column(int columnIndex) { + if (columnIndex < 0 || columnIndex >= columns.size()) throw new IllegalArgumentException(); + return ResponseValueUtils.valuesForColumn(columnIndex, columns.get(columnIndex).type(), pages); + } + public Profile profile() { return profile; } @@ -261,13 +275,32 @@ public boolean hasReferences() { @Override public void close() { + super.close(); decRef(); + if (esqlResponse != null) { + esqlResponse.setClosedState(); + } } void closeInternal() { Releasables.close(() -> Iterators.map(pages.iterator(), p -> p::releaseBlocks)); } + // singleton lazy set view over this response + private EsqlResponseImpl esqlResponse; + + @Override + public EsqlResponse responseInternal() { + if (hasReferences() == false) { + throw new IllegalStateException("closed"); + } + if (esqlResponse != null) { + return esqlResponse; + } + esqlResponse = new EsqlResponseImpl(this); + return esqlResponse; + } + public static class Profile implements Writeable, ChunkedToXContentObject { private final List drivers; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseImpl.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseImpl.java new file mode 100644 index 0000000000000..9393ae6be9e19 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseImpl.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; +import org.elasticsearch.xpack.core.esql.action.EsqlResponse; + +import java.util.Iterator; +import java.util.List; + +/** View over the response, that supports the xpack core transport API. */ +public class EsqlResponseImpl implements EsqlResponse { + + private final EsqlQueryResponse queryResponse; + private boolean closed; + + EsqlResponseImpl(EsqlQueryResponse queryResponse) { + this.queryResponse = queryResponse; + } + + @Override + public List columns() { + return queryResponse.columns(); + } + + @Override + public Iterable> rows() { + ensureOpen(); + return () -> { + ensureOpen(); + return new DelegatingIterator<>(queryResponse.rows().iterator()); + }; + } + + @Override + public Iterable column(int columnIndex) { + ensureOpen(); + return () -> { + ensureOpen(); + return new DelegatingIterator<>(queryResponse.column(columnIndex)); + }; + } + + @Override + public void close() { + setClosedState(); + } + + public void setClosedState() { + closed = true; + } + + private void ensureOpen() { + if (closed || queryResponse.hasReferences() == false) { + throw new IllegalStateException("closed"); + } + } + + @Override + public String toString() { + return "EsqlResponse[response=" + queryResponse + "]"; + } + + /** A delegating iterator, that first checks the closed state before delegating. */ + final class DelegatingIterator implements Iterator { + final Iterator delegate; + + DelegatingIterator(Iterator delegate) { + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + ensureOpen(); + return delegate.hasNext(); + } + + @Override + public T next() { + ensureOpen(); + return delegate.next(); + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java index 669b22883fd8c..176b89f80c910 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index d5dc12357f3fe..f407d5c4b5e6e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -26,6 +26,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -58,32 +59,65 @@ public static Iterator> pagesToValues(List dataTypes, L BytesRef scratch = new BytesRef(); return Iterators.flatMap( pages.iterator(), - page 
-> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> { - Block block = page.getBlock(b); - if (block.isNull(p)) { - return null; - } - /* - * Use the ESQL data type to map to the output to make sure compute engine - * respects its types. See the INTEGER clause where is doesn't always - * respect it. - */ - int count = block.getValueCount(p); - int start = block.getFirstValueIndex(p); - String dataType = dataTypes.get(b); - if (count == 1) { - return valueAt(dataType, block, start, scratch); - } - List thisResult = new ArrayList<>(count); - int end = count + start; - for (int i = start; i < end; i++) { - thisResult.add(valueAt(dataType, block, i, scratch)); - } - return thisResult; - })) + page -> Iterators.forRange( + 0, + page.getPositionCount(), + pos -> Iterators.forRange(0, page.getBlockCount(), b -> valueAtPosition(page.getBlock(b), pos, dataTypes.get(b), scratch)) + ) + ); + } + + /** Returns an iterable of iterables over the values in the given pages. There is one iterables for each row. */ + static Iterable> valuesForRowsInPages(List dataTypes, List pages) { + BytesRef scratch = new BytesRef(); + return () -> Iterators.flatMap(pages.iterator(), page -> valuesForRowsInPage(dataTypes, page, scratch)); + } + + /** Returns an iterable of iterables over the values in the given page. There is one iterables for each row. */ + static Iterator> valuesForRowsInPage(List dataTypes, Page page, BytesRef scratch) { + return Iterators.forRange(0, page.getPositionCount(), position -> valuesForRow(dataTypes, page, position, scratch)); + } + + /** Returns an iterable over the values in the given row in a page. */ + static Iterable valuesForRow(List dataTypes, Page page, int position, BytesRef scratch) { + return () -> Iterators.forRange( + 0, + page.getBlockCount(), + blockIdx -> valueAtPosition(page.getBlock(blockIdx), position, dataTypes.get(blockIdx), scratch) ); } + /** Returns an iterator of values for the given column. */ + static Iterator valuesForColumn(int columnIndex, String dataType, List pages) { + BytesRef scratch = new BytesRef(); + return Iterators.flatMap( + pages.iterator(), + page -> Iterators.forRange( + 0, + page.getPositionCount(), + pos -> valueAtPosition(page.getBlock(columnIndex), pos, dataType, scratch) + ) + ); + } + + /** Returns the value that the position and with the given data type, in the block. 
*/ + static Object valueAtPosition(Block block, int position, String dataType, BytesRef scratch) { + if (block.isNull(position)) { + return null; + } + int count = block.getValueCount(position); + int start = block.getFirstValueIndex(position); + if (count == 1) { + return valueAt(dataType, block, start, scratch); + } + List values = new ArrayList<>(count); + int end = count + start; + for (int i = start; i < end; i++) { + values.add(valueAt(dataType, block, i, scratch)); + } + return values; + } + private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { return switch (dataType) { case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java index b9695445e6e6a..ad76fde7eca26 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.compute.data.Page; import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import java.util.Collections; import java.util.Iterator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java index ac8f9560074f5..df2536379f3be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java @@ -11,7 +11,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.xcontent.MediaType; -import org.elasticsearch.xpack.esql.action.ColumnInfo; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.ql.util.StringUtils; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index fded9339567bd..1e2557c040b06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -49,6 +49,7 @@ import org.elasticsearch.xpack.esql.EsqlUsageTransportAction; import org.elasticsearch.xpack.esql.action.EsqlAsyncGetResultAction; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; +import org.elasticsearch.xpack.esql.action.EsqlQueryRequestBuilder; import org.elasticsearch.xpack.esql.action.RestEsqlAsyncQueryAction; import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; @@ -60,6 +61,7 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.index.IndexResolver; +import java.lang.invoke.MethodHandles; import java.util.Collection; import java.util.List; import java.util.Objects; @@ -101,6 +103,7 @@ public Collection createComponents(PluginServices services) { ); BigArrays bigArrays = 
services.indicesService().getBigArrays().withCircuitBreaking(); BlockFactory blockFactory = new BlockFactory(circuitBreaker, bigArrays, maxPrimitiveArrayBlockSize); + setupSharedSecrets(); return List.of( new PlanExecutor( new IndexResolver( @@ -116,6 +119,15 @@ public Collection createComponents(PluginServices services) { ); } + private void setupSharedSecrets() { + try { + // EsqlQueryRequestBuilder. initializes the shared secret access + MethodHandles.lookup().ensureInitialized(EsqlQueryRequestBuilder.class); + } catch (IllegalAccessException e) { + throw new AssertionError(e); + } + } + /** * The settings defined by the ESQL plugin. * diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 366046d39dc43..bce189754b485 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; -import org.elasticsearch.xpack.esql.action.ColumnInfo; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 839e9c323bf74..0e2886d099916 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -56,6 +57,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.stream.Stream; @@ -67,6 +69,7 @@ import static org.elasticsearch.xpack.esql.action.ResponseValueUtils.valuesToPage; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; public class EsqlQueryResponseTests extends AbstractChunkedSerializingTestCase { @@ -513,4 +516,112 @@ public void testProfileXContent() { protected void dispose(EsqlQueryResponse esqlQueryResponse) { esqlQueryResponse.close(); } + + // Tests for response::column + public void testColumns() { + var intBlk1 = blockFactory.newIntArrayVector(new int[] { 10, 20 }, 2).asBlock(); + var intBlk2 = blockFactory.newIntArrayVector(new int[] { 30, 40, 50 }, 3).asBlock(); + var longBlk1 = blockFactory.newLongArrayVector(new long[] { 100L, 200L }, 
2).asBlock(); + var longBlk2 = blockFactory.newLongArrayVector(new long[] { 300L, 400L, 500L }, 3).asBlock(); + var columnInfo = List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("bar", "long")); + var pages = List.of(new Page(intBlk1, longBlk1), new Page(intBlk2, longBlk2)); + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + assertThat(columnValues(response.column(0)), contains(10, 20, 30, 40, 50)); + assertThat(columnValues(response.column(1)), contains(100L, 200L, 300L, 400L, 500L)); + expectThrows(IllegalArgumentException.class, () -> response.column(-1)); + expectThrows(IllegalArgumentException.class, () -> response.column(2)); + } + } + + public void testColumnsIllegalArg() { + var intBlk1 = blockFactory.newIntArrayVector(new int[] { 10 }, 1).asBlock(); + var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var pages = List.of(new Page(intBlk1)); + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + expectThrows(IllegalArgumentException.class, () -> response.column(-1)); + expectThrows(IllegalArgumentException.class, () -> response.column(1)); + } + } + + public void testColumnsWithNull() { + IntBlock blk1, blk2, blk3; + try ( + var bb1 = blockFactory.newIntBlockBuilder(2); + var bb2 = blockFactory.newIntBlockBuilder(4); + var bb3 = blockFactory.newIntBlockBuilder(4) + ) { + blk1 = bb1.appendInt(10).appendNull().build(); + blk2 = bb2.appendInt(30).appendNull().appendNull().appendInt(60).build(); + blk3 = bb3.appendNull().appendInt(80).appendInt(90).appendNull().build(); + } + var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + assertThat(columnValues(response.column(0)), contains(10, null, 30, null, null, 60, null, 80, 90, null)); + expectThrows(IllegalArgumentException.class, () -> response.column(-1)); + expectThrows(IllegalArgumentException.class, () -> response.column(2)); + } + } + + public void testColumnsWithMultiValue() { + IntBlock blk1, blk2, blk3; + try ( + var bb1 = blockFactory.newIntBlockBuilder(2); + var bb2 = blockFactory.newIntBlockBuilder(4); + var bb3 = blockFactory.newIntBlockBuilder(4) + ) { + blk1 = bb1.beginPositionEntry().appendInt(10).appendInt(20).endPositionEntry().appendNull().build(); + blk2 = bb2.beginPositionEntry().appendInt(40).appendInt(50).endPositionEntry().build(); + blk3 = bb3.appendNull().appendInt(70).appendInt(80).appendNull().build(); + } + var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + assertThat(columnValues(response.column(0)), contains(List.of(10, 20), null, List.of(40, 50), null, 70, 80, null)); + expectThrows(IllegalArgumentException.class, () -> response.column(-1)); + expectThrows(IllegalArgumentException.class, () -> response.column(2)); + } + } + + public void testRowValues() { + for (int times = 0; times < 10; times++) { + int numColumns = randomIntBetween(1, 10); + List columns = randomList(numColumns, numColumns, this::randomColumnInfo); + int noPages = randomIntBetween(1, 20); + List pages = randomList(noPages, noPages, () -> randomPage(columns)); + try (var resp = new EsqlQueryResponse(columns, pages, null, false, "", false, false)) { + var rowValues = 
getValuesList(resp.rows()); + var valValues = getValuesList(resp.values()); + for (int i = 0; i < rowValues.size(); i++) { + assertThat(rowValues.get(i), equalTo(valValues.get(i))); + } + } + } + } + + static List> getValuesList(Iterator> values) { + var valuesList = new ArrayList>(); + values.forEachRemaining(row -> { + var rowValues = new ArrayList<>(); + row.forEachRemaining(rowValues::add); + valuesList.add(rowValues); + }); + return valuesList; + } + + static List> getValuesList(Iterable> values) { + var valuesList = new ArrayList>(); + values.forEach(row -> { + var rowValues = new ArrayList<>(); + row.forEach(rowValues::add); + valuesList.add(rowValues); + }); + return valuesList; + } + + static List columnValues(Iterator values) { + List l = new ArrayList<>(); + values.forEachRemaining(l::add); + return l; + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java index 8403dc3775dce..80a3985be01b8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; -import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.ql.util.StringUtils; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java index 482ff84e1fd30..cde6a242e5e66 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java @@ -15,8 +15,8 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.geometry.Point; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; -import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import java.util.Arrays; From 7a7cbe4879fa51bcbfe857526e0a23592f9d689a Mon Sep 17 00:00:00 2001 From: William Brafford Date: Sun, 17 Mar 2024 16:12:42 -0400 Subject: [PATCH 227/248] [DOCS] Recommend a better installation directory for Windows (#106374) --- docs/reference/setup/install/zip-windows.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 7c3aab0bb89d8..214feaab16acc 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -41,7 +41,7 @@ window, `cd` to the `%ES_HOME%` directory, for instance: ["source","sh",subs="attributes"] ---------------------------- -cd C:\elasticsearch-{version} +cd C:\Program Files\elasticsearch-{version} ---------------------------- ifdef::include-xpack[] @@ -93,7 +93,7 @@ automatically at boot time without user interaction. 
+ ["source","sh",subs="attributes"] ---- -C:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat install +C:\Program Files\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat install Installing service : "elasticsearch-service-x64" Using ES_JAVA_HOME (64-bit): "C:\jvm\jdk1.8" The service 'elasticsearch-service-x64' has been installed. @@ -104,7 +104,7 @@ default: + ["source","sh",subs="attributes"] ---- -C:\elasticsearch-{version}{backslash}bin>bin\elasticsearch-service.bat start +C:\Program Files\elasticsearch-{version}{backslash}bin>bin\elasticsearch-service.bat start ---- + NOTE: TLS is not enabled or configured when you start {es} as a service. @@ -115,7 +115,7 @@ to the command line. + ["source","sh",subs="attributes"] ---- -C:\elasticsearch-{version}{backslash}bin>\bin\elasticsearch-reset-password -u elastic +C:\Program Files\elasticsearch-{version}{backslash}bin>\bin\elasticsearch-reset-password -u elastic ---- NOTE: While a JRE can be used for the {es} service, due to its use of a client @@ -138,7 +138,7 @@ service from the command line. ["source","sh",subs="attributes,callouts"] ---- -C:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat +C:\Program Files\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] ---- From 1c8bf79435ec321bcc9b048af9149e62cedd35af Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Mon, 18 Mar 2024 08:40:11 +0100 Subject: [PATCH 228/248] Use historical features for Yaml REST tests for cat apis where applicable (#106323) --- .../test/cat.aliases/10_basic.yml | 6 +-- .../test/cat.aliases/40_hidden.yml | 12 ++--- .../test/cat.allocation/10_basic.yml | 4 +- .../test/cat.indices/10_basic.yml | 18 ++++---- .../test/cat.indices/20_hidden.yml | 20 ++++----- .../test/cat.plugins/10_basic.yml | 4 +- .../test/cat.recovery/10_basic.yml | 10 ++--- .../test/cat.shards/10_basic.yml | 16 +++---- .../rest-api-spec/test/cat.tasks/10_basic.yml | 6 +-- .../test/cat.templates/10_basic.yml | 38 ++++++++-------- .../rest/yaml/YamlTestLegacyFeatures.java | 44 ++++++++++++++++++- 11 files changed, 111 insertions(+), 67 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 9eebb281795b0..fff8c3f499ba7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -487,9 +487,9 @@ --- "Deprecated local parameter": - - skip: - version: "- 8.11.99" - features: ["warnings"] + - requires: + cluster_features: ["cat_aliases_local_deprecated"] + test_runner_features: ["warnings"] reason: verifying deprecation warnings from 8.12.0 onwards - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/40_hidden.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/40_hidden.yml index 3fe58d7ea08f4..663668fb4b7af 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/40_hidden.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/40_hidden.yml @@ -1,7 +1,7 @@ --- "Test cat aliases output with a hidden index with a hidden alias": - - skip: - version: "- 7.6.99" + - requires: + cluster_features: ["cat_aliases_hidden"] reason: "hidden indices and aliases were added in 7.7.0" - do: @@ -57,8 
+57,8 @@ $/ --- "Test cat aliases output with a hidden index with a visible alias": - - skip: - version: "- 7.6.99" + - requires: + cluster_features: ["cat_aliases_hidden"] reason: "hidden indices and aliases were added in 7.7.0" - do: @@ -104,8 +104,8 @@ --- "Test cat aliases output with a visible index with a hidden alias": - - skip: - version: "- 7.6.99" + - requires: + cluster_features: ["cat_aliases_hidden"] reason: "hidden indices and aliases were added in 7.7.0" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index 2ba01c3b5711e..6d8e1205867b0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -261,8 +261,8 @@ --- "Node roles": - - skip: - version: " - 8.9.99" + - requires: + cluster_features: ["cat_allocation_node_role"] reason: "node.role column added in 8.10.0" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml index 9c3c716421061..2d006f3425790 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml @@ -9,8 +9,8 @@ /^$/ --- "Test cat indices output": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: dataset size was added in 8.11.0 - do: @@ -71,9 +71,10 @@ --- "Test cat indices output for closed index (pre 7.2.0)": - skip: - version: "7.2.0 - " reason: "closed indices are replicated starting version 7.2.0" - features: ["allowed_warnings"] + cluster_features: ["cat_indices_replicate_closed"] + - requires: + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -115,9 +116,10 @@ --- "Test cat indices output for closed index": - skip: - version: " - 7.1.99" reason: "closed indices are replicated starting version 7.2.0" - features: ["allowed_warnings"] + cluster_features: ["cat_indices_replicate_closed"] + - requires: + test_runner_features: ["allowed_warnings"] - do: indices.create: @@ -307,8 +309,8 @@ --- "Test cat indices with invalid health parameter": - - skip: - version: " - 7.7.1" + - requires: + cluster_features: ["cat_indices_validate_health_param"] reason: "fixed in 7.7.1+" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/20_hidden.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/20_hidden.yml index 63ed36f238119..49c319f9ba425 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/20_hidden.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/20_hidden.yml @@ -1,7 +1,7 @@ --- "Test cat indices output for hidden index": - - skip: - version: "- 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: "dataset size was added in 8.11.0" - do: indices.create: @@ -41,8 +41,8 @@ --- "Test cat indices output for dot-hidden index and dot-prefixed pattern": - - skip: - version: "- 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: "dataset size was added in 8.11.0" - do: indices.create: @@ -80,8 +80,8 @@ --- "Test cat indices output with a hidden index with a visible alias": - 
- skip: - version: "- 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: "dataset size was added in 8.11.0" - do: @@ -145,8 +145,8 @@ $/ --- "Test cat indices output with a hidden index with a hidden alias": - - skip: - version: "- 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: "dataset size was added in 8.11.0" - do: @@ -209,8 +209,8 @@ $/ --- "Test cat indices output with a hidden index, dot-hidden alias and dot pattern": - - skip: - version: "- 8.10.99" + - requires: + cluster_features: ["cat_indices_dataset_size"] reason: "dataset size was added in 8.11.0" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.plugins/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.plugins/10_basic.yml index 8503a382415e4..d5067e4d80d89 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.plugins/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.plugins/10_basic.yml @@ -1,7 +1,7 @@ --- "Help": - - skip: - version: " - 7.11.99" + - requires: + cluster_features: ["cat_plugins_new_format"] reason: output format changed in 7.12.0 - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml index a8c0808782272..e06435c4736d8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml @@ -1,7 +1,7 @@ --- "Test cat recovery output": - - skip: - version: " - 7.99.99" + - requires: + cluster_features: ["cat_recovery_new_bytes_format"] reason: format of bytes output changed in 8.0.0 - do: @@ -81,10 +81,10 @@ --- "Test cat recovery output for closed index": - - skip: - version: " - 7.99.99" + - requires: + cluster_features: ["cat_recovery_new_bytes_format"] reason: format of bytes output changed in 8.0.0 - features: ["allowed_warnings"] + test_runner_features: ["allowed_warnings"] - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml index 35ffd196e4ce9..d1bd0d7627f49 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,7 +1,7 @@ --- "Help": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: ["cat_shards_dataset_size"] reason: dataset size was added in 8.11.0 - do: cat.shards: @@ -90,8 +90,8 @@ $/ --- "Test cat shards output": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: [ "cat_shards_dataset_size" ] reason: dataset size was added in 8.11.0 - do: @@ -187,8 +187,8 @@ --- "Test cat shards sort": - - skip: - version: " - 8.10.99" + - requires: + cluster_features: [ "cat_shards_dataset_size" ] reason: dataset size was added in 8.11.0 - do: @@ -240,8 +240,8 @@ --- "Test cat shards with hidden indices": - - skip: - version: " - 8.2.99" + - requires: + cluster_features: ["cat_shards_fix_hidden_indices"] reason: hidden indices were misreported in versions before 8.3.0 - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.tasks/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.tasks/10_basic.yml index 
c3f0793a7999a..b0007870f1d74 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.tasks/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.tasks/10_basic.yml @@ -20,10 +20,10 @@ --- "Test cat tasks output with X-Opaque-Id": - - skip: - version: " - 7.9.99" + - requires: + cluster_features: ["cat_tasks_x_opaque_id"] reason: support for opaque_id was added in 7.10.0 - features: headers + test_runner_features: ["headers"] - do: headers: { "X-Opaque-Id": "TestID" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml index 255ed3a4b0719..f6f20913e402b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -1,7 +1,7 @@ --- "Help": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: ["cat_templates_v2"] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -31,8 +31,8 @@ --- "Normal templates": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -82,8 +82,8 @@ --- "Filtered templates": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -124,8 +124,8 @@ --- "Column headers": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -162,8 +162,8 @@ --- "Select columns": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - do: @@ -196,10 +196,10 @@ --- "Sort templates": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - features: default_shards, no_xpack + test_runner_features: default_shards, no_xpack - do: indices.put_template: @@ -249,10 +249,10 @@ --- "Multiple template": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - features: default_shards, no_xpack + test_runner_features: default_shards, no_xpack - do: indices.put_template: @@ -285,10 +285,10 @@ --- "Mixture of legacy and composable templates": - - skip: - version: " - 7.7.99" + - requires: + cluster_features: [ "cat_templates_v2" ] reason: "format changed in 7.8 to accomodate V2 index templates" - features: allowed_warnings + test_runner_features: allowed_warnings - do: cluster.put_component_template: diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java index 0c27cea49f955..fb9918e1f85f1 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/YamlTestLegacyFeatures.java @@ -21,9 +21,51 @@ public class YamlTestLegacyFeatures implements FeatureSpecification { private static final NodeFeature CAT_ALIASES_SHOW_WRITE_INDEX = new 
NodeFeature("cat_aliases_show_write_index"); + private static final NodeFeature CAT_ALIASES_HIDDEN = new NodeFeature("cat_aliases_hidden"); + private static final NodeFeature CAT_ALIASES_LOCAL_DEPRECATED = new NodeFeature("cat_aliases_local_deprecated"); + + private static final NodeFeature CAT_ALLOCATION_NODE_ROLE = new NodeFeature("cat_allocation_node_role"); + + private static final NodeFeature CAT_INDICES_DATASET_SIZE = new NodeFeature("cat_indices_dataset_size"); + private static final NodeFeature CAT_INDICES_REPLICATE_CLOSED = new NodeFeature("cat_indices_replicate_closed"); + private static final NodeFeature CAT_INDICES_VALIDATE_HEALTH_PARAM = new NodeFeature("cat_indices_validate_health_param"); + + private static final NodeFeature CAT_PLUGINS_NEW_FORMAT = new NodeFeature("cat_plugins_new_format"); + + private static final NodeFeature CAT_RECOVERY_NEW_BYTES_FORMAT = new NodeFeature("cat_recovery_new_bytes_format"); + + private static final NodeFeature CAT_SHARDS_DATASET_SIZE = new NodeFeature("cat_shards_dataset_size"); + private static final NodeFeature CAT_SHARDS_FIX_HIDDEN_INDICES = new NodeFeature("cat_shards_fix_hidden_indices"); + + private static final NodeFeature CAT_TASKS_X_OPAQUE_ID = new NodeFeature("cat_tasks_x_opaque_id"); + + private static final NodeFeature CAT_TEMPLATES_V2 = new NodeFeature("cat_templates_v2"); + private static final NodeFeature CAT_TEMPLATE_NAME_VALIDATION = new NodeFeature("cat_template_name_validation"); @Override public Map getHistoricalFeatures() { - return Map.ofEntries(Map.entry(CAT_ALIASES_SHOW_WRITE_INDEX, Version.V_7_4_0)); + return Map.ofEntries( + Map.entry(CAT_ALIASES_SHOW_WRITE_INDEX, Version.V_7_4_0), + Map.entry(CAT_ALIASES_HIDDEN, Version.V_7_7_0), + Map.entry(CAT_ALIASES_LOCAL_DEPRECATED, Version.V_8_12_0), + + Map.entry(CAT_ALLOCATION_NODE_ROLE, Version.V_8_10_0), + + Map.entry(CAT_INDICES_REPLICATE_CLOSED, Version.V_7_2_0), + Map.entry(CAT_INDICES_VALIDATE_HEALTH_PARAM, Version.V_7_8_0), + Map.entry(CAT_INDICES_DATASET_SIZE, Version.V_8_11_0), + + Map.entry(CAT_PLUGINS_NEW_FORMAT, Version.V_7_12_0), + + Map.entry(CAT_RECOVERY_NEW_BYTES_FORMAT, Version.V_8_0_0), + + Map.entry(CAT_SHARDS_FIX_HIDDEN_INDICES, Version.V_8_3_0), + Map.entry(CAT_SHARDS_DATASET_SIZE, Version.V_8_11_0), + + Map.entry(CAT_TASKS_X_OPAQUE_ID, Version.V_7_10_0), + + Map.entry(CAT_TEMPLATES_V2, Version.V_7_8_0), + Map.entry(CAT_TEMPLATE_NAME_VALIDATION, Version.V_7_16_0) + ); } } From 8247eeac0ee391690ac0dd2e9402d9031f87d032 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 18 Mar 2024 07:43:46 +0000 Subject: [PATCH 229/248] Clarify docs about the flood-stage index block (#106391) The docs here are a little inaccurate, and link to several individual settings (incorrectly in some cases) in a paragraph that's pretty hard to read. This commit fixes the inaccuracies and replaces the links to individual settings with one to all the docs about the disk-based shard allocator. --- docs/reference/index-modules/blocks.asciidoc | 31 ++++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/reference/index-modules/blocks.asciidoc b/docs/reference/index-modules/blocks.asciidoc index 3aa3c9e66d9ad..dcd6035fcf174 100644 --- a/docs/reference/index-modules/blocks.asciidoc +++ b/docs/reference/index-modules/blocks.asciidoc @@ -25,26 +25,25 @@ index: `index.blocks.read_only_allow_delete`:: - Similar to `index.blocks.write`, but also allows deleting the index to - make more resources available. 
The <> may add and remove this block automatically. + Similar to `index.blocks.write`, except that you can delete the index when + this block is in place. Do not set or remove this block yourself. The + <> sets and removes + this block automatically according to the available disk space. + Deleting documents from an index to release resources - rather than deleting -the index itself - can increase the index size over time. When +the index itself - increases the index size temporarily, and therefore may not +be possible when nodes are low on disk space. When `index.blocks.read_only_allow_delete` is set to `true`, deleting documents is -not permitted. However, deleting the index itself releases the read-only index -block and makes resources available almost immediately. +not permitted. However, deleting the index entirely requires very little extra +disk space and frees up the disk space consumed by the index almost immediately +so this is still permitted. + -IMPORTANT: {es} adds the read-only index block automatically when the disk -utilization exceeds the flood stage watermark, controlled by the -<> -and <> -settings, and removes the block automatically when the disk utilization falls -under the high watermark, controlled by the -<> -and <> -settings. Refer to <> to resolve -watermark issues. +IMPORTANT: {es} adds the read-only-allow-delete index block automatically when +the disk utilization exceeds the flood stage watermark, and removes this block +automatically when the disk utilization falls under the high watermark. See +<> for more +information about watermarks, and <> +for help with resolving watermark issues. `index.blocks.read`:: From 0b0b3b1c9b976112e005bc7158d549ddfcc53aa0 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 18 Mar 2024 09:16:33 +0100 Subject: [PATCH 230/248] Fix GoogleCloudStorageHttpHandler for parsing longs (#106240) Fixes a small bug in the GCS HTTP handler which can fail parsing bytes range request offsets that are longs. 
--- .../gcs/GoogleCloudStorageHttpHandler.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java index 964cb178b386f..7f9c50204b0a7 100644 --- a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java +++ b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java @@ -131,24 +131,27 @@ public void handle(final HttpExchange exchange) throws IOException { BytesReference blob = blobs.get(exchange.getRequestURI().getPath().replace("/download/storage/v1/b/" + bucket + "/o/", "")); if (blob != null) { final String range = exchange.getRequestHeaders().getFirst("Range"); - final int offset; - final int end; + final long offset; + final long end; if (range == null) { - offset = 0; + offset = 0L; end = blob.length() - 1; } else { Matcher matcher = RANGE_MATCHER.matcher(range); if (matcher.find() == false) { throw new AssertionError("Range bytes header does not match expected format: " + range); } - offset = Integer.parseInt(matcher.group(1)); - end = Integer.parseInt(matcher.group(2)); + offset = Long.parseLong(matcher.group(1)); + end = Long.parseLong(matcher.group(2)); } BytesReference response = blob; exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); final int bufferedLength = response.length(); if (offset > 0 || bufferedLength > end) { - response = response.slice(offset, Math.min(end + 1 - offset, bufferedLength - offset)); + response = response.slice( + Math.toIntExact(offset), + Math.toIntExact(Math.min(end + 1 - offset, bufferedLength - offset)) + ); } exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length()); response.writeTo(exchange.getResponseBody()); From c7f79f47ed36f0bfc53bdc0601f85c386996f7ea Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 18 Mar 2024 09:17:45 +0100 Subject: [PATCH 231/248] Fix AzureHttpHandler bytes range requests support (#106182) Fixes the AzureHttpHandler so that it supports range byte requests with ending offsets that go beyond the real blob length, like the real Azure service supports. 
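As a minimal sketch of the clamping this needs (illustrative names and values, not the fixture's code): when the requested end offset runs past the blob, the served slice has to be limited to the bytes that actually remain after the start offset.

class RangeClampSketch {
    // Number of bytes to serve for "bytes=start-end" against a blob of blobLength bytes.
    static long sliceLength(long start, long end, long blobLength) {
        return Math.min(end - start + 1, blobLength - start);
    }

    public static void main(String[] args) {
        System.out.println(sliceLength(0, 9, 100));    // 10: the range fits inside the blob
        System.out.println(sliceLength(90, 199, 100)); // 10: the end offset is past the blob, only the tail is served
    }
}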
--- .../src/main/java/fixture/azure/AzureHttpHandler.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index 14ce4a8318370..e49941efa1e70 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -129,15 +129,16 @@ public void handle(final HttpExchange exchange) throws IOException { throw new AssertionError("Range header does not match expected format: " + range); } - final int start = Integer.parseInt(matcher.group(1)); - final int length = Integer.parseInt(matcher.group(2)) - start + 1; + final long start = Long.parseLong(matcher.group(1)); + final long end = Long.parseLong(matcher.group(2)); + var responseBlob = blob.slice(Math.toIntExact(start), Math.toIntExact(Math.min(end - start + 1, blob.length() - start))); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(responseBlob.length())); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.getResponseHeaders().add("ETag", "\"blockblob\""); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); - exchange.getResponseBody().write(blob.toBytesRef().bytes, start, length); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), responseBlob.length()); + responseBlob.writeTo(exchange.getResponseBody()); } else if (Regex.simpleMatch("DELETE /" + account + "/" + container + "/*", request)) { // Delete Blob (https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob) From 2a2e648836e9c40f81c224a40ef53215bcedcce1 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 18 Mar 2024 10:56:51 +0100 Subject: [PATCH 232/248] Add a PriorityQueue backed by BigArrays (#106361) This implementation is heavily based on lucene's implementation but it is using an ObjectArray instead of java plain object arrays. 
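To show how the new queue is meant to be used, here is a minimal usage sketch (not part of the patch): an anonymous subclass supplies lessThan, insertWithOverflow keeps the N "largest" elements, and closing the queue releases the backing ObjectArray. The class name and sample values are made up, and BigArrays.NON_RECYCLING_INSTANCE is only used to keep the sketch self-contained; real callers pass the BigArrays from their context, as the aggregator changes below do with bigArrays().

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;

class TopNSketch {
    public static void main(String[] args) {
        // Keep the 3 largest values: lessThan defines a min-heap, so the smallest element sits on top
        // and is the one evicted by insertWithOverflow once the queue is full.
        try (var queue = new ObjectArrayPriorityQueue<Long>(3, BigArrays.NON_RECYCLING_INSTANCE) {
            @Override
            protected boolean lessThan(Long a, Long b) {
                return a < b;
            }
        }) {
            for (long value : new long[] { 42, 7, 19, 3 }) {
                queue.insertWithOverflow(value);
            }
            while (queue.size() > 0) {
                System.out.println(queue.pop()); // least first: 7, 19, 42
            }
        }
    }
}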
--- docs/changelog/106361.yaml | 5 + .../common/util/ObjectArrayPriorityQueue.java | 276 ++++++++++++++ .../bucket/composite/CompositeAggregator.java | 6 +- .../CompositeValuesCollectorQueue.java | 41 ++- .../bucket/composite/InternalComposite.java | 10 +- .../countedterms/CountedTermsAggregator.java | 45 ++- .../bucket/geogrid/BucketPriorityQueue.java | 9 +- .../bucket/geogrid/GeoGridAggregator.java | 35 +- .../bucket/geogrid/InternalGeoGrid.java | 29 +- .../bucket/terms/BucketPriorityQueue.java | 9 +- .../BucketSignificancePriorityQueue.java | 9 +- .../GlobalOrdinalsStringTermsAggregator.java | 54 +-- .../terms/InternalSignificantTerms.java | 47 +-- .../terms/MapStringTermsAggregator.java | 52 +-- .../bucket/terms/NumericTermsAggregator.java | 57 +-- .../util/ObjectArrayPriorityQueueTests.java | 341 ++++++++++++++++++ .../CompositeValuesCollectorQueueTests.java | 2 +- .../multiterms/MultiTermsAggregator.java | 59 +-- 18 files changed, 873 insertions(+), 213 deletions(-) create mode 100644 docs/changelog/106361.yaml create mode 100644 server/src/main/java/org/elasticsearch/common/util/ObjectArrayPriorityQueue.java create mode 100644 server/src/test/java/org/elasticsearch/common/util/ObjectArrayPriorityQueueTests.java diff --git a/docs/changelog/106361.yaml b/docs/changelog/106361.yaml new file mode 100644 index 0000000000000..a4cd608279c12 --- /dev/null +++ b/docs/changelog/106361.yaml @@ -0,0 +1,5 @@ +pr: 106361 +summary: Add a `PriorityQueue` backed by `BigArrays` +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/util/ObjectArrayPriorityQueue.java b/server/src/main/java/org/elasticsearch/common/util/ObjectArrayPriorityQueue.java new file mode 100644 index 0000000000000..f08f31c484c37 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/ObjectArrayPriorityQueue.java @@ -0,0 +1,276 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.common.util; + +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * A priority queue maintains a partial ordering of its elements such that the least element can + * always be found in constant time. Put()'s and pop()'s require log(size) but the remove() + * cost implemented here is linear. + * + *

        NOTE: Iteration order is not specified. + * + * Based in lucene's {@link org.apache.lucene.util.PriorityQueue} but it uses a {@link ObjectArray} instead of plain {code Object[]}. + * This class only track the {@link ObjectArray} and not the memory usage of the elements. Furthermore, + * the elements are not closed even if they implement {@link Releasable}. + */ +public abstract class ObjectArrayPriorityQueue implements Iterable, Releasable { + private long size = 0; + private final long maxSize; + // package private for testing + final ObjectArray heap; + + /** + * Create a priority queue. + */ + public ObjectArrayPriorityQueue(long maxSize, BigArrays bigArrays) { + final long heapSize; + if (0 == maxSize) { + // We allocate 1 extra to avoid if statement in top() + heapSize = 2; + } else { + if ((maxSize < 0) || (maxSize >= Long.MAX_VALUE)) { + // Throw exception to prevent confusing OOME: + throw new IllegalArgumentException("maxSize must be >= 0 and < " + (Long.MAX_VALUE) + "; got: " + maxSize); + } + // NOTE: we add +1 because all access to heap is + // 1-based not 0-based. heap[0] is unused. + heapSize = maxSize + 1; + } + + this.heap = bigArrays.newObjectArray(heapSize); + this.maxSize = maxSize; + } + + /** + * Determines the ordering of objects in this priority queue. Subclasses must define this one + * method. + * + * @return true iff parameter a is less than parameter b. + */ + protected abstract boolean lessThan(T a, T b); + + /** + * Adds an Object to a PriorityQueue in log(size) time. If one tries to add more objects than + * maxSize from initialize an {@link ArrayIndexOutOfBoundsException} is thrown. + * + * @return the new 'top' element in the queue. + */ + public final T add(T element) { + // don't modify size until we know heap access didn't throw AIOOB. + long index = size + 1; + heap.set(index, element); + size = index; + upHeap(index); + return heap.get(1); + } + + /** + * Adds all elements of the collection into the queue. This method should be preferred over + * calling {@link #add(Object)} in loop if all elements are known in advance as it builds queue + * faster. + * + *

        If one tries to add more objects than the maxSize passed in the constructor, an {@link + * ArrayIndexOutOfBoundsException} is thrown. + */ + public void addAll(Collection elements) { + if (this.size + elements.size() > this.maxSize) { + throw new ArrayIndexOutOfBoundsException( + "Cannot add " + elements.size() + " elements to a queue with remaining capacity: " + (maxSize - size) + ); + } + + // Heap with size S always takes first S elements of the array, + // and thus it's safe to fill array further - no actual non-sentinel value will be overwritten. + Iterator iterator = elements.iterator(); + while (iterator.hasNext()) { + this.heap.set(size + 1, iterator.next()); + this.size++; + } + + // The loop goes down to 1 as heap is 1-based not 0-based. + for (long i = (size >>> 1); i >= 1; i--) { + downHeap(i); + } + } + + /** + * Adds an Object to a PriorityQueue in log(size) time. It returns the object (if any) that was + * dropped off the heap because it was full. This can be the given parameter (in case it is + * smaller than the full heap's minimum, and couldn't be added), or another object that was + * previously the smallest value in the heap and now has been replaced by a larger one, or null if + * the queue wasn't yet full with maxSize elements. + */ + public T insertWithOverflow(T element) { + if (size < maxSize) { + add(element); + return null; + } else if (size > 0 && lessThan(heap.get(1), element)) { + T ret = heap.get(1); + heap.set(1, element); + updateTop(); + return ret; + } else { + return element; + } + } + + /** Returns the least element of the PriorityQueue in constant time. */ + public final T top() { + // We don't need to check size here: if maxSize is 0, + // then heap is length 2 array with both entries null. + // If size is 0 then heap[1] is already null. + return heap.get(1); + } + + /** Removes and returns the least element of the PriorityQueue in log(size) time. */ + public final T pop() { + if (size > 0) { + T result = heap.get(1); // save first value + heap.set(1, heap.get(size)); // move last to first + heap.set(size, null); // permit GC of objects + size--; + downHeap(1); // adjust heap + return result; + } else { + return null; + } + } + + /** + * Should be called when the Object at top changes values. Still log(n) worst case, but it's at + * least twice as fast to + * + *

+     * <pre>
+     * pq.top().change();
+     * pq.updateTop();
+     * </pre>
+     *
+     * instead of
+     *
+     * <pre>
+     * o = pq.pop();
+     * o.change();
+     * pq.push(o);
+     * </pre>
        + * + * @return the new 'top' element. + */ + public final T updateTop() { + downHeap(1); + return heap.get(1); + } + + /** Replace the top of the pq with {@code newTop} and run {@link #updateTop()}. */ + public final T updateTop(T newTop) { + heap.set(1, newTop); + return updateTop(); + } + + /** Returns the number of elements currently stored in the PriorityQueue. */ + public final long size() { + return size; + } + + /** Removes all entries from the PriorityQueue. */ + public final void clear() { + for (int i = 0; i <= size; i++) { + heap.set(i, null); + } + size = 0; + } + + /** + * Removes an existing element currently stored in the PriorityQueue. Cost is linear with the size + * of the queue. (A specialization of PriorityQueue which tracks element positions would provide a + * constant remove time but the trade-off would be extra cost to all additions/insertions) + */ + public final boolean remove(T element) { + for (int i = 1; i <= size; i++) { + if (heap.get(i) == element) { + heap.set(i, heap.get(size)); + heap.set(size, null); // permit GC of objects + size--; + if (i <= size) { + if (upHeap(i) == false) { + downHeap(i); + } + } + return true; + } + } + return false; + } + + private boolean upHeap(long origPos) { + long i = origPos; + T node = heap.get(i); // save bottom node + long j = i >>> 1; + while (j > 0 && lessThan(node, heap.get(j))) { + heap.set(i, heap.get(j)); // shift parents down + i = j; + j = j >>> 1; + } + heap.set(i, node); // install saved node + return i != origPos; + } + + private void downHeap(long i) { + T node = heap.get(i); // save top node + long j = i << 1; // find smaller child + long k = j + 1; + if (k <= size && lessThan(heap.get(k), heap.get(j))) { + j = k; + } + while (j <= size && lessThan(heap.get(j), node)) { + heap.set(i, heap.get(j)); // shift up child + i = j; + j = i << 1; + k = j + 1; + if (k <= size && lessThan(heap.get(k), heap.get(j))) { + j = k; + } + } + heap.set(i, node); // install saved node + } + + @Override + public final Iterator iterator() { + return new Iterator<>() { + + long i = 1; + + @Override + public boolean hasNext() { + return i <= size; + } + + @Override + public T next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + return heap.get(i++); + } + }; + } + + @Override + public final void close() { + Releasables.close(heap); + doClose(); + } + + protected void doClose() {} +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index e0189c3dd6651..638afbb3df261 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -191,9 +191,9 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I runDeferredCollections(); } - int num = Math.min(size, queue.size()); + int num = Math.min(size, (int) queue.size()); final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; - long[] bucketOrdsToCollect = new long[queue.size()]; + long[] bucketOrdsToCollect = new long[(int) queue.size()]; for (int i = 0; i < queue.size(); i++) { bucketOrdsToCollect[i] = i; } @@ -203,7 +203,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I CompositeKey key = queue.toCompositeKey(slot); 
InternalAggregations aggs = subAggsForBuckets.apply(slot); long docCount = queue.getDocCount(slot); - buckets[queue.size()] = new InternalComposite.InternalBucket( + buckets[(int) queue.size()] = new InternalComposite.InternalBucket( sourceNames, formats, key, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 0a7f6a26f580b..6f4a067d046f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -11,11 +11,10 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.core.Releasable; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -25,9 +24,9 @@ import static org.elasticsearch.core.Types.forciblyCast; /** - * A specialized {@link PriorityQueue} implementation for composite buckets. + * A specialized {@link ObjectArrayPriorityQueue} implementation for composite buckets. */ -final class CompositeValuesCollectorQueue extends PriorityQueue implements Releasable { +final class CompositeValuesCollectorQueue extends ObjectArrayPriorityQueue { private class Slot { final int value; @@ -74,25 +73,33 @@ private interface CompetitiveBoundsChangedListener { * @param indexReader */ CompositeValuesCollectorQueue(BigArrays bigArrays, SingleDimensionValuesSource[] sources, int size, IndexReader indexReader) { - super(size); + super(size, bigArrays); this.bigArrays = bigArrays; this.maxSize = size; this.arrays = sources; - // If the leading source is a GlobalOrdinalValuesSource we can apply an optimization which requires - // tracking the highest competitive value. - if (arrays[0] instanceof GlobalOrdinalValuesSource globalOrdinalValuesSource) { - if (shouldApplyGlobalOrdinalDynamicPruningForLeadingSource(sources, size, indexReader)) { - competitiveBoundsChangedListener = globalOrdinalValuesSource::updateHighestCompetitiveValue; + boolean success = false; + try { + // If the leading source is a GlobalOrdinalValuesSource we can apply an optimization which requires + // tracking the highest competitive value. 
+ if (arrays[0] instanceof GlobalOrdinalValuesSource globalOrdinalValuesSource) { + if (shouldApplyGlobalOrdinalDynamicPruningForLeadingSource(sources, size, indexReader)) { + competitiveBoundsChangedListener = globalOrdinalValuesSource::updateHighestCompetitiveValue; + } else { + competitiveBoundsChangedListener = null; + } } else { competitiveBoundsChangedListener = null; } - } else { - competitiveBoundsChangedListener = null; - } - this.map = Maps.newMapWithExpectedSize(size); - this.docCounts = bigArrays.newLongArray(1, false); + this.map = Maps.newMapWithExpectedSize(size); + this.docCounts = bigArrays.newLongArray(1, false); + success = true; + } finally { + if (success == false) { + super.close(); + } + } } private static boolean shouldApplyGlobalOrdinalDynamicPruningForLeadingSource( @@ -385,7 +392,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) throws IOException // and we recycle the deleted slot newSlot = slot; } else { - newSlot = size(); + newSlot = (int) size(); } // move the candidate key to its new slot copyCurrent(newSlot, inc); @@ -399,7 +406,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) throws IOException } @Override - public void close() { + protected void doClose() { Releasables.close(docCounts); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 6ceff52e51248..31cd5c9426755 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -9,9 +9,10 @@ package org.elasticsearch.search.aggregations.bucket.composite; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; @@ -200,7 +201,7 @@ int[] getReverseMuls() { @Override protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - final PriorityQueue pq = new PriorityQueue<>(size) { + final ObjectArrayPriorityQueue pq = new ObjectArrayPriorityQueue<>(size, reduceContext.bigArrays()) { @Override protected boolean lessThan(BucketIterator a, BucketIterator b) { return a.compareTo(b) < 0; @@ -271,6 +272,11 @@ public InternalAggregation get() { reduced.validateAfterKey(); return reduced; } + + @Override + public void close() { + Releasables.close(pq); + } }; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java index 588c53a2d1463..ba59026fbc12a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java @@ -92,27 +92,34 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I int size = (int) 
Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); // as users can't control sort order, in practice we'll always sort by doc count descending - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); - StringTerms.Bucket spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier emptyBucketBuilder = () -> new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (spare == null) { - spare = emptyBucketBuilder.get(); + try ( + BucketPriorityQueue ordered = new BucketPriorityQueue<>( + size, + bigArrays(), + partiallyBuiltBucketComparator + ) + ) { + StringTerms.Bucket spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + Supplier emptyBucketBuilder = () -> new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (spare == null) { + spare = emptyBucketBuilder.get(); + } + ordsEnum.readValue(spare.getTermBytes()); + spare.setDocCount(docCount); + spare.setBucketOrd(ordsEnum.ord()); + spare = ordered.insertWithOverflow(spare); } - ordsEnum.readValue(spare.getTermBytes()); - spare.setDocCount(docCount); - spare.setBucketOrd(ordsEnum.ord()); - spare = ordered.insertWithOverflow(spare); - } - topBucketsPerOrd[ordIdx] = new StringTerms.Bucket[ordered.size()]; - for (int i = ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); - topBucketsPerOrd[ordIdx][i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd[ordIdx][i].getTermBytes())); + topBucketsPerOrd[ordIdx] = new StringTerms.Bucket[(int) ordered.size()]; + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd[ordIdx][i] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); + topBucketsPerOrd[ordIdx][i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd[ordIdx][i].getTermBytes())); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index fc0d72f417b01..06390892817ce 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -7,12 +7,13 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends ObjectArrayPriorityQueue { - BucketPriorityQueue(int size) { - super(size); + BucketPriorityQueue(int size, BigArrays bigArrays) { + super(size, bigArrays); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 21c245a0237f2..069d972c5c29e 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -136,25 +136,26 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - if (spare == null) { - spare = newEmptyBucket(); - } + try (BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, bigArrays())) { + InternalGeoGridBucket spare = null; + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + while (ordsEnum.next()) { + if (spare == null) { + spare = newEmptyBucket(); + } - // need a special function to keep the source bucket - // up-to-date so it can get the appropriate key - spare.hashAsLong = ordsEnum.value(); - spare.docCount = bucketDocCount(ordsEnum.ord()); - spare.bucketOrd = ordsEnum.ord(); - spare = ordered.insertWithOverflow(spare); - } + // need a special function to keep the source bucket + // up-to-date so it can get the appropriate key + spare.hashAsLong = ordsEnum.value(); + spare.docCount = bucketDocCount(ordsEnum.ord()); + spare.bucketOrd = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; - for (int i = ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); + topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[(int) ordered.size()]; + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd[ordIdx][i] = ordered.pop(); + } } } buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index bbf92cbf679d0..4918a57b29ed1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -78,12 +78,12 @@ public List getBuckets() { } @Override - protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { + protected AggregatorReducer getLeaderReducer(AggregationReduceContext context, int size) { return new AggregatorReducer() { final LongObjectPagedHashMap bucketsReducer = new LongObjectPagedHashMap<>( size, - reduceContext.bigArrays() + context.bigArrays() ); @Override @@ -93,7 +93,7 @@ public void accept(InternalAggregation aggregation) { for (InternalGeoGridBucket bucket : grid.getBuckets()) { MultiBucketAggregatorsReducer reducer = bucketsReducer.get(bucket.hashAsLong()); if (reducer == null) { - reducer = new MultiBucketAggregatorsReducer(reduceContext, size); + reducer = new MultiBucketAggregatorsReducer(context, size); bucketsReducer.put(bucket.hashAsLong(), reducer); } reducer.accept(bucket); @@ -103,19 +103,20 @@ public void accept(InternalAggregation aggregation) { @Override public InternalAggregation get() { final int size = Math.toIntExact( - 
reduceContext.isFinalReduce() == false ? bucketsReducer.size() : Math.min(requiredSize, bucketsReducer.size()) + context.isFinalReduce() == false ? bucketsReducer.size() : Math.min(requiredSize, bucketsReducer.size()) ); - final BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - bucketsReducer.iterator().forEachRemaining(entry -> { - InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.get()); - ordered.insertWithOverflow(bucket); - }); - final InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; - for (int i = ordered.size() - 1; i >= 0; i--) { - list[i] = ordered.pop(); + try (BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, context.bigArrays())) { + bucketsReducer.iterator().forEachRemaining(entry -> { + InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.get()); + ordered.insertWithOverflow(bucket); + }); + final InternalGeoGridBucket[] list = new InternalGeoGridBucket[(int) ordered.size()]; + for (int i = (int) ordered.size() - 1; i >= 0; i--) { + list[i] = ordered.pop(); + } + context.consumeBucketsAndMaybeBreak(list.length); + return create(getName(), requiredSize, Arrays.asList(list), getMetadata()); } - reduceContext.consumeBucketsAndMaybeBreak(list.length); - return create(getName(), requiredSize, Arrays.asList(list), getMetadata()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java index 886524f627f28..81657b6bcd909 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java @@ -7,16 +7,17 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; -import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import java.util.Comparator; -public class BucketPriorityQueue extends PriorityQueue { +public class BucketPriorityQueue extends ObjectArrayPriorityQueue { private final Comparator comparator; - public BucketPriorityQueue(int size, Comparator comparator) { - super(size); + public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator comparator) { + super(size, bigArrays); this.comparator = comparator; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java index dacb0fab94e1a..3b12fc1fad13c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java @@ -8,12 +8,13 @@ package org.elasticsearch.search.aggregations.bucket.terms; -import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; -public class BucketSignificancePriorityQueue extends PriorityQueue { +public class BucketSignificancePriorityQueue extends ObjectArrayPriorityQueue { - public BucketSignificancePriorityQueue(int size) { - super(size); + public BucketSignificancePriorityQueue(int size, BigArrays bigArrays) { 
+ super(size, bigArrays); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index d184bb6c4c145..acdb24b9109af 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -710,30 +711,31 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws } else { size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize()); } - PriorityQueue ordered = buildPriorityQueue(size); - final int finalOrdIdx = ordIdx; - BucketUpdater updater = bucketUpdater(owningBucketOrds[ordIdx], lookupGlobalOrd); - collectionStrategy.forEach(owningBucketOrds[ordIdx], new BucketInfoConsumer() { - TB spare = null; - - @Override - public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { - otherDocCount[finalOrdIdx] += docCount; - if (docCount >= bucketCountThresholds.getShardMinDocCount()) { - if (spare == null) { - spare = buildEmptyTemporaryBucket(); + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + final int finalOrdIdx = ordIdx; + BucketUpdater updater = bucketUpdater(owningBucketOrds[ordIdx], lookupGlobalOrd); + collectionStrategy.forEach(owningBucketOrds[ordIdx], new BucketInfoConsumer() { + TB spare = null; + + @Override + public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { + otherDocCount[finalOrdIdx] += docCount; + if (docCount >= bucketCountThresholds.getShardMinDocCount()) { + if (spare == null) { + spare = buildEmptyTemporaryBucket(); + } + updater.updateBucket(spare, globalOrd, bucketOrd, docCount); + spare = ordered.insertWithOverflow(spare); } - updater.updateBucket(spare, globalOrd, bucketOrd, docCount); - spare = ordered.insertWithOverflow(spare); } - } - }); + }); - // Get the top buckets - topBucketsPreOrd[ordIdx] = buildBuckets(ordered.size()); - for (int i = ordered.size() - 1; i >= 0; --i) { - topBucketsPreOrd[ordIdx][i] = convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); - otherDocCount[ordIdx] -= topBucketsPreOrd[ordIdx][i].getDocCount(); + // Get the top buckets + topBucketsPreOrd[ordIdx] = buildBuckets((int) ordered.size()); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPreOrd[ordIdx][i] = convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); + otherDocCount[ordIdx] -= topBucketsPreOrd[ordIdx][i].getDocCount(); + } } } @@ -773,7 +775,7 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep * Build a {@link PriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. */ - abstract PriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); /** * Build an array to hold the "top" buckets for each ordinal. 
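The conversion just above shows the idiom repeated throughout this patch: the bucket queue is now a releasable ObjectArrayPriorityQueue, so it is opened in try-with-resources, filled via insertWithOverflow (whose non-null return value is an evicted or rejected element that can be recycled as the next spare), and drained with pop() from the back of the result array, since pop() yields elements in ascending lessThan order. Below is a compact, self-contained illustration of the same top-N idiom using plain integers instead of aggregation buckets; the class and method names are invented for the example, while the queue API calls are those introduced by this patch.

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArrayPriorityQueue;

final class TopNExample {
    /** Returns the {@code n} largest values in {@code values}, largest first. */
    static int[] topN(int[] values, int n, BigArrays bigArrays) {
        try (ObjectArrayPriorityQueue<Integer> ordered = new ObjectArrayPriorityQueue<>(n, bigArrays) {
            @Override
            protected boolean lessThan(Integer a, Integer b) {
                return a < b; // the least competitive element sits on top and is evicted first
            }
        }) {
            for (int value : values) {
                // returns null while there is room, otherwise the evicted or rejected element
                ordered.insertWithOverflow(value);
            }
            int[] top = new int[(int) ordered.size()];   // size() is a long in the new class
            for (int i = (int) ordered.size() - 1; i >= 0; --i) {
                // pop() returns the least element first, so filling from the back
                // leaves the largest value at index 0
                top[i] = ordered.pop();
            }
            return top;
        }
    }
}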
@@ -858,8 +860,8 @@ BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunc } @Override - PriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); + ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); } @Override @@ -1006,8 +1008,8 @@ BucketUpdater bucketUpdater(long owningBucketOrd, } @Override - PriorityQueue buildPriorityQueue(int size) { - return new BucketSignificancePriorityQueue<>(size); + ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index a84b0e369e223..0d06e455c57fa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -227,33 +227,34 @@ public void accept(InternalAggregation aggregation) { public InternalAggregation get() { final SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext); final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()); - final BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size); - for (ReducerAndProto reducerAndProto : buckets.values()) { - final B b = createBucket( - reducerAndProto.subsetDf[0], - globalSubsetSize, - reducerAndProto.supersetDf[0], - globalSupersetSize, - reducerAndProto.reducer.get(), - reducerAndProto.proto - ); - b.updateScore(heuristic); - if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) { - final B removed = ordered.insertWithOverflow(b); - if (removed == null) { - reduceContext.consumeBucketsAndMaybeBreak(1); + try (BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size, reduceContext.bigArrays())) { + for (ReducerAndProto reducerAndProto : buckets.values()) { + final B b = createBucket( + reducerAndProto.subsetDf[0], + globalSubsetSize, + reducerAndProto.supersetDf[0], + globalSupersetSize, + reducerAndProto.reducer.get(), + reducerAndProto.proto + ); + b.updateScore(heuristic); + if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) { + final B removed = ordered.insertWithOverflow(b); + if (removed == null) { + reduceContext.consumeBucketsAndMaybeBreak(1); + } else { + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed)); + } } else { - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed)); + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b)); } - } else { - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b)); } + final B[] list = createBucketsArray((int) ordered.size()); + for (int i = (int) ordered.size() - 1; i >= 0; i--) { + list[i] = ordered.pop(); + } + return create(globalSubsetSize, globalSupersetSize, Arrays.asList(list)); } - final B[] list = createBucketsArray(ordered.size()); - for (int i = ordered.size() - 1; i >= 0; i--) { - list[i] = ordered.pop(); - } - return create(globalSubsetSize, globalSupersetSize, Arrays.asList(list)); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index 5a9cc767fab17..66ecdeb1a87bd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -250,28 +251,29 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs); int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); - PriorityQueue ordered = buildPriorityQueue(size); - B spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = emptyBucketBuilder.get(); + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + B spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + spare = emptyBucketBuilder.get(); + } + updateBucket(spare, ordsEnum, docCount); + spare = ordered.insertWithOverflow(spare); } - updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } - topBucketsPerOrd[ordIdx] = buildBuckets(ordered.size()); - for (int i = ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); - finalizeBucket(topBucketsPerOrd[ordIdx][i]); + topBucketsPerOrd[ordIdx] = buildBuckets((int) ordered.size()); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd[ordIdx][i] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); + finalizeBucket(topBucketsPerOrd[ordIdx][i]); + } } } @@ -310,7 +312,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws * Build a {@link PriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. 
*/ - abstract PriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); /** * Update fields in {@code spare} to reflect information collected for @@ -408,8 +410,8 @@ Supplier emptyBucketBuilder(long owningBucketOrd) { } @Override - PriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); + ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); } @Override @@ -534,8 +536,8 @@ Supplier emptyBucketBuilder(long owningBucketOrd) } @Override - PriorityQueue buildPriorityQueue(int size) { - return new BucketSignificancePriorityQueue<>(size); + ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 80da463001a07..cce5140a36af7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -11,8 +11,8 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.FieldData; @@ -151,29 +151,30 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - PriorityQueue ordered = buildPriorityQueue(size); - B spare = null; - BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = emptyBucketBuilder.get(); + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + B spare = null; + BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + spare = emptyBucketBuilder.get(); + } + updateBucket(spare, ordsEnum, docCount); + spare = ordered.insertWithOverflow(spare); } - updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } - // Get the top buckets - B[] bucketsForOrd = buildBuckets(ordered.size()); - topBucketsPerOrd[ordIdx] = bucketsForOrd; - for (int b = ordered.size() - 1; b >= 0; --b) { - topBucketsPerOrd[ordIdx][b] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + // Get the top buckets + B[] bucketsForOrd = 
buildBuckets((int) ordered.size()); + topBucketsPerOrd[ordIdx] = bucketsForOrd; + for (int b = (int) ordered.size() - 1; b >= 0; --b) { + topBucketsPerOrd[ordIdx][b] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + } } } @@ -228,10 +229,10 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws abstract void updateBucket(B spare, BucketOrdsEnum ordsEnum, long docCount) throws IOException; /** - * Build a {@link PriorityQueue} to sort the buckets. After we've + * Build a {@link ObjectArrayPriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. */ - abstract PriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); /** * Build the sub-aggregations into the buckets. This will usually @@ -271,8 +272,8 @@ final LeafBucketCollector wrapCollector(LeafBucketCollector primary) { } @Override - final PriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); + final ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); } @Override @@ -557,8 +558,8 @@ void updateBucket(SignificantLongTerms.Bucket spare, BucketOrdsEnum ordsEnum, lo } @Override - PriorityQueue buildPriorityQueue(int size) { - return new BucketSignificancePriorityQueue<>(size); + ObjectArrayPriorityQueue buildPriorityQueue(int size) { + return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @Override diff --git a/server/src/test/java/org/elasticsearch/common/util/ObjectArrayPriorityQueueTests.java b/server/src/test/java/org/elasticsearch/common/util/ObjectArrayPriorityQueueTests.java new file mode 100644 index 0000000000000..d2e4f49f9cb54 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/ObjectArrayPriorityQueueTests.java @@ -0,0 +1,341 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.common.util; + +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Random; + +public class ObjectArrayPriorityQueueTests extends ESTestCase { + + private static BigArrays randombigArrays() { + return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + private static class IntegerQueue extends ObjectArrayPriorityQueue { + IntegerQueue(int count, BigArrays bigArrays) { + super(count, bigArrays); + } + + @Override + protected boolean lessThan(Integer a, Integer b) { + return (a < b); + } + + protected final void checkValidity() { + for (int i = 1; i <= size(); i++) { + int parent = i >>> 1; + if (parent > 1) { + if (lessThan(heap.get(parent), heap.get(i)) == false) { + assertThat(heap.get(i), Matchers.equalTo(heap.get(parent))); + } + } + } + } + } + + public void testZeroSizedQueue() { + try (ObjectArrayPriorityQueue pq = new IntegerQueue(0, randombigArrays())) { + assertEquals((Object) 1, pq.insertWithOverflow(1)); + assertEquals(0, pq.size()); + + // should fail, but passes and modifies the top... + pq.add(1); + assertEquals((Object) 1, pq.top()); + } + } + + public void testNoExtraWorkOnEqualElements() { + class Value { + private final int index; + private final int value; + + Value(int index, int value) { + this.index = index; + this.value = value; + } + } + + try (ObjectArrayPriorityQueue pq = new ObjectArrayPriorityQueue<>(5, randombigArrays()) { + @Override + protected boolean lessThan(Value a, Value b) { + return a.value < b.value; + } + }) { + + // Make all elements equal but record insertion order. + for (int i = 0; i < 100; i++) { + pq.insertWithOverflow(new Value(i, 0)); + } + + ArrayList indexes = new ArrayList<>(); + for (Value e : pq) { + indexes.add(e.index); + } + + // All elements are "equal" so we should have exactly the indexes of those elements that were + // added first. 
+ MatcherAssert.assertThat(indexes, Matchers.containsInAnyOrder(0, 1, 2, 3, 4)); + } + } + + public void testPQ() { + testPQ(atLeast(10000), random()); + } + + public static void testPQ(int count, Random gen) { + try (ObjectArrayPriorityQueue pq = new IntegerQueue(count, randombigArrays())) { + int sum = 0, sum2 = 0; + + for (int i = 0; i < count; i++) { + int next = gen.nextInt(); + sum += next; + pq.add(next); + } + + int last = Integer.MIN_VALUE; + for (int i = 0; i < count; i++) { + Integer next = pq.pop(); + assertTrue(next.intValue() >= last); + last = next.intValue(); + sum2 += last; + } + + assertEquals(sum, sum2); + } + } + + public void testFixedSize() { + try (ObjectArrayPriorityQueue pq = new IntegerQueue(3, randombigArrays())) { + pq.insertWithOverflow(2); + pq.insertWithOverflow(3); + pq.insertWithOverflow(1); + pq.insertWithOverflow(5); + pq.insertWithOverflow(7); + pq.insertWithOverflow(1); + assertEquals(3, pq.size()); + assertEquals((Integer) 3, pq.top()); + } + } + + public void testInsertWithOverflow() { + int size = 4; + try (ObjectArrayPriorityQueue pq = new IntegerQueue(size, randombigArrays())) { + Integer i1 = 2; + Integer i2 = 3; + Integer i3 = 1; + Integer i4 = 5; + Integer i5 = 7; + Integer i6 = 1; + + assertNull(pq.insertWithOverflow(i1)); + assertNull(pq.insertWithOverflow(i2)); + assertNull(pq.insertWithOverflow(i3)); + assertNull(pq.insertWithOverflow(i4)); + assertSame(pq.insertWithOverflow(i5), i3); // i3 should have been dropped + assertSame(pq.insertWithOverflow(i6), i6); // i6 should not have been inserted + assertEquals(size, pq.size()); + assertEquals((Integer) 2, pq.top()); + } + } + + public void testAddAllToEmptyQueue() { + Random random = random(); + int size = 10; + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(random.nextInt()); + } + try (IntegerQueue pq = new IntegerQueue(size, randombigArrays())) { + pq.addAll(list); + pq.checkValidity(); + assertOrderedWhenDrained(pq, list); + } + } + + public void testAddAllToPartiallyFilledQueue() { + try (IntegerQueue pq = new IntegerQueue(20, randombigArrays())) { + List oneByOne = new ArrayList<>(); + List bulkAdded = new ArrayList<>(); + Random random = random(); + for (int i = 0; i < 10; i++) { + bulkAdded.add(random.nextInt()); + + int x = random.nextInt(); + pq.add(x); + oneByOne.add(x); + } + + pq.addAll(bulkAdded); + pq.checkValidity(); + + oneByOne.addAll(bulkAdded); // Gather all "reference" data. 
+ assertOrderedWhenDrained(pq, oneByOne); + } + } + + public void testAddAllDoesNotFitIntoQueue() { + try (IntegerQueue pq = new IntegerQueue(20, randombigArrays())) { + List list = new ArrayList<>(); + Random random = random(); + for (int i = 0; i < 11; i++) { + list.add(random.nextInt()); + pq.add(random.nextInt()); + } + + assertThrows( + "Cannot add 11 elements to a queue with remaining capacity: 9", + ArrayIndexOutOfBoundsException.class, + () -> pq.addAll(list) + ); + } + } + + public void testRemovalsAndInsertions() { + Random random = random(); + int numDocsInPQ = TestUtil.nextInt(random, 1, 100); + try (IntegerQueue pq = new IntegerQueue(numDocsInPQ, randombigArrays())) { + Integer lastLeast = null; + + // Basic insertion of new content + ArrayList sds = new ArrayList(numDocsInPQ); + for (int i = 0; i < numDocsInPQ * 10; i++) { + Integer newEntry = Math.abs(random.nextInt()); + sds.add(newEntry); + Integer evicted = pq.insertWithOverflow(newEntry); + pq.checkValidity(); + if (evicted != null) { + assertTrue(sds.remove(evicted)); + if (evicted != newEntry) { + assertSame(evicted, lastLeast); + } + } + Integer newLeast = pq.top(); + if ((lastLeast != null) && (newLeast != newEntry) && (newLeast != lastLeast)) { + // If there has been a change of least entry and it wasn't our new + // addition we expect the scores to increase + assertTrue(newLeast <= newEntry); + assertTrue(newLeast >= lastLeast); + } + lastLeast = newLeast; + } + + // Try many random additions to existing entries - we should always see + // increasing scores in the lowest entry in the PQ + for (int p = 0; p < 500000; p++) { + int element = (int) (random.nextFloat() * (sds.size() - 1)); + Integer objectToRemove = sds.get(element); + assertSame(sds.remove(element), objectToRemove); + assertTrue(pq.remove(objectToRemove)); + pq.checkValidity(); + Integer newEntry = Math.abs(random.nextInt()); + sds.add(newEntry); + assertNull(pq.insertWithOverflow(newEntry)); + pq.checkValidity(); + Integer newLeast = pq.top(); + if ((objectToRemove != lastLeast) && (lastLeast != null) && (newLeast != newEntry)) { + // If there has been a change of least entry and it wasn't our new + // addition or the loss of our randomly removed entry we expect the + // scores to increase + assertTrue(newLeast <= newEntry); + assertTrue(newLeast >= lastLeast); + } + lastLeast = newLeast; + } + } + } + + public void testIteratorEmpty() { + try (IntegerQueue queue = new IntegerQueue(3, randombigArrays())) { + Iterator it = queue.iterator(); + assertFalse(it.hasNext()); + expectThrows(NoSuchElementException.class, () -> { it.next(); }); + } + } + + public void testIteratorOne() { + try (IntegerQueue queue = new IntegerQueue(3, randombigArrays())) { + queue.add(1); + Iterator it = queue.iterator(); + assertTrue(it.hasNext()); + assertEquals(Integer.valueOf(1), it.next()); + assertFalse(it.hasNext()); + expectThrows(NoSuchElementException.class, () -> { it.next(); }); + } + } + + public void testIteratorTwo() { + try (IntegerQueue queue = new IntegerQueue(3, randombigArrays())) { + queue.add(1); + queue.add(2); + Iterator it = queue.iterator(); + assertTrue(it.hasNext()); + assertEquals(Integer.valueOf(1), it.next()); + assertTrue(it.hasNext()); + assertEquals(Integer.valueOf(2), it.next()); + assertFalse(it.hasNext()); + expectThrows(NoSuchElementException.class, () -> { it.next(); }); + } + } + + public void testIteratorRandom() { + final int maxSize = TestUtil.nextInt(random(), 1, 20); + try (IntegerQueue queue = new IntegerQueue(maxSize, 
randombigArrays())) { + final int iters = atLeast(100); + final List expected = new ArrayList<>(); + for (int iter = 0; iter < iters; ++iter) { + if (queue.size() == 0 || (queue.size() < maxSize && random().nextBoolean())) { + final Integer value = random().nextInt(10); + queue.add(value); + expected.add(value); + } else { + expected.remove(queue.pop()); + } + List actual = new ArrayList<>(); + for (Integer value : queue) { + actual.add(value); + } + CollectionUtil.introSort(expected); + CollectionUtil.introSort(actual); + assertEquals(expected, actual); + } + } + } + + public void testMaxIntSize() { + expectThrows(IllegalArgumentException.class, () -> { + new ObjectArrayPriorityQueue(Long.MAX_VALUE, randombigArrays()) { + @Override + public boolean lessThan(Boolean a, Boolean b) { + // uncalled + return true; + } + }; + }); + } + + private void assertOrderedWhenDrained(IntegerQueue pq, List referenceDataList) { + Collections.sort(referenceDataList); + int i = 0; + while (pq.size() > 0) { + assertEquals(pq.pop(), referenceDataList.get(i)); + i++; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java index c78caced15a42..ade47e248a73c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -357,7 +357,7 @@ public void collect(int doc, long bucket) throws IOException { } } assertEquals(size, Math.min(queue.size(), expected.length - pos)); - int ptr = pos + (queue.size() - 1); + int ptr = pos + ((int) queue.size() - 1); pos += queue.size(); last = null; while (queue.size() > pos) { diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index 2a1d9f8c44c53..e0c927c762514 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -12,12 +12,12 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -237,33 +237,40 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - PriorityQueue ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); - InternalMultiTerms.Bucket spare = null; - BytesRef spareKey = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = 
bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters); - spareKey = new BytesRef(); + try ( + ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>( + size, + bigArrays(), + partiallyBuiltBucketComparator + ) + ) { + InternalMultiTerms.Bucket spare = null; + BytesRef spareKey = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters); + spareKey = new BytesRef(); + } + ordsEnum.readValue(spareKey); + spare.terms = unpackTerms(spareKey); + spare.docCount = docCount; + spare.bucketOrd = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); } - ordsEnum.readValue(spareKey); - spare.terms = unpackTerms(spareKey); - spare.docCount = docCount; - spare.bucketOrd = ordsEnum.ord(); - spare = ordered.insertWithOverflow(spare); - } - // Get the top buckets - InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[ordered.size()]; - topBucketsPerOrd[ordIdx] = bucketsForOrd; - for (int b = ordered.size() - 1; b >= 0; --b) { - topBucketsPerOrd[ordIdx][b] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + // Get the top buckets + InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()]; + topBucketsPerOrd[ordIdx] = bucketsForOrd; + for (int b = (int) ordered.size() - 1; b >= 0; --b) { + topBucketsPerOrd[ordIdx][b] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + } } } From 10ac06307e58dbb23d9569cd6cc9bec0fcdf67c7 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 18 Mar 2024 12:33:22 +0200 Subject: [PATCH 233/248] [DSL Global Retention] Calculate and use global retention in DSL (#106268) --- .../lifecycle/DataStreamLifecycleService.java | 8 +- .../cluster/metadata/DataStream.java | 22 +++-- .../cluster/metadata/DataStreamLifecycle.java | 74 +++++++++++++++- .../metadata/DataStreamLifecycleTests.java | 75 +++++++++++++++- .../cluster/metadata/DataStreamTests.java | 86 ++++++++++++------- .../metadata/DataStreamTestHelper.java | 13 +++ ...taStreamLifecycleUsageTransportAction.java | 4 +- 7 files changed, 235 insertions(+), 47 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 1b875c28f7f43..d1dd008e27977 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.SimpleBatchedExecutor; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; 
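The heart of this change is the retention precedence implemented further down in DataStreamLifecycle#getEffectiveDataRetentionWithSource: an explicit data stream retention wins unless it exceeds the global maximum, and a data stream without its own retention falls back to the global default or, failing that, to the global maximum. The sketch below restates just that decision; the class name is invented, and it deliberately ignores the disabled-lifecycle and null-global-retention cases that the real method handles.

import org.elasticsearch.core.TimeValue;

final class EffectiveRetentionSketch {
    /** Simplified: assumes the lifecycle is enabled and skips the "source" bookkeeping. */
    static TimeValue effectiveRetention(TimeValue dataStreamRetention, TimeValue globalDefault, TimeValue globalMax) {
        if (dataStreamRetention == null) {
            // No per-data-stream setting: the global default applies, else the global maximum.
            return globalDefault != null ? globalDefault : globalMax;
        }
        if (globalMax != null && globalMax.getMillis() < dataStreamRetention.getMillis()) {
            // The data stream asked for more than the cluster allows: cap at the global maximum.
            return globalMax;
        }
        return dataStreamRetention;
    }
}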
import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -796,7 +797,7 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { RolloverRequest rolloverRequest = getDefaultRolloverRequest( rolloverConfiguration, dataStream.getName(), - dataStream.getLifecycle().getEffectiveDataRetention() + dataStream.getLifecycle().getEffectiveDataRetention(DataStreamGlobalRetention.getFromClusterState(state)) ); transportActionsDeduplicator.executeOnce( rolloverRequest, @@ -823,14 +824,15 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { */ private Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { Metadata metadata = state.metadata(); - List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetention.getFromClusterState(state); + List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier, globalRetention); if (backingIndicesOlderThanRetention.isEmpty()) { return Set.of(); } Set indicesToBeRemoved = new HashSet<>(); // We know that there is lifecycle and retention because there are indices to be deleted assert dataStream.getLifecycle() != null; - TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(); + TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(globalRetention); for (Index index : backingIndicesOlderThanRetention) { if (indicesToExcludeForRemainingRun.contains(index) == false) { IndexMetadata backingIndex = metadata.index(index); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index a5f424f875eb7..776fb9fd87740 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -759,13 +759,17 @@ public DataStream snapshot(Collection indicesInSnapshot) { * NOTE that this specifically does not return the write index of the data stream as usually retention * is treated differently for the write index (i.e. 
they first need to be rolled over) */ - public List getIndicesPastRetention(Function indexMetadataSupplier, LongSupplier nowSupplier) { - if (lifecycle == null || lifecycle.isEnabled() == false || lifecycle.getEffectiveDataRetention() == null) { + public List getIndicesPastRetention( + Function indexMetadataSupplier, + LongSupplier nowSupplier, + DataStreamGlobalRetention globalRetention + ) { + if (lifecycle == null || lifecycle.isEnabled() == false || lifecycle.getEffectiveDataRetention(globalRetention) == null) { return List.of(); } List indicesPastRetention = getNonWriteIndicesOlderThan( - lifecycle.getEffectiveDataRetention(), + lifecycle.getEffectiveDataRetention(globalRetention), indexMetadataSupplier, this::isIndexManagedByDataStreamLifecycle, nowSupplier @@ -1098,14 +1102,18 @@ public static DataStream fromXContent(XContentParser parser) throws IOException @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null); + return toXContent(builder, params, null, null); } /** * Converts the data stream to XContent and passes the RolloverConditions, when provided, to the lifecycle. */ - public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration) - throws IOException { + public XContentBuilder toXContent( + XContentBuilder builder, + Params params, + @Nullable RolloverConfiguration rolloverConfiguration, + @Nullable DataStreamGlobalRetention globalRetention + ) throws IOException { builder.startObject(); builder.field(NAME_FIELD.getPreferredName(), name); builder.field(TIMESTAMP_FIELD_FIELD.getPreferredName()) @@ -1132,7 +1140,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla } if (lifecycle != null) { builder.field(LIFECYCLE.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration); + lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention); } builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), rolloverOnWrite); if (autoShardingEvent != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index b4a3a1eb3502a..a8b094bafde2e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -34,6 +35,7 @@ import java.io.IOException; import java.util.List; +import java.util.Locale; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -131,15 +133,52 @@ public boolean isEnabled() { /** * The least amount of time data should be kept by elasticsearch. * @return the time period or null, null represents that data should never be deleted. 
+ * @deprecated use {@link #getEffectiveDataRetention(DataStreamGlobalRetention)} */ + @Deprecated @Nullable public TimeValue getEffectiveDataRetention() { - return getDataStreamRetention(); + return getEffectiveDataRetention(null); + } + + /** + * The least amount of time data should be kept by elasticsearch. + * @return the time period or null, null represents that data should never be deleted. + */ + @Nullable + public TimeValue getEffectiveDataRetention(@Nullable DataStreamGlobalRetention globalRetention) { + return getEffectiveDataRetentionWithSource(globalRetention).v1(); + } + + /** + * The least amount of time data should be kept by elasticsearch. + * @return the time period or null, null represents that data should never be deleted. + */ + @Nullable + public Tuple getEffectiveDataRetentionWithSource(@Nullable DataStreamGlobalRetention globalRetention) { + // If lifecycle is disabled there is no effective retention + if (enabled == false) { + return Tuple.tuple(null, RetentionSource.DATA_STREAM_CONFIGURATION); + } + var dataStreamRetention = getDataStreamRetention(); + if (globalRetention == null) { + return Tuple.tuple(dataStreamRetention, RetentionSource.DATA_STREAM_CONFIGURATION); + } + if (dataStreamRetention == null) { + return globalRetention.getDefaultRetention() != null + ? Tuple.tuple(globalRetention.getDefaultRetention(), RetentionSource.DEFAULT_GLOBAL_RETENTION) + : Tuple.tuple(globalRetention.getMaxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); + } + if (globalRetention.getMaxRetention() != null && globalRetention.getMaxRetention().getMillis() < dataStreamRetention.getMillis()) { + return Tuple.tuple(globalRetention.getMaxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); + } else { + return Tuple.tuple(dataStreamRetention, RetentionSource.DATA_STREAM_CONFIGURATION); + } } /** * The least amount of time data the data stream is requesting es to keep the data. - * NOTE: this can be overriden by the {@link DataStreamLifecycle#getEffectiveDataRetention()}. + * NOTE: this can be overridden by the {@link DataStreamLifecycle#getEffectiveDataRetention(DataStreamGlobalRetention)}. * @return the time period or null, null represents that data should never be deleted. */ @Nullable @@ -232,14 +271,28 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null); + return toXContent(builder, params, null, null); } /** * Converts the data stream lifecycle to XContent and injects the RolloverConditions if they exist. + * @deprecated use {@link #toXContent(XContentBuilder, Params, RolloverConfiguration, DataStreamGlobalRetention)} */ + @Deprecated public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration) throws IOException { + return toXContent(builder, params, rolloverConfiguration, null); + } + + /** + * Converts the data stream lifecycle to XContent and injects the RolloverConditions and the global retention if they exist. 
+ */ + public XContentBuilder toXContent( + XContentBuilder builder, + Params params, + @Nullable RolloverConfiguration rolloverConfiguration, + @Nullable DataStreamGlobalRetention globalRetention + ) throws IOException { builder.startObject(); builder.field(ENABLED_FIELD.getPreferredName(), enabled); if (dataRetention != null) { @@ -255,7 +308,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla } if (rolloverConfiguration != null) { builder.field(ROLLOVER_FIELD.getPreferredName()); - rolloverConfiguration.evaluateAndConvertToXContent(builder, params, getEffectiveDataRetention()); + rolloverConfiguration.evaluateAndConvertToXContent(builder, params, getEffectiveDataRetention(globalRetention)); } builder.endObject(); return builder; @@ -466,4 +519,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } } + + /** + * This enum represents all configuration sources that can influence the retention of a data stream. + */ + public enum RetentionSource { + DATA_STREAM_CONFIGURATION, + DEFAULT_GLOBAL_RETENTION, + MAX_GLOBAL_RETENTION; + + public String displayName() { + return this.toString().toLowerCase(Locale.ROOT); + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index 441e8491b4b92..e3bf5260a7445 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.ESTestCase; @@ -33,6 +34,9 @@ import java.util.Set; import java.util.stream.Stream; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -107,10 +111,11 @@ public void testXContentSerializationWithRollover() throws IOException { try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - lifecycle.toXContent(builder, ToXContent.EMPTY_PARAMS, rolloverConfiguration); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + lifecycle.toXContent(builder, ToXContent.EMPTY_PARAMS, rolloverConfiguration, globalRetention); String serialized = Strings.toString(builder); assertThat(serialized, containsString("rollover")); - for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention()) + for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(globalRetention)) .getConditions() .keySet()) { 
assertThat(serialized, containsString(label)); @@ -253,6 +258,72 @@ public void testInvalidDownsamplingConfiguration() { } } + public void testEffectiveRetention() { + // No retention in the data stream lifecycle + { + DataStreamLifecycle noRetentionLifecycle = DataStreamLifecycle.newBuilder().downsampling(randomDownsampling()).build(); + TimeValue maxRetention = TimeValue.timeValueDays(randomIntBetween(50, 100)); + TimeValue defaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 50)); + Tuple effectiveDataRetentionWithSource = noRetentionLifecycle + .getEffectiveDataRetentionWithSource(null); + assertThat(effectiveDataRetentionWithSource.v1(), nullValue()); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DATA_STREAM_CONFIGURATION)); + + effectiveDataRetentionWithSource = noRetentionLifecycle.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(null, maxRetention) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(maxRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(MAX_GLOBAL_RETENTION)); + + effectiveDataRetentionWithSource = noRetentionLifecycle.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(defaultRetention, null) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(defaultRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DEFAULT_GLOBAL_RETENTION)); + + effectiveDataRetentionWithSource = noRetentionLifecycle.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(defaultRetention, maxRetention) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(defaultRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DEFAULT_GLOBAL_RETENTION)); + } + + // With retention in the data stream lifecycle + { + TimeValue dataStreamRetention = TimeValue.timeValueDays(randomIntBetween(5, 100)); + DataStreamLifecycle lifecycleRetention = DataStreamLifecycle.newBuilder() + .dataRetention(dataStreamRetention) + .downsampling(randomDownsampling()) + .build(); + TimeValue defaultRetention = TimeValue.timeValueDays(randomIntBetween(1, (int) dataStreamRetention.getDays() - 1)); + + Tuple effectiveDataRetentionWithSource = lifecycleRetention + .getEffectiveDataRetentionWithSource(null); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(dataStreamRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DATA_STREAM_CONFIGURATION)); + + effectiveDataRetentionWithSource = lifecycleRetention.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(defaultRetention, null) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(dataStreamRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DATA_STREAM_CONFIGURATION)); + + TimeValue maxGlobalRetention = randomBoolean() ? dataStreamRetention : TimeValue.timeValueDays(dataStreamRetention.days() + 1); + effectiveDataRetentionWithSource = lifecycleRetention.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(defaultRetention, maxGlobalRetention) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(dataStreamRetention)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(DATA_STREAM_CONFIGURATION)); + + TimeValue maxRetentionLessThanDataStream = TimeValue.timeValueDays(dataStreamRetention.days() - 1); + effectiveDataRetentionWithSource = lifecycleRetention.getEffectiveDataRetentionWithSource( + new DataStreamGlobalRetention(randomBoolean() ? 
null : TimeValue.timeValueDays(10), maxRetentionLessThanDataStream) + ); + assertThat(effectiveDataRetentionWithSource.v1(), equalTo(maxRetentionLessThanDataStream)); + assertThat(effectiveDataRetentionWithSource.v2(), equalTo(MAX_GLOBAL_RETENTION)); + } + } + @Nullable public static DataStreamLifecycle randomLifecycle() { return DataStreamLifecycle.newBuilder() diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 8e1ce495fdf5c..3e758df17c432 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -50,6 +50,7 @@ import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomGlobalRetention; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomIndexInstances; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.randomNonEmptyIndexInstances; import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE; @@ -1146,11 +1147,15 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - assertThat(dataStream.getIndicesPastRetention(metadata::index, () -> now).isEmpty(), is(true)); + assertThat(dataStream.getIndicesPastRetention(metadata::index, () -> now, randomGlobalRetention()).isEmpty(), is(true)); } { - // no retention configured so we expect an empty list + // no retention configured but we have default retention + DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention( + TimeValue.timeValueMillis(2500), + randomBoolean() ? 
TimeValue.timeValueMillis(randomIntBetween(2500, 5000)) : null + ); Metadata.Builder builder = Metadata.builder(); DataStream dataStream = createDataStream( builder, @@ -1161,7 +1166,29 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - assertThat(dataStream.getIndicesPastRetention(metadata::index, () -> now).isEmpty(), is(true)); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, globalRetention); + assertThat(backingIndices.size(), is(2)); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); + } + + { + // no retention configured but we have max retention + DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention(null, TimeValue.timeValueMillis(2500)); + Metadata.Builder builder = Metadata.builder(); + DataStream dataStream = createDataStream( + builder, + dataStreamName, + creationAndRolloverTimes, + settings(IndexVersion.current()), + new DataStreamLifecycle() + ); + Metadata metadata = builder.build(); + + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, globalRetention); + assertThat(backingIndices.size(), is(2)); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); } { @@ -1175,10 +1202,10 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, randomGlobalRetention()); assertThat(backingIndices.size(), is(2)); - assertThat(backingIndices.get(0).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 1))); - assertThat(backingIndices.get(1).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 2))); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); } { @@ -1193,13 +1220,13 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, randomGlobalRetention()); assertThat(backingIndices.size(), is(4)); - assertThat(backingIndices.get(0).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 1))); - assertThat(backingIndices.get(1).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 2))); - assertThat(backingIndices.get(2).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 3))); - assertThat(backingIndices.get(3).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 4))); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); + assertThat(backingIndices.get(2).getName(), is(dataStream.getIndices().get(2).getName())); + assertThat(backingIndices.get(3).getName(), is(dataStream.getIndices().get(3).getName())); } { @@ -1214,7 +1241,7 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - List backingIndices = 
dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, randomGlobalRetention()); assertThat(backingIndices.isEmpty(), is(true)); } @@ -1232,13 +1259,13 @@ public void testGetIndicesPastRetention() { ); Metadata metadata = builder.build(); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, randomGlobalRetention()); assertThat(backingIndices.isEmpty(), is(true)); } } public void testGetIndicesPastRetentionWithOriginationDate() { - // First, build an ordinary datastream: + // First, build an ordinary data stream: String dataStreamName = "metrics-foo"; long now = System.currentTimeMillis(); List creationAndRolloverTimes = List.of( @@ -1267,37 +1294,37 @@ public TimeValue getDataStreamRetention() { { // no retention configured so we expect an empty list testRetentionReference.set(null); - assertThat(dataStream.getIndicesPastRetention(metadata::index, () -> now).isEmpty(), is(true)); + assertThat(dataStream.getIndicesPastRetention(metadata::index, () -> now, null).isEmpty(), is(true)); } { // retention period where oldIndex is too old, but newIndex should be retained testRetentionReference.set(TimeValue.timeValueMillis(2500)); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, null); assertThat(backingIndices.size(), is(3)); - assertThat(backingIndices.get(0).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 1))); - assertThat(backingIndices.get(1).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 2))); - assertThat(backingIndices.get(2).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 6))); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); + assertThat(backingIndices.get(2).getName(), is(dataStream.getIndices().get(5).getName())); } { // even though all indices match the write index should not be returned testRetentionReference.set(TimeValue.timeValueMillis(0)); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, null); assertThat(backingIndices.size(), is(6)); - assertThat(backingIndices.get(0).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 1))); - assertThat(backingIndices.get(1).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 2))); - assertThat(backingIndices.get(2).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 3))); - assertThat(backingIndices.get(3).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 4))); - assertThat(backingIndices.get(4).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 5))); - assertThat(backingIndices.get(5).getName(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 6))); + assertThat(backingIndices.get(0).getName(), is(dataStream.getIndices().get(0).getName())); + assertThat(backingIndices.get(1).getName(), is(dataStream.getIndices().get(1).getName())); + assertThat(backingIndices.get(2).getName(), is(dataStream.getIndices().get(2).getName())); + assertThat(backingIndices.get(3).getName(), 
is(dataStream.getIndices().get(3).getName())); + assertThat(backingIndices.get(4).getName(), is(dataStream.getIndices().get(4).getName())); + assertThat(backingIndices.get(5).getName(), is(dataStream.getIndices().get(5).getName())); } { // no index matches the retention age testRetentionReference.set(TimeValue.timeValueMillis(9000)); - List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now); + List backingIndices = dataStream.getIndicesPastRetention(metadata::index, () -> now, null); assertThat(backingIndices.isEmpty(), is(true)); } } @@ -1670,10 +1697,11 @@ public void testXContentSerializationWithRollover() throws IOException { try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); RolloverConfiguration rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - dataStream.toXContent(builder, ToXContent.EMPTY_PARAMS, rolloverConfiguration); + DataStreamGlobalRetention globalRetention = DataStreamGlobalRetentionSerializationTests.randomGlobalRetention(); + dataStream.toXContent(builder, ToXContent.EMPTY_PARAMS, rolloverConfiguration, globalRetention); String serialized = Strings.toString(builder); assertThat(serialized, containsString("rollover")); - for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention()) + for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(globalRetention)) .getConditions() .keySet()) { assertThat(serialized, containsString(label)); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 5d6ba6c3a6d1d..4cc019a300e8b 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; @@ -372,6 +373,18 @@ public static DataStreamAlias randomAliasInstance() { ); } + @Nullable + public static DataStreamGlobalRetention randomGlobalRetention() { + if (randomBoolean()) { + return null; + } + boolean withDefault = randomBoolean(); + return new DataStreamGlobalRetention( + withDefault ? TimeValue.timeValueDays(randomIntBetween(1, 30)) : null, + withDefault == false || randomBoolean() ? TimeValue.timeValueDays(randomIntBetween(31, 100)) : null + ); + } + /** * Constructs {@code ClusterState} with the specified data streams and indices. 
* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java index fb49ba6c7e7a7..947adf9f8462f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java @@ -85,8 +85,8 @@ public static Tuple calculateStats(Collection Date: Mon, 18 Mar 2024 11:43:30 +0100 Subject: [PATCH 234/248] Fix S3HttpHandler bytes range requests support (#106180) This change fixes the S3HttpHandler so that it supports byte range requests with ending offsets that can be larger than the blob length. This is something that is supported today by S3, and the current version of the AWS SDK we use can also set an ending offset to a very large value (Long.MAX_VALUE). This change also adds support for the trappy situation where the starting offset is larger than the blob length. --- .../s3/S3RetryingInputStream.java | 2 +- .../main/java/fixture/s3/S3HttpHandler.java | 64 +++++--- .../java/fixture/s3/S3HttpHandlerTests.java | 140 ++++++++++++++++-- 3 files changed, 171 insertions(+), 35 deletions(-) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index f7a99a399f59f..998455a658406 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -124,7 +124,7 @@ private long getStreamLength(final S3Object object) { assert range[1] >= range[0] : range[1] + " vs " + range[0]; assert range[0] == start + currentOffset : "Content-Range start value [" + range[0] + "] exceeds start [" + start + "] + current offset [" + currentOffset + ']'; - assert range[1] == end : "Content-Range end value [" + range[1] + "] exceeds end [" + end + ']'; + assert range[1] <= end : "Content-Range end value [" + range[1] + "] exceeds end [" + end + ']'; return range[1] - range[0] + 1L; } return metadata.getContentLength(); diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index 21fa0c59352da..7f363fe0b87c3 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -245,31 +245,49 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("GET /" + path + "/*", request)) { final BytesReference blob = blobs.get(requestComponents.uri()); - if (blob != null) { - final String range = exchange.getRequestHeaders().getFirst("Range"); - if (range == null) { - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); - blob.writeTo(exchange.getResponseBody()); - } else { - final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(range); - if (matcher.matches() == false) { - throw new AssertionError("Bytes range does not match expected pattern: " + range); - } - - final int start = Integer.parseInt(matcher.group(1)); - final int end =
Integer.parseInt(matcher.group(2)); - - final BytesReference rangeBlob = blob.slice(start, end + 1 - start); - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders() - .add("Content-Range", String.format(Locale.ROOT, "bytes %d-%d/%d", start, end, rangeBlob.length())); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), rangeBlob.length()); - rangeBlob.writeTo(exchange.getResponseBody()); - } - } else { + if (blob == null) { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); + return; + } + final String range = exchange.getRequestHeaders().getFirst("Range"); + if (range == null) { + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); + blob.writeTo(exchange.getResponseBody()); + return; + } + + // S3 supports https://www.rfc-editor.org/rfc/rfc9110.html#name-range. The AWS SDK v1.x seems to always generate range + // requests with a header value like "Range: bytes=start-end" where both {@code start} and {@code end} are always defined + // (sometimes to very high value for {@code end}). It would be too tedious to fully support the RFC so S3HttpHandler only + // supports when both {@code start} and {@code end} are defined to match the SDK behavior. + final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(range); + if (matcher.matches() == false) { + throw new AssertionError("Bytes range does not match expected pattern: " + range); + } + var groupStart = matcher.group(1); + var groupEnd = matcher.group(2); + if (groupStart == null || groupEnd == null) { + throw new AssertionError("Bytes range does not match expected pattern: " + range); + } + long start = Long.parseLong(groupStart); + long end = Long.parseLong(groupEnd); + if (end < start) { + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); + blob.writeTo(exchange.getResponseBody()); + return; + } else if (blob.length() <= start) { + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.sendResponseHeaders(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus(), -1); + return; } + var responseBlob = blob.slice(Math.toIntExact(start), Math.toIntExact(Math.min(end - start + 1, blob.length() - start))); + end = start + responseBlob.length() - 1; + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("Content-Range", String.format(Locale.ROOT, "bytes %d-%d/%d", start, end, blob.length())); + exchange.sendResponseHeaders(RestStatus.PARTIAL_CONTENT.getStatus(), responseBlob.length()); + responseBlob.writeTo(exchange.getResponseBody()); } else if (Regex.simpleMatch("DELETE /" + path + "/*", request)) { int deletions = 0; diff --git a/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java b/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java index 5227c333e5659..0188200186103 100644 --- a/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java +++ b/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java @@ -83,6 +83,63 @@ public void testSimpleObjectOperations() { ); } + public void testGetWithBytesRange() { + final var handler = new S3HttpHandler("bucket", "path"); + final var blobName = "blob_name_" + randomIdentifier(); + final var blobPath = "/bucket/path/" + blobName; + final 
var blobBytes = randomBytesReference(256); + assertEquals(RestStatus.OK, handleRequest(handler, "PUT", blobPath, blobBytes).status()); + + assertEquals( + "No Range", + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath) + ); + + var end = blobBytes.length() - 1; + assertEquals( + "Exact Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.PARTIAL_CONTENT, blobBytes, contentRangeHeader(0, end, blobBytes.length())), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, bytesRangeHeader(0, end)) + ); + + end = randomIntBetween(blobBytes.length() - 1, Integer.MAX_VALUE); + assertEquals( + "Larger Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.PARTIAL_CONTENT, blobBytes, contentRangeHeader(0, blobBytes.length() - 1, blobBytes.length())), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, bytesRangeHeader(0, end)) + ); + + var start = randomIntBetween(blobBytes.length(), Integer.MAX_VALUE - 1); + end = randomIntBetween(start, Integer.MAX_VALUE); + assertEquals( + "Invalid Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, BytesArray.EMPTY, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, bytesRangeHeader(start, end)) + ); + + start = randomIntBetween(2, Integer.MAX_VALUE - 1); + end = randomIntBetween(0, start - 1); + assertEquals( + "Weird Valid Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, bytesRangeHeader(start, end)) + ); + + start = randomIntBetween(0, blobBytes.length() - 1); + var length = randomIntBetween(1, blobBytes.length() - start); + end = start + length - 1; + assertEquals( + "Range: bytes=" + start + '-' + end, + new TestHttpResponse( + RestStatus.PARTIAL_CONTENT, + blobBytes.slice(start, length), + contentRangeHeader(start, end, blobBytes.length()) + ), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, bytesRangeHeader(start, end)) + ); + } + public void testSingleMultipartUpload() { final var handler = new S3HttpHandler("bucket", "path"); @@ -99,12 +156,12 @@ public void testSingleMultipartUpload() { final var part1 = randomAlphaOfLength(50); final var uploadPart1Response = handleRequest(handler, "PUT", "/bucket/path/blob?uploadId=" + uploadId + "&partNumber=1", part1); final var part1Etag = Objects.requireNonNull(uploadPart1Response.etag()); - assertEquals(new TestHttpResponse(RestStatus.OK, "", part1Etag), uploadPart1Response); + assertEquals(new TestHttpResponse(RestStatus.OK, etagHeader(part1Etag)), uploadPart1Response); final var part2 = randomAlphaOfLength(50); final var uploadPart2Response = handleRequest(handler, "PUT", "/bucket/path/blob?uploadId=" + uploadId + "&partNumber=2", part2); final var part2Etag = Objects.requireNonNull(uploadPart2Response.etag()); - assertEquals(new TestHttpResponse(RestStatus.OK, "", part2Etag), uploadPart2Response); + assertEquals(new TestHttpResponse(RestStatus.OK, etagHeader(part2Etag)), uploadPart2Response); assertEquals( new TestHttpResponse(RestStatus.OK, Strings.format(""" @@ -176,12 +233,12 @@ public void testListAndAbortMultipartUpload() { final var part1 = randomAlphaOfLength(50); final var uploadPart1Response = handleRequest(handler, "PUT", "/bucket/path/blob?uploadId=" + uploadId + "&partNumber=1", part1); final var part1Etag = Objects.requireNonNull(uploadPart1Response.etag()); - assertEquals(new 
TestHttpResponse(RestStatus.OK, "", part1Etag), uploadPart1Response); + assertEquals(new TestHttpResponse(RestStatus.OK, etagHeader(part1Etag)), uploadPart1Response); final var part2 = randomAlphaOfLength(50); final var uploadPart2Response = handleRequest(handler, "PUT", "/bucket/path/blob?uploadId=" + uploadId + "&partNumber=2", part2); final var part2Etag = Objects.requireNonNull(uploadPart2Response.etag()); - assertEquals(new TestHttpResponse(RestStatus.OK, "", part2Etag), uploadPart2Response); + assertEquals(new TestHttpResponse(RestStatus.OK, etagHeader(part2Etag)), uploadPart2Response); assertEquals( new TestHttpResponse(RestStatus.OK, Strings.format(""" @@ -217,6 +274,10 @@ public void testListAndAbortMultipartUpload() { """, part1Etag, part2Etag)).status()); } + private static String getUploadId(BytesReference createUploadResponseBody) { + return getUploadId(createUploadResponseBody.utf8ToString()); + } + private static String getUploadId(String createUploadResponseBody) { final var startTag = ""; final var startTagPosition = createUploadResponseBody.indexOf(startTag); @@ -279,9 +340,17 @@ private void runExtractPartETagsTest(String body, String... expectedTags) { assertEquals(List.of(expectedTags), S3HttpHandler.extractPartEtags(new BytesArray(body.getBytes(StandardCharsets.UTF_8)))); } - private record TestHttpResponse(RestStatus status, String body, @Nullable String etag) { + private record TestHttpResponse(RestStatus status, BytesReference body, Headers headers) { TestHttpResponse(RestStatus status, String body) { - this(status, body, null); + this(status, new BytesArray(body.getBytes(StandardCharsets.UTF_8)), TestHttpExchange.EMPTY_HEADERS); + } + + TestHttpResponse(RestStatus status, Headers headers) { + this(status, BytesArray.EMPTY, headers); + } + + String etag() { + return headers.getFirst("ETag"); } } @@ -290,20 +359,67 @@ private static TestHttpResponse handleRequest(S3HttpHandler handler, String meth } private static TestHttpResponse handleRequest(S3HttpHandler handler, String method, String uri, String requestBody) { - final var httpExchange = new TestHttpExchange(method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8))); + return handleRequest(handler, method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8))); + } + + private static TestHttpResponse handleRequest(S3HttpHandler handler, String method, String uri, BytesReference requestBody) { + return handleRequest(handler, method, uri, requestBody, TestHttpExchange.EMPTY_HEADERS); + } + + private static TestHttpResponse handleRequest( + S3HttpHandler handler, + String method, + String uri, + BytesReference requestBody, + Headers requestHeaders + ) { + final var httpExchange = new TestHttpExchange(method, uri, requestBody, requestHeaders); try { handler.handle(httpExchange); } catch (IOException e) { fail(e); } assertNotEquals(0, httpExchange.getResponseCode()); + var responseHeaders = new Headers(); + httpExchange.getResponseHeaders().forEach((header, values) -> { + // com.sun.net.httpserver.Headers.Headers() normalize keys + if ("Etag".equals(header) || "Content-range".equals(header)) { + responseHeaders.put(header, List.copyOf(values)); + } + }); return new TestHttpResponse( RestStatus.fromCode(httpExchange.getResponseCode()), - httpExchange.getResponseBodyContents().utf8ToString(), - httpExchange.getResponseHeaders().getFirst("ETag") + httpExchange.getResponseBodyContents(), + responseHeaders ); } + private static Headers bytesRangeHeader(@Nullable Integer startInclusive, 
@Nullable Integer endInclusive) { + StringBuilder range = new StringBuilder("bytes="); + if (startInclusive != null) { + range.append(startInclusive); + } + range.append('-'); + if (endInclusive != null) { + range.append(endInclusive); + } + var headers = new Headers(); + headers.put("Range", List.of(range.toString())); + return headers; + } + + private static Headers etagHeader(String etag) { + var headers = new Headers(); + headers.put("ETag", List.of(Objects.requireNonNull(etag))); + return headers; + } + + private static Headers contentRangeHeader(long start, long end, long length) { + var headers = new Headers(); + headers.put("Content-Range", List.of(Strings.format("bytes %d-%d/%d", start, end, length))); + return headers; + } + private static class TestHttpExchange extends HttpExchange { private static final Headers EMPTY_HEADERS = new Headers(); @@ -311,20 +427,22 @@ private static class TestHttpExchange extends HttpExchange { private final String method; private final URI uri; private final BytesReference requestBody; + private final Headers requestHeaders; private final Headers responseHeaders = new Headers(); private final BytesStreamOutput responseBody = new BytesStreamOutput(); private int responseCode; - TestHttpExchange(String method, String uri, BytesReference requestBody) { + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { this.method = method; this.uri = URI.create(uri); this.requestBody = requestBody; + this.requestHeaders = requestHeaders; } @Override public Headers getRequestHeaders() { - return EMPTY_HEADERS; + return requestHeaders; } @Override From d84b46c6d28970b169b10d05861db20605c58c25 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Mon, 18 Mar 2024 12:02:46 +0100 Subject: [PATCH 235/248] Make new spatial sort tests less flaky (#106401) The tests that assert that sorting on spatial types causes consistent error messages were also flaky in the non-error-message cases, under rare circumstances where the results were returned in a different order. We now sort those on a sortable field for deterministic behaviour.
--- .../rest-api-spec/test/esql/130_spatial.yml | 80 ++++++++++++------- 1 file changed, 52 insertions(+), 28 deletions(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml index 9368df6c81604..4727a5394cf3d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/130_spatial.yml @@ -13,6 +13,8 @@ setup: properties: location: type: geo_point + id: + type: integer - do: bulk: @@ -20,9 +22,9 @@ setup: refresh: true body: - { "index": { } } - - { "location": "POINT(1 -1)" } + - { "id": 1, "location": "POINT(1 -1)" } - { "index": { } } - - { "location": "POINT(-1 1)" } + - { "id": 2, "location": "POINT(-1 1)" } - do: indices.create: @@ -32,6 +34,8 @@ setup: properties: location: type: point + id: + type: integer - do: bulk: @@ -39,9 +43,9 @@ setup: refresh: true body: - { "index": { } } - - { "location": "POINT(4321 -1234)" } + - { "id": 1, "location": "POINT(4321 -1234)" } - { "index": { } } - - { "location": "POINT(-4321 1234)" } + - { "id": 2, "location": "POINT(-4321 1234)" } - do: indices.create: @@ -51,6 +55,8 @@ setup: properties: shape: type: geo_shape + id: + type: integer - do: bulk: @@ -58,9 +64,9 @@ setup: refresh: true body: - { "index": { } } - - { "shape": "POINT(0 0)" } + - { "id": 1, "shape": "POINT(0 0)" } - { "index": { } } - - { "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } + - { "id": 2, "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } - do: indices.create: @@ -70,6 +76,8 @@ setup: properties: shape: type: shape + id: + type: integer - do: bulk: @@ -77,9 +85,9 @@ setup: refresh: true body: - { "index": { } } - - { "shape": "POINT(0 0)" } + - { "id": 1, "shape": "POINT(0 0)" } - { "index": { } } - - { "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } + - { "id": 2, "shape": "POLYGON((-1 -1, 1 -1, 1 1, -1 1, -1 -1))" } --- geo_point: @@ -88,12 +96,16 @@ geo_point: - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: - query: 'from geo_points' - - match: { columns.0.name: location } - - match: { columns.0.type: geo_point } + query: 'from geo_points | sort id' + - match: { columns.0.name: id } + - match: { columns.0.type: integer } + - match: { columns.1.name: location } + - match: { columns.1.type: geo_point } - length: { values: 2 } - - match: { values.0.0: "POINT (1.0 -1.0)" } - - match: { values.1.0: "POINT (-1.0 1.0)" } + - match: { values.0.0: 1 } + - match: { values.1.0: 2 } + - match: { values.0.1: "POINT (1.0 -1.0)" } + - match: { values.1.1: "POINT (-1.0 1.0)" } --- geo_point unsortable: @@ -126,12 +138,16 @@ cartesian_point: - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: - query: 'from cartesian_points' - - match: { columns.0.name: location } - - match: { columns.0.type: cartesian_point } + query: 'from cartesian_points | sort id' + - match: { columns.0.name: id } + - match: { columns.0.type: integer } + - match: { columns.1.name: location } + - match: { columns.1.type: cartesian_point } - length: { values: 2 } - - match: { values.0.0: "POINT (4321.0 -1234.0)" } - - match: { values.1.0: "POINT (-4321.0 1234.0)" } + - match: { values.0.0: 1 } + - match: { values.1.0: 2 } + - match: { values.0.1: "POINT (4321.0 -1234.0)" } + - match: { values.1.1: "POINT (-4321.0 1234.0)" } --- cartesian_point unsortable: @@ -164,12 +180,16 @@ geo_shape: - "No limit defined, adding default 
limit of \\[.*\\]" esql.query: body: - query: 'from geo_shapes' - - match: { columns.0.name: shape } - - match: { columns.0.type: geo_shape } + query: 'from geo_shapes | sort id' + - match: { columns.0.name: id } + - match: { columns.0.type: integer } + - match: { columns.1.name: shape } + - match: { columns.1.type: geo_shape } - length: { values: 2 } - - match: { values.0.0: "POINT (0.0 0.0)" } - - match: { values.1.0: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } + - match: { values.0.0: 1 } + - match: { values.1.0: 2 } + - match: { values.0.1: "POINT (0.0 0.0)" } + - match: { values.1.1: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } --- geo_shape unsortable: @@ -202,12 +222,16 @@ cartesian_shape: - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: - query: 'from cartesian_shapes' - - match: { columns.0.name: shape } - - match: { columns.0.type: cartesian_shape } + query: 'from cartesian_shapes | sort id' + - match: { columns.0.name: id } + - match: { columns.0.type: integer } + - match: { columns.1.name: shape } + - match: { columns.1.type: cartesian_shape } - length: { values: 2 } - - match: { values.0.0: "POINT (0.0 0.0)" } - - match: { values.1.0: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } + - match: { values.0.0: 1 } + - match: { values.1.0: 2 } + - match: { values.0.1: "POINT (0.0 0.0)" } + - match: { values.1.1: "POLYGON ((-1.0 -1.0, 1.0 -1.0, 1.0 1.0, -1.0 1.0, -1.0 -1.0))" } --- cartesian_shape unsortable: From c252b40ea63aa764c4a88e12d37f1e51af9af768 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 18 Mar 2024 12:18:14 +0000 Subject: [PATCH 236/248] Introduce `RemoteClusterService.DisconnectedStrategy` (#106334) Today the behaviour of remote cluster clients that encounter a disconnected remote cluster depends on how the client was obtained: instances obtained from `NodeClient#getRemoteClusterClient` will always try and reconnect before failing, whereas instances obtained from `RemoteClusterService#getRemoteClusterClient` will only do so if the remote is configured with `skip_unavailable: false`. This distinction is important, but today's implementation is subtle and trappy. This commit adds a parameter to both APIs to make the choice explicit. 
Relates #80589 Closes #106316 --- .../action/PainlessExecuteAction.java | 7 ++- .../indices/resolve/ResolveIndexAction.java | 3 +- .../TransportResolveClusterAction.java | 2 +- .../TransportFieldCapabilitiesAction.java | 7 ++- .../action/search/TransportSearchAction.java | 12 +++- .../elasticsearch/client/internal/Client.java | 7 ++- .../client/internal/FilterClient.java | 9 ++- .../internal/ParentTaskAssigningClient.java | 9 ++- .../client/internal/node/NodeClient.java | 8 ++- .../transport/RemoteClusterService.java | 57 ++++++++++++------- .../ParentTaskAssigningClientTests.java | 13 ++++- .../transport/RemoteClusterClientTests.java | 26 +++++++-- .../test/client/NoOpNodeClient.java | 6 +- .../xpack/ccr/CcrLicenseChecker.java | 13 ++++- .../ccr/action/ShardFollowTasksExecutor.java | 4 +- .../TransportPutAutoFollowPatternAction.java | 7 ++- .../ccr/action/TransportPutFollowAction.java | 7 ++- .../action/TransportResumeFollowAction.java | 7 ++- .../ccr/action/TransportUnfollowAction.java | 7 ++- .../xpack/ccr/repository/CcrRepository.java | 13 ++++- .../action/AutoFollowCoordinatorTests.java | 24 ++++---- .../license/RemoteClusterLicenseChecker.java | 8 ++- .../action/TransportTermsEnumAction.java | 6 +- .../RemoteClusterLicenseCheckerTests.java | 46 ++++++++++++--- .../validation/SourceDestValidatorTests.java | 6 +- ...lusterSecurityFcActionAuthorizationIT.java | 22 +++++-- .../checkpoint/TransformCCSCanMatchIT.java | 25 +++++++- .../checkpoint/DefaultCheckpointProvider.java | 8 ++- .../DefaultCheckpointProviderTests.java | 6 +- 29 files changed, 289 insertions(+), 86 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 47defea0a1f95..7f5f1fe4f84ea 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -95,6 +95,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -542,7 +543,11 @@ protected void doExecute(Task task, Request request, ActionListener li // forward to remote cluster after stripping off the clusterAlias from the index expression removeClusterAliasFromIndexExpression(request); transportService.getRemoteClusterService() - .getRemoteClusterClient(request.getContextSetup().getClusterAlias(), EsExecutors.DIRECT_EXECUTOR_SERVICE) + .getRemoteClusterClient( + request.getContextSetup().getClusterAlias(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ) .execute(PainlessExecuteAction.REMOTE_TYPE, request, listener); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index e402eaa9c0edd..da0cc956cf9cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -496,7 +496,8 @@ 
protected void doExecute(Task task, Request request, final ActionListener { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java index b3f2015b9f5ae..8b171b0d12bf5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java @@ -143,7 +143,7 @@ protected void doExecuteForked(Task task, ResolveClusterActionRequest request, A RemoteClusterClient remoteClusterClient = remoteClusterService.getRemoteClusterClient( clusterAlias, searchCoordinationExecutor, - true + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED ); var remoteRequest = new ResolveClusterActionRequest(originalIndices.indices(), request.indicesOptions()); // allow cancellation requests to propagate to remote clusters diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index e28434623601a..e6acaba8307f6 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; @@ -224,7 +225,11 @@ private void doExecuteForked(Task task, FieldCapabilitiesRequest request, final String clusterAlias = remoteIndices.getKey(); OriginalIndices originalIndices = remoteIndices.getValue(); var remoteClusterClient = transportService.getRemoteClusterService() - .getRemoteClusterClient(clusterAlias, searchCoordinationExecutor); + .getRemoteClusterClient( + clusterAlias, + searchCoordinationExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ); FieldCapabilitiesRequest remoteRequest = prepareRemoteRequest(request, originalIndices, nowInMillis); ActionListener remoteListener = ActionListener.wrap(response -> { for (FieldCapabilitiesIndexResponse resp : response.getIndexResponses()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 0922e15999e8c..2255681e275b9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -533,7 +533,11 @@ static void ccsRemoteReduce( timeProvider.absoluteStartMillis(), true ); - var remoteClusterClient = remoteClusterService.getRemoteClusterClient(clusterAlias, remoteClientResponseExecutor); + var remoteClusterClient = remoteClusterService.getRemoteClusterClient( + clusterAlias, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ); remoteClusterClient.execute(TransportSearchAction.REMOTE_TYPE, ccsSearchRequest, new ActionListener<>() { @Override public void 
onResponse(SearchResponse searchResponse) { @@ -612,7 +616,11 @@ public void onFailure(Exception e) { task.getProgressListener(), listener ); - final var remoteClusterClient = remoteClusterService.getRemoteClusterClient(clusterAlias, remoteClientResponseExecutor); + final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( + clusterAlias, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ); remoteClusterClient.execute(TransportSearchAction.REMOTE_TYPE, ccsSearchRequest, ccsListener); } if (localIndices != null) { diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java index c6a2b0fee767f..6eb742fb5853e 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/Client.java +++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.transport.RemoteClusterService; import java.util.Map; import java.util.concurrent.Executor; @@ -413,7 +414,11 @@ public interface Client extends ElasticsearchClient { * @throws IllegalArgumentException if the given clusterAlias doesn't exist * @throws UnsupportedOperationException if this functionality is not available on this client. */ - default RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { + default RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { throw new UnsupportedOperationException("this client doesn't support remote cluster connections"); } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java index 7feafe69fbcc2..3e31340ec535d 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.internal.support.AbstractClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import java.util.concurrent.Executor; @@ -62,7 +63,11 @@ protected Client in() { } @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { - return in.getRemoteClusterClient(clusterAlias, responseExecutor); + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { + return in.getRemoteClusterClient(clusterAlias, responseExecutor, disconnectedStrategy); } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/ParentTaskAssigningClient.java b/server/src/main/java/org/elasticsearch/client/internal/ParentTaskAssigningClient.java index c5bcd5e94a4fb..61528ccaa2427 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ParentTaskAssigningClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ParentTaskAssigningClient.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; 
+import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportResponse; import java.util.concurrent.Executor; @@ -64,8 +65,12 @@ protected void } @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { - final var delegate = super.getRemoteClusterClient(clusterAlias, responseExecutor); + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { + final var delegate = super.getRemoteClusterClient(clusterAlias, responseExecutor, disconnectedStrategy); return new RemoteClusterClient() { @Override public void execute( diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java index cbfc325aec497..f37adc2fb5e2d 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java @@ -137,7 +137,11 @@ private Transpo } @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { - return remoteClusterService.getRemoteClusterClient(clusterAlias, responseExecutor, true); + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { + return remoteClusterService.getRemoteClusterClient(clusterAlias, responseExecutor, disconnectedStrategy); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index ce9ca88c6158d..d452a78431f74 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -540,15 +540,43 @@ public void onFailure(Exception e) { } } + /** + * Specifies how to behave when executing a request against a disconnected remote cluster. + */ + public enum DisconnectedStrategy { + /** + * Always try and reconnect before executing a request, waiting for {@link TransportSettings#CONNECT_TIMEOUT} before failing if the + * remote cluster is totally unresponsive. + */ + RECONNECT_IF_DISCONNECTED, + + /** + * Fail the request immediately if the remote cluster is disconnected (but also trigger another attempt to reconnect to the remote + * cluster in the background so that the next request might succeed). + */ + FAIL_IF_DISCONNECTED, + + /** + * Behave according to the {@link #REMOTE_CLUSTER_SKIP_UNAVAILABLE} setting for this remote cluster: if this setting is + * {@code false} (the default) then behave like {@link #RECONNECT_IF_DISCONNECTED}, but if it is {@code true} then behave like + * {@link #FAIL_IF_DISCONNECTED}. + */ + RECONNECT_UNLESS_SKIP_UNAVAILABLE + } + /** * Returns a client to the remote cluster if the given cluster alias exists. 
* - * @param clusterAlias the cluster alias the remote cluster is registered under - * @param responseExecutor the executor to use to process the response - * @param ensureConnected whether requests should wait for a connection attempt when there isn't a connection available + * @param clusterAlias the cluster alias the remote cluster is registered under + * @param responseExecutor the executor to use to process the response + * @param disconnectedStrategy how to handle the situation where the remote cluster is disconnected when executing a request * @throws IllegalArgumentException if the given clusterAlias doesn't exist */ - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor, boolean ensureConnected) { + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + DisconnectedStrategy disconnectedStrategy + ) { if (transportService.getRemoteClusterService().isEnabled() == false) { throw new IllegalArgumentException( "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" @@ -557,22 +585,11 @@ public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { throw new NoSuchRemoteClusterException(clusterAlias); } - return new RemoteClusterAwareClient(transportService, clusterAlias, responseExecutor, ensureConnected); - } - - /** - * Returns a client to the remote cluster if the given cluster alias exists. - * - * @param clusterAlias the cluster alias the remote cluster is registered under - * @param responseExecutor the executor to use to process the response - * @throws IllegalArgumentException if the given clusterAlias doesn't exist - */ - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { - return getRemoteClusterClient( - clusterAlias, - responseExecutor, - transportService.getRemoteClusterService().isSkipUnavailable(clusterAlias) == false - ); + return new RemoteClusterAwareClient(transportService, clusterAlias, responseExecutor, switch (disconnectedStrategy) { + case RECONNECT_IF_DISCONNECTED -> true; + case FAIL_IF_DISCONNECTED -> false; + case RECONNECT_UNLESS_SKIP_UNAVAILABLE -> transportService.getRemoteClusterService().isSkipUnavailable(clusterAlias) == false; + }); } Collection getConnections() { diff --git a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java index 9bda2f064072c..f0f44407642d8 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportResponse; import java.util.concurrent.Executor; @@ -67,7 +68,11 @@ public void testRemoteClientIsAlsoAParentAssigningClient() { try (var threadPool = createThreadPool()) { final var mockClient = new NoOpClient(threadPool) { @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor 
responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { return new RemoteClusterClient() { @Override public void execute( @@ -83,7 +88,11 @@ public void }; final var client = new ParentTaskAssigningClient(mockClient, parentTaskId); - final var remoteClusterClient = client.getRemoteClusterClient("remote-cluster", EsExecutors.DIRECT_EXECUTOR_SERVICE); + final var remoteClusterClient = client.getRemoteClusterClient( + "remote-cluster", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomFrom(RemoteClusterService.DisconnectedStrategy.values()) + ); assertEquals( "fake remote-cluster client", expectThrows( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 44fa6ca8cbdf1..1b37dcf18fff0 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -97,7 +97,7 @@ public void testConnectAndExecuteRequest() throws Exception { var client = remoteClusterService.getRemoteClusterClient( "test", threadPool.executor(TEST_THREAD_POOL_NAME), - randomBoolean() + randomFrom(RemoteClusterService.DisconnectedStrategy.values()) ); ClusterStateResponse clusterStateResponse = PlainActionFuture.get( future -> client.execute( @@ -171,7 +171,14 @@ public void testEnsureWeReconnect() throws Exception { connectionManager.disconnectFromNode(remoteNode); closeFuture.get(); - var client = remoteClusterService.getRemoteClusterClient("test", EsExecutors.DIRECT_EXECUTOR_SERVICE, true); + var client = remoteClusterService.getRemoteClusterClient( + "test", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomFrom( + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ) + ); ClusterStateResponse clusterStateResponse = PlainActionFuture.get( f -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(), f) ); @@ -199,7 +206,11 @@ public void testRemoteClusterServiceNotEnabled() { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> remoteClusterService.getRemoteClusterClient("test", EsExecutors.DIRECT_EXECUTOR_SERVICE, randomBoolean()) + () -> remoteClusterService.getRemoteClusterClient( + "test", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomFrom(RemoteClusterService.DisconnectedStrategy.values()) + ) ); assertThat(e.getMessage(), equalTo("this node does not have the remote_cluster_client role")); } @@ -242,7 +253,14 @@ public void testQuicklySkipUnavailableClusters() throws Exception { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); - var client = remoteClusterService.getRemoteClusterClient("test", EsExecutors.DIRECT_EXECUTOR_SERVICE); + var client = remoteClusterService.getRemoteClusterClient( + "test", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomFrom( + RemoteClusterService.DisconnectedStrategy.FAIL_IF_DISCONNECTED, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ) + ); try { assertFalse(remoteClusterService.isRemoteNodeConnected("test", remoteNode)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java 
b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java index 70e3d1ddcdef1..fe770ca3328bf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java @@ -79,7 +79,11 @@ public String getLocalNodeId() { } @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { return null; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index e4af826ba5066..b4607e002f27e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -42,6 +42,7 @@ import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.xpack.ccr.action.CcrRequests; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; @@ -123,7 +124,11 @@ public void checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( final Consumer onFailure, final BiConsumer> consumer ) { - final var remoteClient = client.getRemoteClusterClient(clusterAlias, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)); + final var remoteClient = client.getRemoteClusterClient( + clusterAlias, + client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); checkRemoteClusterLicenseAndFetchClusterState( client, clusterAlias, @@ -199,7 +204,11 @@ public static void checkRemoteClusterLicenseAndFetchClusterState( try { var remoteClient = systemClient( client.threadPool().getThreadContext(), - client.getRemoteClusterClient(clusterAlias, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) + client.getRemoteClusterClient( + clusterAlias, + client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ) ); checkRemoteClusterLicenseAndFetchClusterState( client, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 942bee6a9d47b..585bf2491bfc4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -63,6 +63,7 @@ import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NoSuchRemoteClusterException; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrRetentionLeases; import org.elasticsearch.xpack.ccr.CcrSettings; @@ -574,7 +575,8 @@ private RemoteClusterClient remoteClient(ShardFollowTask params) { // this client is only used for lightweight single-index metadata responses and for the shard-changes 
actions themselves // which are about as easy to parse as shard bulks, and which handle their own forking, so we can handle responses on the // transport thread - EsExecutors.DIRECT_EXECUTOR_SERVICE + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED ), params.getHeaders(), clusterService.state() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index 72c1f384edd5f..ef27988d0416f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; @@ -100,7 +101,11 @@ protected void masterOperation( listener.onFailure(new IllegalArgumentException(message)); return; } - final var remoteClient = client.getRemoteClusterClient(request.getRemoteCluster(), remoteClientResponseExecutor); + final var remoteClient = client.getRemoteClusterClient( + request.getRemoteCluster(), + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); final Map filteredHeaders = ClientHelper.getPersistableSafeSecurityHeaders( threadPool.getThreadContext(), clusterService.state() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index c3dd30bd2f242..0eff80709ccd4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; @@ -111,7 +112,11 @@ protected void masterOperation( } String remoteCluster = request.getRemoteCluster(); // Validates whether the leader cluster has been configured properly: - client.getRemoteClusterClient(remoteCluster, remoteClientResponseExecutor); + client.getRemoteClusterClient( + remoteCluster, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); String leaderIndex = request.getLeaderIndex(); ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index cad1a37a3a17d..848060f102222 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; @@ -145,7 +146,11 @@ protected void masterOperation( } final String leaderCluster = ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY); // Validates whether the leader cluster has been configured properly: - client.getRemoteClusterClient(leaderCluster, remoteClientResponseExecutor); + client.getRemoteClusterClient( + leaderCluster, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); final String leaderIndex = ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( client, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index dcbd8dfc70c4a..edcfc0d8d77d6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrRetentionLeases; @@ -123,7 +124,11 @@ public void clusterStateProcessed(final ClusterState oldState, final ClusterStat final RemoteClusterClient remoteClient; try { - remoteClient = client.getRemoteClusterClient(remoteClusterName, remoteClientResponseExecutor); + remoteClient = client.getRemoteClusterClient( + remoteClusterName, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); } catch (Exception e) { onLeaseRemovalFailure(indexMetadata.getIndex(), retentionLeaseId, e); return; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 2702a2e28546c..06e902caf0105 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -82,6 +82,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrRetentionLeases; @@ -180,7 +181,11 @@ public RepositoryMetadata getMetadata() { } private RemoteClusterClient getRemoteClusterClient() { - return client.getRemoteClusterClient(remoteClusterAlias, remoteClientResponseExecutor); + return client.getRemoteClusterClient( + remoteClusterAlias, + 
remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); } @Override @@ -592,7 +597,11 @@ void openSession( ActionListener responseListener = listener.map( response -> new RestoreSession( repositoryName, - client.getRemoteClusterClient(remoteClusterAlias, chunkResponseExecutor), + client.getRemoteClusterClient( + remoteClusterAlias, + chunkResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ), sessionUUID, response.getNode(), indexShardId, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index d08b4c0f503d8..c0b09b8389e65 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -97,7 +97,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { public void testAutoFollower() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); @@ -167,7 +167,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa public void testAutoFollower_dataStream() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterStateWithDataStream("logs-foobar"); @@ -237,7 +237,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa public void testAutoFollowerClusterStateApiFailure() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); @@ -286,7 +286,7 @@ void updateAutoFollowMetadata(Function updateFunctio public void testAutoFollowerUpdateClusterStateFailure() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); @@ -649,7 +649,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa public void testAutoFollowerCreateAndFollowApiCallFailure() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new 
RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); @@ -1674,7 +1674,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { public void testWaitForMetadataVersion() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); @@ -1738,7 +1738,7 @@ void updateAutoFollowMetadata(Function updateFunctio public void testWaitForTimeOut() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); @@ -1790,7 +1790,7 @@ void updateAutoFollowMetadata(Function updateFunctio public void testAutoFollowerSoftDeletesDisabled() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterState("logs-20190101", false); @@ -1856,7 +1856,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa public void testAutoFollowerFollowerIndexAlreadyExists() { Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); @@ -2023,7 +2023,7 @@ void updateAutoFollowMetadata( public void testClosedIndicesAreNotAutoFollowed() { final Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); final String pattern = "pattern1"; final ClusterState localState = ClusterState.builder(new ClusterName("local")) @@ -2118,7 +2118,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa public void testExcludedPatternIndicesAreNotAutoFollowed() { final Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); final String pattern = "pattern1"; final ClusterState localState = ClusterState.builder(new ClusterName("local")) @@ -2419,7 +2419,7 @@ private Tuple, Set> execute ClusterState finalRemoteState ) { final Client client = mock(Client.class); - when(client.getRemoteClusterClient(anyString(), 
any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); + when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); final String pattern = "pattern1"; final ClusterState localState = ClusterState.builder(new ClusterName("local")) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java index 18125592a1f51..8db05703a3f0d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -20,6 +20,7 @@ import org.elasticsearch.protocol.xpack.license.LicenseStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.action.XPackInfoAction; import java.util.Collection; @@ -220,8 +221,11 @@ private void remoteClusterLicense(final String clusterAlias, final ActionListene final XPackInfoRequest request = new XPackInfoRequest(); request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); try { - client.getRemoteClusterClient(clusterAlias, remoteClientResponseExecutor) - .execute(XPackInfoAction.REMOTE_TYPE, request, contextPreservingActionListener); + client.getRemoteClusterClient( + clusterAlias, + remoteClientResponseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ).execute(XPackInfoAction.REMOTE_TYPE, request, contextPreservingActionListener); } catch (final Exception e) { contextPreservingActionListener.onFailure(e); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java index 61fc666829eea..734f674336809 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java @@ -639,7 +639,11 @@ void performRemoteClusterOperation(final String clusterAlias, final OriginalIndi try { TermsEnumRequest req = new TermsEnumRequest(request).indices(remoteIndices.indices()); - var remoteClient = remoteClusterService.getRemoteClusterClient(clusterAlias, coordinationExecutor); + var remoteClient = remoteClusterService.getRemoteClusterClient( + clusterAlias, + coordinationExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ); remoteClient.execute(TermsEnumAction.REMOTE_TYPE, req, new ActionListener<>() { @Override public void onResponse(TermsEnumResponse termsEnumResponse) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java index 99fe20fb220c5..0a3cdaf7c9b94 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.action.XPackInfoAction; import java.util.ArrayList; @@ -147,7 +148,11 @@ public void testCheckRemoteClusterLicensesGivenCompatibleLicenses() { final ThreadPool threadPool = createMockThreadPool(); final Client client = createMockClient(threadPool); - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; @@ -196,7 +201,11 @@ public void testCheckRemoteClusterLicensesGivenIncompatibleLicense() { final ThreadPool threadPool = createMockThreadPool(); final Client client = createMockClient(threadPool); - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; @@ -240,7 +249,11 @@ public void testCheckRemoteClusterLicencesGivenNonExistentCluster() { final String failingClusterAlias = randomFrom(remoteClusterAliases); final ThreadPool threadPool = createMockThreadPool(); final Client client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, failingClusterAlias); - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; @@ -285,7 +298,11 @@ public void testRemoteClusterLicenseCallUsesSystemContext() throws InterruptedEx try { final Client client = createMockClient(threadPool); - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { assertTrue(threadPool.getThreadContext().isSystemContext()); @SuppressWarnings("unchecked") @@ -321,7 +338,11 @@ public void testListenerIsExecutedWithCallingContext() throws InterruptedExcepti } else { client = createMockClient(threadPool); } - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; @@ -411,7 +432,11 @@ public void testBuildErrorMessageForInactiveLicense() { public void testCheckRemoteClusterLicencesNoLicenseMetadata() { final ThreadPool threadPool = createMockThreadPool(); final Client client = createMockClient(threadPool); - final RemoteClusterClient remoteClient = client.getRemoteClusterClient("", Runnable::run); + 
final RemoteClusterClient remoteClient = client.getRemoteClusterClient( + "", + Runnable::run, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ); doAnswer(invocationMock -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; @@ -478,14 +503,17 @@ private ThreadPool createMockThreadPool() { private Client createMockClient(final ThreadPool threadPool) { final var remoteClient = mock(RemoteClusterClient.class); - return createMockClient(threadPool, client -> when(client.getRemoteClusterClient(anyString(), any())).thenReturn(remoteClient)); + return createMockClient( + threadPool, + client -> when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(remoteClient) + ); } private Client createMockClientThatThrowsOnGetRemoteClusterClient(final ThreadPool threadPool, final String clusterAlias) { final var remoteClient = mock(RemoteClusterClient.class); return createMockClient(threadPool, client -> { - when(client.getRemoteClusterClient(eq(clusterAlias), any())).thenThrow(new IllegalArgumentException()); - when(client.getRemoteClusterClient(argThat(a -> not(clusterAlias).matches(a)), any())).thenReturn(remoteClient); + when(client.getRemoteClusterClient(eq(clusterAlias), any(), any())).thenThrow(new IllegalArgumentException()); + when(client.getRemoteClusterClient(argThat(a -> not(clusterAlias).matches(a)), any(), any())).thenReturn(remoteClient); }); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java index 9eefad61f943a..04e04f65b1182 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java @@ -164,7 +164,11 @@ private class MockClientLicenseCheck extends NoOpClient { } @Override - public RemoteClusterClient getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { + public RemoteClusterClient getRemoteClusterClient( + String clusterAlias, + Executor responseExecutor, + RemoteClusterService.DisconnectedStrategy disconnectedStrategy + ) { return new RedirectToLocalClusterRemoteClusterClient(this); } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index f02c1fd61a77e..a5ffeacf28112 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -70,6 +70,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.remotecluster.AbstractRemoteClusterSecurityTestCase.PASS; @@ -181,7 +182,12 @@ public void testIndicesPrivilegesAreEnforcedForCcrRestoreSessionActions() throws assertThat(remoteConnectionInfos, hasSize(1)); 
assertThat(remoteConnectionInfos.get(0).isConnected(), is(true)); - final var remoteClusterClient = remoteClusterService.getRemoteClusterClient("my_remote_cluster", threadPool.generic()); + Executor responseExecutor = threadPool.generic(); + final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( + "my_remote_cluster", + responseExecutor, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ); // Creating a restore session fails if index is not accessible final ShardId privateShardId = new ShardId("private-index", privateIndexUUID, 0); @@ -326,7 +332,8 @@ public void testRestApiKeyIsNotAllowedOnRemoteClusterPort() throws IOException { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( "my_remote_cluster", - EsExecutors.DIRECT_EXECUTOR_SERVICE + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE ); final ElasticsearchSecurityException e = expectThrows( @@ -399,7 +406,8 @@ public void testUpdateCrossClusterApiKey() throws Exception { assertThat(remoteConnectionInfos.get(0).isConnected(), is(true)); final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( "my_remote_cluster", - EsExecutors.DIRECT_EXECUTOR_SERVICE + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE ); // 1. Not accessible because API key does not grant the access @@ -513,8 +521,12 @@ public void testMalformedShardLevelActionIsRejected() throws Exception { final ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, () -> executeRemote( - remoteClusterService.getRemoteClusterClient("my_remote_cluster", threadPool.generic()), - new RemoteClusterActionType(TransportGetAction.TYPE.name() + "[s]", GetResponse::new), + remoteClusterService.getRemoteClusterClient( + "my_remote_cluster", + threadPool.generic(), + RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE + ), + new RemoteClusterActionType<>(TransportGetAction.TYPE.name() + "[s]", GetResponse::new), malformedGetRequest ) ); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index 5872e78e133d1..a7f7b5bd3edda 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -235,7 +236,13 @@ public void testGetCheckpointAction_MatchAllQuery() throws InterruptedException ); testGetCheckpointAction( threadContext, - CheckpointClient.remote(client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE)), + CheckpointClient.remote( + client().getRemoteClusterClient( + 
REMOTE_CLUSTER, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ) + ), REMOTE_CLUSTER, new String[] { "remote_*" }, QueryBuilders.matchAllQuery(), @@ -255,7 +262,13 @@ public void testGetCheckpointAction_RangeQuery() throws InterruptedException { ); testGetCheckpointAction( threadContext, - CheckpointClient.remote(client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE)), + CheckpointClient.remote( + client().getRemoteClusterClient( + REMOTE_CLUSTER, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ) + ), REMOTE_CLUSTER, new String[] { "remote_*" }, QueryBuilders.rangeQuery("@timestamp").from(timestamp), @@ -275,7 +288,13 @@ public void testGetCheckpointAction_RangeQueryThatMatchesNoShards() throws Inter ); testGetCheckpointAction( threadContext, - CheckpointClient.remote(client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE)), + CheckpointClient.remote( + client().getRemoteClusterClient( + REMOTE_CLUSTER, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + ) + ), REMOTE_CLUSTER, new String[] { "remote_*" }, QueryBuilders.rangeQuery("@timestamp").from(100_000_000), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java index f60429f954b78..d82c3369d0d12 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java @@ -147,7 +147,13 @@ protected void getIndexCheckpoints(TimeValue timeout, ActionListener Date: Mon, 18 Mar 2024 13:25:44 +0100 Subject: [PATCH 237/248] Add multi cluster text expansion tests (#106205) --- .../build.gradle | 4 +- .../test/multi_cluster/40_text_expansion.yml | 324 ++++++++++++++++++ .../test/remote_cluster/40_text_expansion.yml | 324 ++++++++++++++++++ 3 files changed, 650 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml create mode 100644 x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index d102490820a07..b19fa4ab5f185 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -24,7 +24,7 @@ def remoteCluster = testClusters.register('remote-cluster') { testDistribution = 'DEFAULT' versions = [ccsCompatVersion.toString(), project.version] numberOfNodes = 2 - setting 'node.roles', '[data,ingest,master]' + setting 'node.roles', '[data,ingest,master,ml]' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' @@ -35,7 +35,7 @@ def remoteCluster = testClusters.register('remote-cluster') { testClusters.register('mixed-cluster') { testDistribution = 'DEFAULT' numberOfNodes = 2 - setting 'node.roles', '[data,ingest,master]' + setting 
'node.roles', '[data,ingest,master,ml]' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml new file mode 100644 index 0000000000000..3ac52c2d45d0d --- /dev/null +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml @@ -0,0 +1,324 @@ +# This test tests cases covered by ML's text_expansion.yml +--- +setup: + - skip: + features: headers + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" #test_user credentials + indices.create: + index: index-with-sparse-vector + body: + mappings: + properties: + source_text: + type: keyword + ml.tokens: + type: sparse_vector + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.create: + index: unrelated + body: + mappings: + properties: + source_text: + type: keyword + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model: + model_id: "text_expansion_model" + body: > + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "text_expansion": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + } + } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model_vocabulary: + model_id: "text_expansion_model" + body: > + { "vocabulary": ["[PAD]", "[UNK]", "these", "are", "my", "words", "the", "washing", "machine", "is", "leaking", "octopus", "comforter", "smells"] } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model_definition_part: + model_id: "text_expansion_model" + part: 0 + body: > + { + "total_definition_length":2078, + "definition": 
"UEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAUAA4Ac2ltcGxlbW9kZWwvZGF0YS5wa2xGQgoAWlpaWlpaWlpaWoACY19fdG9yY2hfXwpUaW55VGV4dEV4cGFuc2lvbgpxACmBfShYCAAAAHRyYWluaW5ncQGJWBYAAABfaXNfZnVsbF9iYWNrd2FyZF9ob29rcQJOdWJxAy5QSwcIITmbsFgAAABYAAAAUEsDBBQACAgIAAAAAAAAAAAAAAAAAAAAAAAdAB0Ac2ltcGxlbW9kZWwvY29kZS9fX3RvcmNoX18ucHlGQhkAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWoWRT4+cMAzF7/spfASJomF3e0Ga3nrrn8vcELIyxAzRhAQlpjvbT19DWDrdquqBA/bvPT87nVUxwsm41xPd+PNtUi4a77KvXs+W8voBAHFSQY3EFCIiHKFp1+p57vs/ShyUccZdoIaz93aBTMR+thbPqru+qKBx8P4q/e8TyxRlmwVctJp66H1YmCyS7WsZwD50A2L5V7pCBADGTTOj0bGGE7noQyqzv5JDfp0o9fZRCWqP37yjhE4+mqX5X3AdFZHGM/2TzOHDpy1IvQWR+OWo3KwsRiKdpcqg4pBFDtm+QJ7nqwIPckrlnGfFJG0uNhOl38Sjut3pCqg26QuZy8BR9In7ScHHrKkKMW0TIucFrGQXCMpdaDO05O6DpOiy8e4kr0Ed/2YKOIhplW8gPr4ntygrd9ixpx3j9UZZVRagl2c6+imWUzBjuf5m+Ch7afphuvvW+r/0dsfn+2N9MZGb9+/SFtCYdhd83CMYp+mGy0LiKNs8y/eUuEA8B/d2z4dfUEsHCFSE3IaCAQAAIAMAAFBLAwQUAAgICAAAAAAAAAAAAAAAAAAAAAAAJwApAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYnVnX3BrbEZCJQBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpahZHLbtNAFIZtp03rSVIuLRKXjdk5ojitKJsiFq24lem0KKSqpRIZt55gE9/GM+lNLFgx4i1Ys2aHhIBXgAVICNggHgNm6rqJN2BZGv36/v/MOWeea/Z5RVHurLfRUsfZXOnccx522itrd53O0vLqbaKYtsAKUe1pcege7hm9JNtzM8+kOOzNApIX0A3xBXE6YE7g0UWjg2OaZAJXbKvALOnj2GEHKc496ykLktgNt3Jz17hprCUxFqExe7YIpQkNpO1/kfHhPUdtUAdH2/gfmeYiIFW7IkM6IBP2wrDNbMe3Mjf2ksiK3Hjghg7F2DN9l/omZZl5Mmez2QRk0q4WUUB0+1oh9nDwxGdUXJdXPMRZQs352eGaRPV9s2lcMeZFGWBfKJJiw0YgbCMLBaRmXyy4flx6a667Fch55q05QOq2Jg2ANOyZwplhNsjiohVApo7aa21QnNGW5+4GXv8gxK1beBeHSRrhmLXWVh+0aBhErZ7bx1ejxMOhlR6QU4ycNqGyk8/yNGCWkwY7/RCD7UEQek4QszCgDJAzZtfErA0VqHBy9ugQP9pUfUmgCjVYgWNwHFbhBJyEOgSwBuuwARWZmoI6J9PwLfzEocpRpPrT8DP8wqHG0b4UX+E3DiscvRglXIoi81KKPwioHI5x9EooNKWiy0KOc/T6WF4SssrRuzJ9L2VNRXUhJzj6UKYfS4W/q/5wuh/l4M9R9qsU+y2dpoo2hJzkaEET8r6KRONicnRdK9EbUi6raFVIwNGjsrlbpk6ZPi7TbS3fv3LyNjPiEKzG0aG0tvNb6xw90/whe6ONjnJcUxobHDUqQ8bIOW79BVBLBwhfSmPKdAIAAE4EAABQSwMEAAAICAAAAAAAAAAAAAAAAAAAAAAAABkABQBzaW1wbGVtb2RlbC9jb25zdGFudHMucGtsRkIBAFqAAikuUEsHCG0vCVcEAAAABAAAAFBLAwQAAAgIAAAAAAAAAAAAAAAAAAAAAAAAEwA7AHNpbXBsZW1vZGVsL3ZlcnNpb25GQjcAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWjMKUEsHCNGeZ1UCAAAAAgAAAFBLAQIAAAAACAgAAAAAAAAhOZuwWAAAAFgAAAAUAAAAAAAAAAAAAAAAAAAAAABzaW1wbGVtb2RlbC9kYXRhLnBrbFBLAQIAABQACAgIAAAAAABUhNyGggEAACADAAAdAAAAAAAAAAAAAAAAAKgAAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weVBLAQIAABQACAgIAAAAAABfSmPKdAIAAE4EAAAnAAAAAAAAAAAAAAAAAJICAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weS5kZWJ1Z19wa2xQSwECAAAAAAgIAAAAAAAAbS8JVwQAAAAEAAAAGQAAAAAAAAAAAAAAAACEBQAAc2ltcGxlbW9kZWwvY29uc3RhbnRzLnBrbFBLAQIAAAAACAgAAAAAAADRnmdVAgAAAAIAAAATAAAAAAAAAAAAAAAAANQFAABzaW1wbGVtb2RlbC92ZXJzaW9uUEsGBiwAAAAAAAAAHgMtAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAGoBAAAAAAAAUgYAAAAAAABQSwYHAAAAALwHAAAAAAAAAQAAAFBLBQYAAAAABQAFAGoBAABSBgAAAAA=", + "total_parts": 1 + } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Content-Type: application/json + bulk: + index: index-with-sparse-vector + refresh: true + body: | + {"index": {}} + {"source_text": "my words comforter", "ml.tokens":{"my":1.0, "words":1.0,"comforter":1.0}} + {"index": {}} + {"source_text": "the machine is leaking", "ml.tokens":{"the":1.0,"machine":1.0,"is":1.0,"leaking":1.0}} + {"index": {}} + {"source_text": "these are my words", "ml.tokens":{"these":1.0,"are":1.0,"my":1.0,"words":1.0}} + {"index": {}} + {"source_text": "the octopus comforter smells", "ml.tokens":{"the":1.0,"octopus":1.0,"comforter":1.0,"smells":1.0}} + {"index": {}} + {"source_text": "the octopus comforter is leaking", 
"ml.tokens":{"the":1.0,"octopus":1.0,"comforter":1.0,"is":1.0,"leaking":1.0}} + {"index": {}} + {"source_text": "washing machine smells", "ml.tokens":{"washing":1.0,"machine":1.0,"smells":1.0}} + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Content-Type: application/json + ml.start_trained_model_deployment: + model_id: text_expansion_model + wait_for: started + + +--- +teardown: + - skip: + features: headers + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.delete: + index: index-with-sparse-vector + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.delete: + index: unrelated + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.stop_trained_model_deployment: + model_id: text_expansion_model + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.delete_trained_model: + model_id: "text_expansion_model" + ignore: 404 + +--- +"Test text expansion search": + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test text expansion search with pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test named, boosted text expansion search with pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + - match: { hits.hits.0._score: 3.0 } + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + _name: i-like-naming-my-queries + boost: 100.0 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0._score: 300.0 } + +--- +"Test text expansion search with default pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: {} + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test text expansion search with weighted tokens rescoring only pruned tokens": + - skip: + 
version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } + +--- +"Test weighted tokens search": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 1 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: false + - match: { hits.total.value: 5 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test weighted tokens search with default pruning config": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: {} + - match: { hits.total.value: 5 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test weighted tokens search only scoring pruned tokens": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } + +--- +"Test weighted tokens search that prunes tokens based on frequency": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}, {"is": 1.0}, {"the": 1.0}, {"best": 1.0}, {"of": 1.0}, {"the": 1.0}, {"bunch": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 3 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml new file mode 100644 index 0000000000000..3ac52c2d45d0d --- /dev/null +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml @@ -0,0 +1,324 @@ +# This test tests cases covered by ML's text_expansion.yml +--- +setup: + - skip: + features: headers + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" #test_user credentials + indices.create: + index: index-with-sparse-vector + body: + mappings: + properties: + source_text: + type: keyword + ml.tokens: + type: sparse_vector + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.create: + index: unrelated + body: + mappings: + properties: + source_text: + type: keyword + + - do: + headers: + Authorization: "Basic 
dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model: + model_id: "text_expansion_model" + body: > + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "text_expansion": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + } + } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model_vocabulary: + model_id: "text_expansion_model" + body: > + { "vocabulary": ["[PAD]", "[UNK]", "these", "are", "my", "words", "the", "washing", "machine", "is", "leaking", "octopus", "comforter", "smells"] } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.put_trained_model_definition_part: + model_id: "text_expansion_model" + part: 0 + body: > + { + "total_definition_length":2078, + "definition": "UEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAUAA4Ac2ltcGxlbW9kZWwvZGF0YS5wa2xGQgoAWlpaWlpaWlpaWoACY19fdG9yY2hfXwpUaW55VGV4dEV4cGFuc2lvbgpxACmBfShYCAAAAHRyYWluaW5ncQGJWBYAAABfaXNfZnVsbF9iYWNrd2FyZF9ob29rcQJOdWJxAy5QSwcIITmbsFgAAABYAAAAUEsDBBQACAgIAAAAAAAAAAAAAAAAAAAAAAAdAB0Ac2ltcGxlbW9kZWwvY29kZS9fX3RvcmNoX18ucHlGQhkAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWoWRT4+cMAzF7/spfASJomF3e0Ga3nrrn8vcELIyxAzRhAQlpjvbT19DWDrdquqBA/bvPT87nVUxwsm41xPd+PNtUi4a77KvXs+W8voBAHFSQY3EFCIiHKFp1+p57vs/ShyUccZdoIaz93aBTMR+thbPqru+qKBx8P4q/e8TyxRlmwVctJp66H1YmCyS7WsZwD50A2L5V7pCBADGTTOj0bGGE7noQyqzv5JDfp0o9fZRCWqP37yjhE4+mqX5X3AdFZHGM/2TzOHDpy1IvQWR+OWo3KwsRiKdpcqg4pBFDtm+QJ7nqwIPckrlnGfFJG0uNhOl38Sjut3pCqg26QuZy8BR9In7ScHHrKkKMW0TIucFrGQXCMpdaDO05O6DpOiy8e4kr0Ed/2YKOIhplW8gPr4ntygrd9ixpx3j9UZZVRagl2c6+imWUzBjuf5m+Ch7afphuvvW+r/0dsfn+2N9MZGb9+/SFtCYdhd83CMYp+mGy0LiKNs8y/eUuEA8B/d2z4dfUEsHCFSE3IaCAQAAIAMAAFBLAwQUAAgICAAAAAAAAAAAAAAAAAAAAAAAJwApAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYnVnX3BrbEZCJQBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpahZHLbtNAFIZtp03rSVIuLRKXjdk5ojitKJsiFq24lem0KKSqpRIZt55gE9/GM+lNLFgx4i1Ys2aHhIBXgAVICNggHgNm6rqJN2BZGv36/v/MOWeea/Z5RVHurLfRUsfZXOnccx522itrd53O0vLqbaKYtsAKUe1pcege7hm9JNtzM8+kOOzNApIX0A3xBXE6YE7g0UWjg2OaZAJXbKvALOnj2GEHKc496ykLktgNt3Jz17hprCUxFqExe7YIpQkNpO1/kfHhPUdtUAdH2/gfmeYiIFW7IkM6IBP2wrDNbMe3Mjf2ksiK3Hjghg7F2DN9l/omZZl5Mmez2QRk0q4WUUB0+1oh9nDwxGdUXJdXPMRZQs352eGaRPV9s2lcMeZFGWBfKJJiw0YgbCMLBaRmXyy4flx6a667Fch55q05QOq2Jg2ANOyZwplhNsjiohVApo7aa21QnNGW5+4GXv8gxK1beBeHSRrhmLXWVh+0aBhErZ7bx1ejxMOhlR6QU4ycNqGyk8/yNGCWkwY7/RCD7UEQek4QszCgDJAzZtfErA0VqHBy9ugQP9pUfUmgCjVYgWNwHFbhBJyEOgSwBuuwARWZmoI6J9PwLfzEocpRpPrT8DP8wqHG0b4UX+E3DiscvRglXIoi81KKPwioHI5x9EooNKWiy0KOc/T6WF4SssrRuzJ9L2VNRXUhJzj6UKYfS4W/q/5wuh/l4M9R9qsU+y2dpoo2hJzkaEET8r6KRONicnRdK9EbUi6raFVIwNGjsrlbpk6ZPi7TbS3fv3LyNjPiEKzG0aG0tvNb6xw90/whe6ONjnJcUxobHDUqQ8bIOW79BVBLBwhfSmPKdAIAAE4EAABQSwMEAAAICAAAAAAAAAAAAAAAAAAAAAAAABkABQBzaW1wbGVtb2RlbC9jb25zdGFudHMucGtsRkIBAFqAAikuUEsHCG0vCVcEAAAABAAAAFBLAwQAAAgIAAAAAAAAAAAAAAAAAAAAAAAAEwA7AHNpbXBsZW1vZGVsL3ZlcnNpb25GQjcAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWjMKUEsHCNGeZ1UCAAAAAgAAAFBLAQIAAAAACAgAAAAAAAAhOZuwWAAAAFgAAAAUAAAAAAAAAAAAAAAAAAAAAABzaW1wbGVtb2RlbC9kYXRhLnBrbFBLAQIAABQACAgIAAAAAABUhNyGggEAACADAAAdAAAAAAAAAAAAAAAAAKgAAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weVBLAQIAABQACAgIAAAAAABfSmPKdAIAAE4EAAAnAAAAAAAAAAAAAAAAAJICAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weS5kZWJ1Z19wa2xQSwECAAAAAAgIAAAAAAAAbS8JVwQAAAAEAAAAGQAAAAAAAAAAAAAAAACEBQAAc2ltcGxlbW9kZWwvY29uc3RhbnRzLnBrbFBLAQIAAAAACAgAAAAAAADRnmdVAgAAAAIAAAATAAAAAAAAAAAAAAAAANQFAABzaW1wbGVtb2RlbC92ZXJzaW9uUEsGBiwAAAAAAAAAHgMtAAAAAAAAAAAABQAAAAAAAAAFAAAAA
AAAAGoBAAAAAAAAUgYAAAAAAABQSwYHAAAAALwHAAAAAAAAAQAAAFBLBQYAAAAABQAFAGoBAABSBgAAAAA=", + "total_parts": 1 + } + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Content-Type: application/json + bulk: + index: index-with-sparse-vector + refresh: true + body: | + {"index": {}} + {"source_text": "my words comforter", "ml.tokens":{"my":1.0, "words":1.0,"comforter":1.0}} + {"index": {}} + {"source_text": "the machine is leaking", "ml.tokens":{"the":1.0,"machine":1.0,"is":1.0,"leaking":1.0}} + {"index": {}} + {"source_text": "these are my words", "ml.tokens":{"these":1.0,"are":1.0,"my":1.0,"words":1.0}} + {"index": {}} + {"source_text": "the octopus comforter smells", "ml.tokens":{"the":1.0,"octopus":1.0,"comforter":1.0,"smells":1.0}} + {"index": {}} + {"source_text": "the octopus comforter is leaking", "ml.tokens":{"the":1.0,"octopus":1.0,"comforter":1.0,"is":1.0,"leaking":1.0}} + {"index": {}} + {"source_text": "washing machine smells", "ml.tokens":{"washing":1.0,"machine":1.0,"smells":1.0}} + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Content-Type: application/json + ml.start_trained_model_deployment: + model_id: text_expansion_model + wait_for: started + + +--- +teardown: + - skip: + features: headers + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.delete: + index: index-with-sparse-vector + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + indices.delete: + index: unrelated + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.stop_trained_model_deployment: + model_id: text_expansion_model + ignore: 404 + + - do: + headers: + Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + ml.delete_trained_model: + model_id: "text_expansion_model" + ignore: 404 + +--- +"Test text expansion search": + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test text expansion search with pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test named, boosted text expansion search with pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + - match: { hits.hits.0._score: 3.0 } + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + 
tokens_weight_threshold: 0.4 + _name: i-like-naming-my-queries + boost: 100.0 + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0._score: 300.0 } + +--- +"Test text expansion search with default pruning config": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: {} + - match: { hits.total.value: 4 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test text expansion search with weighted tokens rescoring only pruned tokens": + - skip: + version: " - 8.12.99" + reason: "pruning introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + text_expansion: + ml.tokens: + model_id: text_expansion_model + model_text: "octopus comforter smells" + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } + +--- +"Test weighted tokens search": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 1 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: false + - match: { hits.total.value: 5 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test weighted tokens search with default pruning config": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: {} + - match: { hits.total.value: 5 } + - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } + +--- +"Test weighted tokens search only scoring pruned tokens": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 4 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } + +--- +"Test weighted tokens search that prunes tokens based on frequency": + - skip: + version: " - 8.12.99" + reason: "weighted token search introduced in 8.13.0" + + - do: + search: + index: index-with-sparse-vector + body: + query: + weighted_tokens: + ml.tokens: + tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}, {"is": 1.0}, {"the": 1.0}, {"best": 1.0}, {"of": 1.0}, {"the": 1.0}, {"bunch": 1.0}] + pruning_config: + tokens_freq_ratio_threshold: 3 + tokens_weight_threshold: 0.4 + only_score_pruned_tokens: true + - match: { hits.total.value: 0 } From 853ee2c621616ef777d1dc33239809aba2e6f195 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 18 Mar 2024 13:35:10 +0100 Subject: [PATCH 238/248] Fix potential BigArray leak in InternalAggregation#getReducer (#106406) --- 
.../search/aggregations/InternalAggregation.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 690c70045daa0..e7422cab78100 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Releasables; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; @@ -139,6 +140,11 @@ public void accept(InternalAggregation aggregation) { public InternalAggregation get() { return aggregatorReducer == null ? current : aggregatorReducer.get(); } + + @Override + public void close() { + Releasables.close(aggregatorReducer); + } }; } From 6af6ba92cd975d73336ef90e315fbbca82e13114 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 18 Mar 2024 13:21:38 +0000 Subject: [PATCH 239/248] Update implementation of BaseRestHandler.unrecognized (#106408) --- .../elasticsearch/rest/BaseRestHandler.java | 67 ++++++++----------- 1 file changed, 29 insertions(+), 38 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 5ea80ac608b8f..d075983464f76 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -9,7 +9,6 @@ package org.elasticsearch.rest; import org.apache.lucene.search.spell.LevenshteinDistance; -import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -22,11 +21,11 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesUsageAction; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashSet; +import java.util.Iterator; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; @@ -108,46 +107,38 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl } } - protected static String unrecognized( - final RestRequest request, - final Set invalids, - final Set candidates, - final String detail - ) { - StringBuilder message = new StringBuilder( - String.format(Locale.ROOT, "request [%s] contains unrecognized %s%s: ", request.path(), detail, invalids.size() > 1 ? 
"s" : "") - ); - boolean first = true; - for (final String invalid : invalids) { - final LevenshteinDistance ld = new LevenshteinDistance(); - final List> scoredParams = new ArrayList<>(); - for (final String candidate : candidates) { - final float distance = ld.getDistance(invalid, candidate); - if (distance > 0.5f) { - scoredParams.add(new Tuple<>(distance, candidate)); - } - } - CollectionUtil.timSort(scoredParams, (a, b) -> { - // sort by distance in reverse order, then parameter name for equal distances - int compare = a.v1().compareTo(b.v1()); - if (compare != 0) return -compare; - else return a.v2().compareTo(b.v2()); - }); - if (first == false) { - message.append(", "); - } + protected static String unrecognized(RestRequest request, Set invalids, Set candidates, String detail) { + StringBuilder message = new StringBuilder().append("request [") + .append(request.path()) + .append("] contains unrecognized ") + .append(detail) + .append(invalids.size() > 1 ? "s" : "") + .append(": "); + + for (Iterator it = invalids.iterator(); it.hasNext();) { + String invalid = it.next(); + + LevenshteinDistance ld = new LevenshteinDistance(); + List candidateParams = candidates.stream() + .map(c -> Tuple.tuple(ld.getDistance(invalid, c), c)) + .filter(t -> t.v1() > 0.5f) + .sorted(Comparator., Float>comparing(Tuple::v1).reversed().thenComparing(Tuple::v2)) + .map(Tuple::v2) + .toList(); + message.append("[").append(invalid).append("]"); - final List keys = scoredParams.stream().map(Tuple::v2).toList(); - if (keys.isEmpty() == false) { + if (candidateParams.isEmpty() == false) { message.append(" -> did you mean "); - if (keys.size() == 1) { - message.append("[").append(keys.get(0)).append("]"); - } else { - message.append("any of ").append(keys.toString()); + if (candidateParams.size() > 1) { + message.append("any of "); } + message.append(candidateParams); message.append("?"); } - first = false; + + if (it.hasNext()) { + message.append(", "); + } } return message.toString(); From c1ce3e9a834887ab6e17cadad3faacb3fc694661 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Mon, 18 Mar 2024 14:45:16 +0100 Subject: [PATCH 240/248] ES|QL Expand support for ENRICH to full set supported by ES ingest processors (#106186) * Enrich skeleton * Initial support for CIDR range query in ENRICH * Remove to_string workaround for IP range ENRICH The original ENRICH allowed for an effective IP-range ENRICH using to_string, but this is not really the desired behavior, and was never documented nor advertised, so we'll remove it in preference to the new behaviour that directly compares IP's to ip-ranges. * Update docs/changelog/106186.yaml * Refined after code-review * Moved the switch outside the lambda * Minimized memory allocation for the byte[] used in IP decoding * Initial support for geo_match ENRICH * Fix failing tests for AnalyzerTests * Extended AnalyzerTests to include one CIDR ENRICH * Add support for integer_range Mostly adding new tests * Reduced flaky test * Fixed failing tests * Also removed unused file mapping-client_cidr.json since it turns out we use the file in the original location afterall * Used boolean as the failing type, since there are no plans to ever enrich on this type, but we do want to enrich on all numbers and dates * Make enrich long_range tests less flaky In CI the sharding seemed to result in variable ordering, so we changed to stats to remove that concern, as well as set fixed end-dates for ages to prevent the results changing when NOW() changes. 
* Add support for double_range in ENRICH. This already worked, so only the tests needed to be updated.
* Try to reduce flakiness of one test. This is likely due to the test framework issue with ENRICH indices, so we might want to remove this test, or stop doing direct tests on the source index.
* Try to reduce flakiness of two tests. One can return multiple results in some mixed clusters, and the other has sort-order issues in clusters.
* Remove flaky test that only tested old behaviour.
* Expand AnalyzerTests to cover new range and geo_match queries.
* Support date_range ENRICH.
* Input null support in geo_match ENRICH.
* Use the input data type to decide how to decode the block. Previously we used the match field data type, but that is not necessarily an exact match in all cases. Since we're reading the input block, we should use the input type for decoding that block. This only matters for blocks that need decoding; for example, a BytesRefBlock can be an encoded form of IP, Version, and all geo/spatial types.
* Reduced the strictness on non-geo types for match/range. This also allows keywords to be used in ip_range matches, which was the previous behaviour. However, it also means that invalid IP strings will throw errors. I added a test asserting on that error, and think this matches previous behaviour, but wonder if we should instead return null and set a warning.
* Remove the IP-as-string check in AnalyzerTests. Since we again support matching strings to ip-range, we need to remove this test that asserts otherwise.
* After removing the IP-string test, add a geo_match test instead.
* Simplify DataFailures in CsvAssert.
* Use the input field data type, not the enrich match data type, for DATES.
* Simplify validation of enrich types when the policy is null.
* Support doc-values for geo_point aggregations. If the enrich is before a spatial aggregation, points can appear as doc-values.
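To make the range-matching behaviour described in the bullets above concrete, here is a minimal, self-contained Java sketch of the kind of lookup an integer_range policy such as ages_policy performs: an integer input is matched against half-open [gte, lt) ranges to pick up the enrich value. This is a conceptual model only, not the ES|QL implementation (which builds Lucene queries against range fields); the class and method names are invented for illustration, and the data mirrors a subset of the ages.csv fixture added by this patch.

import java.util.List;
import java.util.Optional;

public class RangeEnrichSketch {

    // One half-open range [gte, lt) with its enrich value, like a row of the ages.csv fixture.
    record AgeRange(int gte, int lt, String description) {
        boolean matches(int value) {
            return value >= gte && value < lt;
        }
    }

    // A subset of the ages_policy rows added by this patch.
    private static final List<AgeRange> AGES = List.of(
        new AgeRange(0, 2, "Baby"),
        new AgeRange(2, 4, "Toddler"),
        new AgeRange(20, 40, "Young Adult"),
        new AgeRange(60, 80, "Senior")
    );

    // Returns the enriched description for an input value, if any range matches it.
    static Optional<String> enrich(int age) {
        return AGES.stream()
            .filter(range -> range.matches(age))
            .map(AgeRange::description)
            .findFirst();
    }

    public static void main(String[] args) {
        System.out.println(enrich(1));   // Optional[Baby]
        System.out.println(enrich(33));  // Optional[Young Adult]
        System.out.println(enrich(150)); // Optional.empty for this subset of rows
    }
}
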
--------- Co-authored-by: Nhat Nguyen --- docs/changelog/106186.yaml | 6 + .../elasticsearch/xpack/esql/CsvAssert.java | 20 +- .../xpack/esql/CsvTestUtils.java | 5 + .../xpack/esql/CsvTestsDataLoader.java | 64 +- .../testFixtures/src/main/resources/ages.csv | 11 + .../resources/airport_city_boundaries.csv | 778 ++++++++++++++++++ .../src/main/resources/client_cidr.csv | 4 + .../src/main/resources/date.csv-spec | 17 + .../src/main/resources/decades.csv | 14 + .../resources/enrich-IT_tests_only.csv-spec | 141 ++++ .../main/resources/enrich-policy-ages.json | 9 + .../enrich-policy-city_boundaries.json | 7 + .../resources/enrich-policy-city_names.json | 7 + .../resources/enrich-policy-client_cidr.json | 7 + .../main/resources/enrich-policy-decades.json | 10 + .../main/resources/enrich-policy-heights.json | 9 + .../src/main/resources/heights.csv | 6 + .../src/main/resources/mapping-ages.json | 10 + .../mapping-airport_city_boundaries.json | 22 + .../main/resources/mapping-client_cidr.json | 10 + .../src/main/resources/mapping-decades.json | 13 + .../src/main/resources/mapping-heights.json | 10 + .../main/resources/spatial_shapes.csv-spec | 12 + .../xpack/esql/analysis/Analyzer.java | 31 +- .../esql/enrich/EnrichLookupService.java | 14 +- .../xpack/esql/enrich/QueryList.java | 217 +++-- .../esql/analysis/AnalyzerTestUtils.java | 52 +- .../xpack/esql/analysis/AnalyzerTests.java | 56 +- .../EnrichQuerySourceOperatorTests.java | 5 +- .../optimizer/LogicalPlanOptimizerTests.java | 3 +- .../optimizer/PhysicalPlanOptimizerTests.java | 85 ++ .../test/resources/mapping-sample_data.json | 16 + .../rest-api-spec/test/esql/61_enrich_ip.yml | 37 +- 33 files changed, 1612 insertions(+), 96 deletions(-) create mode 100644 docs/changelog/106186.yaml create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-ages.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_boundaries.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_names.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-client_cidr.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-decades.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-heights.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-ages.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-airport_city_boundaries.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-client_cidr.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-decades.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-heights.json create mode 100644 x-pack/plugin/ql/src/test/resources/mapping-sample_data.json diff --git a/docs/changelog/106186.yaml b/docs/changelog/106186.yaml new file mode 100644 index 0000000000000..097639dd28f1b --- /dev/null +++ b/docs/changelog/106186.yaml @@ 
-0,0 +1,6 @@ +pr: 106186 +summary: Expand support for ENRICH to full set supported by ES ingest processors +area: ES|QL +type: enhancement +issues: + - 106162 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java index 38bd05d57d768..f8f406b269a22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java @@ -20,6 +20,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.BiFunction; import java.util.function.Function; @@ -169,6 +170,8 @@ public static void assertData( assertData(expected, EsqlTestUtils.getValuesList(actualValuesIterator), ignoreOrder, logger, valueTransformer); } + private record DataFailure(int row, int column, Object expected, Object actual) {} + public static void assertData( ExpectedResults expected, List> actualValues, @@ -181,6 +184,7 @@ public static void assertData( actualValues.sort(resultRowComparator(expected.columnTypes())); } var expectedValues = expected.values(); + ArrayList dataFailures = new ArrayList<>(); for (int row = 0; row < expectedValues.size(); row++) { try { @@ -220,11 +224,14 @@ public static void assertData( expectedValue = rebuildExpected(expectedValue, Long.class, x -> unsignedLongAsNumber((long) x)); } } - assertEquals( - "Row[" + row + "] Column[" + column + "]", - valueTransformer.apply(expectedType, expectedValue), - valueTransformer.apply(expectedType, actualValue) - ); + var transformedExpected = valueTransformer.apply(expectedType, expectedValue); + var transformedActual = valueTransformer.apply(expectedType, actualValue); + if (Objects.equals(transformedExpected, transformedActual) == false) { + dataFailures.add(new DataFailure(row, column, transformedExpected, transformedActual)); + } + if (dataFailures.size() > 10) { + fail("Data mismatch: " + dataFailures); + } } var delta = actualRow.size() - expectedRow.size(); @@ -239,6 +246,9 @@ public static void assertData( throw ae; } } + if (dataFailures.isEmpty() == false) { + fail("Data mismatch: " + dataFailures); + } if (expectedValues.size() < actualValues.size()) { fail( "Elasticsearch still has data after [" + expectedValues.size() + "] entries:\n" + row(actualValues, expectedValues.size()) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 438edecc76fbc..b495a6f1a6479 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; @@ -414,6 +415,10 @@ public enum Type { : ((BytesRef) l).compareTo((BytesRef) r), BytesRef.class ), + IP_RANGE(InetAddresses::parseCidr, BytesRef.class), + INTEGER_RANGE(s -> s == null ? 
null : Arrays.stream(s.split("-")).map(Integer::parseInt).toArray(), int[].class), + DOUBLE_RANGE(s -> s == null ? null : Arrays.stream(s.split("-")).map(Double::parseDouble).toArray(), double[].class), + DATE_RANGE(s -> s == null ? null : Arrays.stream(s.split("-")).map(BytesRef::new).toArray(), BytesRef[].class), VERSION(v -> new org.elasticsearch.xpack.versionfield.Version(v).toBytesRef(), BytesRef.class), NULL(s -> null, Void.class), DATETIME( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 9763c362c9b4b..c5730f3271945 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -56,6 +56,10 @@ public class CsvTestsDataLoader { private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs", "mapping-ul_logs.json", "ul_logs.csv"); private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data", "mapping-sample_data.json", "sample_data.csv"); private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips", "mapping-clientips.json", "clientips.csv"); + private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr", "mapping-client_cidr.json", "client_cidr.csv"); + private static final TestsDataset AGES = new TestsDataset("ages", "mapping-ages.json", "ages.csv"); + private static final TestsDataset HEIGHTS = new TestsDataset("heights", "mapping-heights.json", "heights.csv"); + private static final TestsDataset DECADES = new TestsDataset("decades", "mapping-decades.json", "decades.csv"); private static final TestsDataset AIRPORTS = new TestsDataset("airports", "mapping-airports.json", "airports.csv"); private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web", "mapping-airports_web.json", "airports_web.csv"); private static final TestsDataset COUNTRIES_BBOX = new TestsDataset( @@ -68,6 +72,11 @@ public class CsvTestsDataLoader { "mapping-countries_bbox_web.json", "countries_bbox_web.csv" ); + private static final TestsDataset AIRPORT_CITY_BOUNDARIES = new TestsDataset( + "airport_city_boundaries", + "mapping-airport_city_boundaries.json", + "airport_city_boundaries.csv" + ); public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), @@ -77,17 +86,45 @@ public class CsvTestsDataLoader { Map.entry(UL_LOGS.indexName, UL_LOGS), Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), + Map.entry(CLIENT_CIDR.indexName, CLIENT_CIDR), + Map.entry(AGES.indexName, AGES), + Map.entry(HEIGHTS.indexName, HEIGHTS), + Map.entry(DECADES.indexName, DECADES), Map.entry(AIRPORTS.indexName, AIRPORTS), Map.entry(AIRPORTS_WEB.indexName, AIRPORTS_WEB), Map.entry(COUNTRIES_BBOX.indexName, COUNTRIES_BBOX), - Map.entry(COUNTRIES_BBOX_WEB.indexName, COUNTRIES_BBOX_WEB) + Map.entry(COUNTRIES_BBOX_WEB.indexName, COUNTRIES_BBOX_WEB), + Map.entry(AIRPORT_CITY_BOUNDARIES.indexName, AIRPORT_CITY_BOUNDARIES) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); private static final EnrichConfig CLIENT_IPS_ENRICH = new EnrichConfig("clientip_policy", "enrich-policy-clientips.json"); + private static final EnrichConfig CLIENT_CIDR_ENRICH = new 
EnrichConfig("client_cidr_policy", "enrich-policy-client_cidr.json"); + private static final EnrichConfig AGES_ENRICH = new EnrichConfig("ages_policy", "enrich-policy-ages.json"); + private static final EnrichConfig HEIGHTS_ENRICH = new EnrichConfig("heights_policy", "enrich-policy-heights.json"); + private static final EnrichConfig DECADES_ENRICH = new EnrichConfig("decades_policy", "enrich-policy-decades.json"); + private static final EnrichConfig CITY_NAMES_ENRICH = new EnrichConfig("city_names", "enrich-policy-city_names.json"); + private static final EnrichConfig CITY_BOUNDARIES_ENRICH = new EnrichConfig("city_boundaries", "enrich-policy-city_boundaries.json"); - public static final List ENRICH_SOURCE_INDICES = List.of("languages", "clientips"); - public static final List ENRICH_POLICIES = List.of(LANGUAGES_ENRICH, CLIENT_IPS_ENRICH); + public static final List ENRICH_SOURCE_INDICES = List.of( + "languages", + "clientips", + "client_cidr", + "ages", + "heights", + "decades", + "airport_city_boundaries" + ); + public static final List ENRICH_POLICIES = List.of( + LANGUAGES_ENRICH, + CLIENT_IPS_ENRICH, + CLIENT_CIDR_ENRICH, + AGES_ENRICH, + HEIGHTS_ENRICH, + DECADES_ENRICH, + CITY_NAMES_ENRICH, + CITY_BOUNDARIES_ENRICH + ); /** *

        @@ -318,26 +355,20 @@ private static void loadCsvData( } // split on comma ignoring escaped commas String[] multiValues = entries[i].split(COMMA_ESCAPING_REGEX); - if (multiValues.length > 0) {// multi-value + if (multiValues.length > 1) { StringBuilder rowStringValue = new StringBuilder("["); for (String s : multiValues) { - if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { - rowStringValue.append("\"" + s + "\","); - } else { - rowStringValue.append(s + ","); - } + rowStringValue.append(quoteIfNecessary(s)).append(","); } // remove the last comma and put a closing bracket instead rowStringValue.replace(rowStringValue.length() - 1, rowStringValue.length(), "]"); entries[i] = rowStringValue.toString(); } else { - if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { - entries[i] = "\"" + entries[i] + "\""; - } + entries[i] = quoteIfNecessary(entries[i]); } // replace any escaped commas with single comma entries[i] = entries[i].replace(ESCAPED_COMMA_SEQUENCE, ","); - row.append("\"" + columns[i] + "\":" + entries[i]); + row.append("\"").append(columns[i]).append("\":").append(entries[i]); } catch (Exception e) { throw new IllegalArgumentException( format( @@ -364,11 +395,16 @@ private static void loadCsvData( } } } - if (builder.length() > 0) { + if (builder.isEmpty() == false) { sendBulkRequest(indexName, builder, client, logger); } } + private static String quoteIfNecessary(String value) { + boolean isQuoted = (value.startsWith("\"") && value.endsWith("\"")) || (value.startsWith("{") && value.endsWith("}")); + return isQuoted ? value : "\"" + value + "\""; + } + private static void sendBulkRequest(String indexName, StringBuilder builder, RestClient client, Logger logger) throws IOException { // The indexName is optional for a bulk request, but we use it for routing in MultiClusterSpecIT. 
builder.append("\n"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv new file mode 100644 index 0000000000000..8d9212379b837 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv @@ -0,0 +1,11 @@ +age_range:integer_range, description:keyword +{"gte": 0\, "lt":2}, Baby +{"gte": 2\, "lt":4}, Toddler +{"gte": 3\, "lt":5}, Preschooler +{"gte": 5\, "lt":12}, Child +{"gte": 13\, "lt":20}, Adolescent +{"gte": 20\, "lt":40}, Young Adult +{"gte": 40\, "lt":60}, Middle-aged +{"gte": 60\, "lt":80}, Senior +{"gte": 80\, "lt":100}, Elderly +{"gte": 100\, "lt":200}, Incredible diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv new file mode 100644 index 0000000000000..09002931e633b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv @@ -0,0 +1,778 @@ +abbrev:keyword,airport:text,region:text,city:keyword,city_location:geo_point,city_boundary:geo_shape +ABJ,Abidjan Port Bouet,Abidjan,Abidjan,POINT(-4.0333 5.3167),"POLYGON((-4.2941 5.307\,-4.2519 5.2994\,-4.2167 5.2209\,-3.978 5.2553\,-3.8548 5.2269\,-3.844 5.2541\,-3.8196 5.2507\,-3.8086 5.2893\,-3.7879 5.2728\,-3.7307 5.2737\,-3.7177 5.2945\,-3.7847 5.3634\,-3.8274 5.3747\,-3.8135 5.4275\,-3.8261 5.4324\,-3.8304 5.4854\,-3.8462 5.5089\,-3.8684 5.504\,-3.8966 5.5546\,-3.9219 5.5653\,-3.9356 5.6363\,-3.9853 5.6053\,-4.2579 5.6058\,-4.2439 5.49\,-4.2597 5.4862\,-4.2725 5.4497\,-4.2941 5.307))" +ABV,Abuja Int'l,Municipal Area Council,Abuja,POINT(7.4833 9.0667),"POLYGON((7.0972 8.9458\,7.3315 8.8215\,7.4793 8.8065\,7.4852 8.7724\,7.5231 8.7478\,7.482 8.6784\,7.4297 8.6384\,7.4776 8.6313\,7.5917 8.8349\,7.5887 9.1291\,7.6033 9.1614\,7.2498 9.1557\,7.1911 9.0483\,7.0972 8.9458))" +ACA,General Juan N Alvarez Int'l,Acapulco de Juárez,Acapulco de Juárez,POINT(-99.8825 16.8636),"POLYGON((-100.0022 16.914\,-99.6089 16.6834\,-99.4736 16.972\,-99.7223 17.2337\,-100.0022 16.914))" +ACC,Kotoka Int'l,Accra,Accra,POINT(-0.2 5.55),"POLYGON((-0.2841 5.572\,-0.272 5.5406\,-0.282 5.5172\,-0.1227 5.5661\,-0.0549 5.6077\,-0.1142 5.6466\,-0.1407 5.636\,-0.1515 5.6599\,-0.1904 5.6674\,-0.2671 5.6069\,-0.2841 5.572))" +ACV,Arcata-Eureka,Humboldt County,McKinleyville,POINT(-124.0857 40.9488),"POLYGON((-124.482 40.4403\,-124.4281 40.3618\,-124.4213 40.2371\,-124.2312 40.0934\,-124.1538 40.0635\,-124.1349 40.0025\,-123.5445 40.002\,-123.5435 40.7376\,-123.5661 40.789\,-123.5537 40.7962\,-123.5636 40.842\,-123.5876 40.8586\,-123.5801 40.8676\,-123.6103 40.8785\,-123.5973 40.8844\,-123.6238 40.9293\,-123.5874 40.928\,-123.5594 40.9503\,-123.4815 40.9148\,-123.4058 41.0134\,-123.4235 41.0582\,-123.4638 41.0761\,-123.464 41.0951\,-123.4396 41.0924\,-123.4325 41.1628\,-123.4081 41.1801\,-123.4541 41.2363\,-123.4431 41.273\,-123.4615 41.2826\,-123.4596 41.3101\,-123.4786 41.3296\,-123.4743 41.3663\,-123.4993 41.3824\,-123.7704 41.3808\,-123.7708 41.4641\,-124.1366 41.4645\,-124.14 41.3818\,-124.2065 41.3847\,-124.2424 41.3433\,-124.22 41.303\,-124.1607 41.2893\,-124.1955 41.1846\,-124.2376 41.166\,-124.2525 41.1341\,-124.2271 41.0466\,-124.1827 41.0013\,-124.1976 40.9505\,-124.2326 40.8693\,-124.4034 40.6132\,-124.482 40.4403))" +ADA,Şakirpaşa,Adana,Adana,POINT(35.3213 37.0),"POLYGON((34.7393 36.574\,35.4026 36.263\,35.965 36.671\,36.0962 37.155\,35.8806 37.1757\,35.9203 37.4744\,36.2673 
37.7557\,36.3747 38.3784\,35.7699 38.1024\,35.5128 37.7183\,34.8796 37.682\,34.7788 37.3309\,34.9746 37.2677\,35.1471 36.7991\,34.7393 36.574))" +ADB,Adnan Menderes,İzmir,İzmir,POINT(27.14 38.42),"POLYGON((26.1708 38.2618\,27.3897 37.8152\,27.524 38.0078\,28.0074 37.9658\,28.4931 38.2282\,28.083 38.4069\,27.8179 38.3067\,27.243 38.5757\,27.1127 38.8002\,27.5638 39.026\,27.3765 39.3855\,26.6876 39.1198\,26.7787 38.8317\,26.2843 38.7099\,26.3435 38.4925\,26.1708 38.2618))" +ADD,Bole Int'l,አዲስ አበባ / Addis Ababa,Addis Ababa,POINT(38.74 9.03),"POLYGON((38.6399 8.974\,38.6798 8.9583\,38.6828 8.9401\,38.7435 8.9009\,38.747 8.8638\,38.7754 8.8599\,38.7828 8.8353\,38.8004 8.8331\,38.8259 8.8417\,38.8264 8.8807\,38.8714 8.874\,38.8809 8.9187\,38.857 8.9387\,38.8991 8.9357\,38.9081 8.999\,38.8768 9.0839\,38.7949 9.0722\,38.7667 9.0985\,38.6858 9.0733\,38.6694 9.0113\,38.652 9.0087\,38.6579 8.9774\,38.6399 8.974))" +ADE,Aden Int'l,مديرية خور مكسر,Aden,POINT(45.0333 12.8),"POLYGON((45.0013 12.8433\,45.0237 12.8111\,45.0168 12.81\,45.0158 12.8005\,45.0239 12.8101\,45.0403 12.7901\,45.0981 12.9407\,45.0448 12.9399\,45.0319 12.8433\,45.0013 12.8433))" +ADJ,Marka Int'l,ناحية عمان,Amman,POINT(35.9328 31.9497),"POLYGON((35.8695 31.9418\,35.8748 31.9331\,35.9073 31.9155\,35.9389 31.9031\,35.95 31.9425\,35.9258 31.9673\,35.916 31.9928\,35.9006 31.987\,35.8891 31.9768\,35.8855 31.966\,35.8741 31.973\,35.8698 31.9741\,35.8695 31.9418))" +ADL,Adelaide Int'l,Adelaide City Council,Adelaide,POINT(138.6 -34.9275),"POLYGON((138.5772 -34.9088\,138.5827 -34.9418\,138.6242 -34.9399\,138.6233 -34.9283\,138.6155 -34.9227\,138.6126 -34.9055\,138.6101 -34.9024\,138.5976 -34.8988\,138.588 -34.8994\,138.5772 -34.9088))" +AGT,Guaraní Int'l,Ciudad del Este,Ciudad del Este,POINT(-54.6167 -25.5167),"POLYGON((-54.7402 -25.5977\,-54.7072 -25.5742\,-54.6932 -25.5388\,-54.6174 -25.5379\,-54.5939 -25.5535\,-54.6014 -25.5031\,-54.6391 -25.4778\,-54.628 -25.4592\,-54.6482 -25.4433\,-54.6685 -25.4685\,-54.683 -25.4326\,-54.7103 -25.4434\,-54.711 -25.4245\,-54.6698 -25.4144\,-54.7001 -25.3949\,-54.715 -25.4169\,-54.7258 -25.3867\,-54.739 -25.4048\,-54.7191 -25.4156\,-54.7402 -25.5977))" +AGU,Lic. 
Jesús Terán Peredo Int'l,Municipio de Aguascalientes,Aguascalientes,POINT(-102.296 21.876),"POLYGON((-102.5831 21.7471\,-102.3257 21.6223\,-102.1421 21.7054\,-102.1037 22.0574\,-102.5831 21.7471))" +AKL,Auckland Int'l,Auckland,Auckland,POINT(174.74 -36.8406),"POLYGON((173.8963 -36.5586\,174.4262 -37.3645\,175.3886 -37.0219\,175.1631 -36.5239\,175.9032 -36.2582\,175.5468 -35.8585\,174.9114 -35.7133\,174.8043 -35.99\,173.8963 -36.5586))" +ALA,Almaty Int'l,Жетісу ауданы,Almaty,POINT(76.8958 43.2775),"POLYGON((76.8857 43.2638\,76.9667 43.2722\,76.9735 43.2931\,76.9301 43.3017\,76.9327 43.3456\,76.9636 43.3798\,76.9292 43.3798\,76.9089 43.352\,76.9203 43.3432\,76.8857 43.2638))" +ALB,Albany Int'l,Town of Colonie,Colonie,POINT(-73.7874 42.7396),"POLYGON((-73.9335 42.7606\,-73.8125 42.6903\,-73.7232 42.6728\,-73.7024 42.7006\,-73.719 42.7367\,-73.6942 42.7323\,-73.6842 42.7576\,-73.7317 42.7654\,-73.7302 42.7874\,-73.7107 42.7944\,-73.7269 42.8225\,-73.7671 42.786\,-73.9335 42.7606))" +ALC,Alicante,Alacant / Alicante,Alicante,POINT(-0.4831 38.3453),"POLYGON((-0.6567 38.3823\,-0.6405 38.3448\,-0.5685 38.3018\,-0.5416 38.3058\,-0.5247 38.275\,-0.5085 38.3302\,-0.4944 38.319\,-0.4952 38.334\,-0.4449 38.3629\,-0.4027 38.3528\,-0.4081 38.3833\,-0.4669 38.3961\,-0.4696 38.4113\,-0.4929 38.4004\,-0.4975 38.3801\,-0.5515 38.398\,-0.5483 38.4323\,-0.5951 38.4741\,-0.6148 38.4763\,-0.5914 38.4562\,-0.6106 38.4283\,-0.578 38.3984\,-0.6247 38.3996\,-0.6567 38.3823))" +ALG,Houari Boumediene,Alger,Algiers,POINT(3.0589 36.7539),"POLYGON((2.7997 36.6504\,3.1114 36.5816\,3.3801 36.7652\,2.8946 36.805\,2.7997 36.6504))" +ALL,Albenga,Albenga,Albenga,POINT(8.2167 44.05),"POLYGON((8.0994 44.0716\,8.1547 44.0568\,8.1676 44.0317\,8.2271 44.0454\,8.226 44.0723\,8.19 44.1011\,8.1615 44.0986\,8.1688 44.0691\,8.0994 44.0716))" +ALP,Aleppo Int'l,ناحية جبل سمعان,Aleppo,POINT(37.16 36.2),"POLYGON((36.9952 36.1374\,37.0034 36.1119\,37.0801 36.0757\,37.0802 36.0337\,37.1407 36.0087\,37.1521 36.0409\,37.2117 36.0351\,37.2654 36.1475\,37.3009 36.1675\,37.2259 36.2218\,37.241 36.2583\,37.2833 36.2842\,37.2881 36.3323\,37.2298 36.3321\,37.2051 36.36\,37.1331 36.3396\,37.1027 36.2886\,37.1018 36.2475\,37.0207 36.2397\,36.9952 36.1374))" +AMA,Amarillo Int'l,Amarillo,Amarillo,POINT(-101.8316 35.1984),"POLYGON((-101.9562 35.1617\,-101.9561 35.1245\,-101.922 35.1319\,-101.9378 35.1022\,-101.9106 35.1148\,-101.8837 35.102\,-101.866 35.1202\,-101.8796 35.1397\,-101.8308 35.1494\,-101.827 35.1766\,-101.822 35.1494\,-101.7953 35.1495\,-101.7954 35.1676\,-101.7601 35.1858\,-101.6895 35.1859\,-101.6892 35.2074\,-101.6539 35.2221\,-101.6538 35.2509\,-101.7068 35.2347\,-101.7067 35.2462\,-101.7426 35.2362\,-101.8111 35.251\,-101.8133 35.2944\,-101.84 35.2945\,-101.84 35.2669\,-101.8665 35.2659\,-101.8841 35.2384\,-101.9208 35.265\,-101.9105 35.2212\,-101.9371 35.2061\,-101.9404 35.1618\,-101.9562 35.1617))" +AMD,Sardar Vallabhbhai Patel Int'l,Ahmedabad District,Ahmedabad,POINT(72.58 23.03),"POLYGON((71.84 22.3257\,72.2636 21.9832\,72.5221 22.7958\,72.8155 23.0377\,72.1595 23.1057\,72.2893 23.3988\,71.9984 23.4989\,71.87 23.2185\,71.9957 23.2485\,71.8996 23.1026\,72.1894 22.5424\,72.0839 22.4357\,71.9415 22.5486\,71.84 22.3257))" +AMM,Queen Alia Int'l,لواء قصبة عمان,Amman,POINT(35.9328 31.9497),"POLYGON((35.8695 31.9418\,35.8748 31.9331\,35.9073 31.9155\,35.9389 31.9031\,35.95 31.9425\,35.9258 31.9673\,35.916 31.9928\,35.9006 31.987\,35.8891 31.9768\,35.8855 31.966\,35.8741 31.973\,35.8698 31.9741\,35.8695 31.9418))" 
+AMS,Schiphol,Haarlemmermeer,Hoofddorp,POINT(4.6907 52.3061),"POLYGON((4.5505 52.2266\,4.6181 52.2145\,4.665 52.2276\,4.7805 52.2835\,4.819 52.3251\,4.7553 52.3565\,4.7578 52.3967\,4.7288 52.4007\,4.7392 52.4311\,4.7005 52.4255\,4.6688 52.4042\,4.6869 52.3874\,4.6756 52.3529\,4.6166 52.3315\,4.5505 52.2266))" +ANF,Cerro Moreno Int'l,Antofagasta,Antofagasta,POINT(-70.4 -23.65),"POLYGON((-70.6286 -23.5148\,-70.3925 -23.5798\,-70.5802 -24.5516\,-69.1737 -24.6884\,-69.247 -25.1752\,-69.0394 -25.3816\,-68.7234 -25.3896\,-68.3433 -25.1106\,-68.5619 -24.7627\,-68.067 -24.3282\,-68.3456 -23.9991\,-69.1418 -23.666\,-70.0475 -23.6233\,-70.0136 -23.0584\,-70.1867 -23.3504\,-70.6066 -23.3592\,-70.6286 -23.5148))" +ANR,Deurne,Antwerpen,Antwerp,POINT(4.4003 51.2178),"POLYGON((4.3008 51.2624\,4.3523 51.24\,4.3381 51.2298\,4.3569 51.2019\,4.3432 51.1938\,4.4185 51.1755\,4.4106 51.198\,4.4443 51.2052\,4.4272 51.2101\,4.4437 51.2254\,4.425 51.2414\,4.4286 51.264\,4.3859 51.2765\,4.4088 51.2879\,4.3975 51.2971\,4.356 51.3135\,4.3159 51.3027\,4.3195 51.2823\,4.3008 51.2624))" +ANU,V.C. Bird Int'l,Saint John,Saint John’s,POINT(-61.85 17.1167),"POLYGON((-61.9054 17.1006\,-61.8328 17.0907\,-61.8429 17.0558\,-61.7876 17.0612\,-61.7876 17.0829\,-61.8084 17.0919\,-61.8011 17.17\,-61.8453 17.1674\,-61.8643 17.1316\,-61.8519 17.1303\,-61.8449 17.1173\,-61.8862 17.1304\,-61.9054 17.1006))" +APW,Faleolo,Tuamasaga,Apia,POINT(-171.75 -13.8333),"POLYGON((-171.9189 -13.8067\,-171.9008 -13.8523\,-171.9082 -13.9276\,-171.8818 -13.9322\,-171.9084 -13.95\,-171.9116 -14.0047\,-171.8332 -13.9853\,-171.8128 -14.006\,-171.84 -13.9919\,-171.8282 -14.0079\,-171.7811 -14.0119\,-171.7678 -14.042\,-171.7481 -14.0447\,-171.7404 -13.9368\,-171.6954 -13.948\,-171.6958 -13.862\,-171.7795 -13.8148\,-171.7778 -13.8292\,-171.7926 -13.8268\,-171.8304 -13.793\,-171.8685 -13.7985\,-171.8623 -13.8474\,-171.8704 -13.8597\,-171.8762 -13.7967\,-171.9189 -13.8067))" +AQP,Rodríguez Ballón Int'l,Arequipa,Arequipa,POINT(-71.5333 -16.4),"POLYGON((-72.2835 -16.6357\,-71.9967 -16.7996\,-71.91 -16.675\,-71.8695 -16.786\,-71.3261 -16.7522\,-71.2421 -16.4631\,-70.9554 -16.4716\,-70.8433 -15.9233\,-71.7043 -15.9337\,-72.0913 -16.1893\,-72.2835 -16.6357))" +ARH,Arkhangelsk-Talagi,Октябрьский округ,Arkhangelsk,POINT(40.5333 64.55),"POLYGON((40.3759 64.5367\,40.4862 64.5303\,40.6733 64.5746\,40.582 64.5757\,40.626 64.6001\,40.7116 64.583\,40.8365 64.5968\,40.7034 64.6142\,40.6698 64.5972\,40.6069 64.6029\,40.4975 64.5528\,40.3759 64.5367))" +ARN,Arlanda,Stockholms kommun,Stockholm,POINT(18.0686 59.3294),"POLYGON((17.7607 59.3918\,17.7963 59.3551\,17.9548 59.3107\,17.8665 59.2862\,17.8779 59.2681\,17.9479 59.2878\,17.9634 59.2685\,18.108 59.2273\,18.1337 59.2424\,18.1937 59.2298\,18.2001 59.2475\,18.1497 59.2658\,18.1054 59.3137\,18.1603 59.3208\,18.1605 59.3346\,18.0617 59.3753\,18.0364 59.3714\,18.049 59.3568\,18.0248 59.3393\,17.9814 59.3451\,17.9277 59.3746\,17.976 59.3996\,17.9028 59.4401\,17.8884 59.4302\,17.8962 59.4118\,17.8399 59.3813\,17.8172 59.399\,17.7607 59.3918))" +ASB,Ashkhabad Northwest,Bagtyýarlyk etraby,Ashgabat,POINT(58.3833 37.95),"POLYGON((58.1234 38.0831\,58.3392 37.9796\,58.3197 37.9586\,58.3394 37.952\,58.3167 37.9077\,58.3716 37.9266\,58.3674 37.9493\,58.3848 37.9438\,58.4514 38.0249\,58.4111 38.0915\,58.3847 38.0826\,58.3569 38.1037\,58.2708 38.1019\,58.239 38.0735\,58.1789 38.1025\,58.1331 38.1038\,58.1234 38.0831))" +ASM,Yohannes Iv Int'l,ዞባ ማእከል Maekel zone المنطقة المركزية,Asmara,POINT(38.925 15.3228),"POLYGON((38.6813 
15.2589\,38.6961 15.2164\,38.744 15.211\,38.9288 15.1307\,38.9539 15.13\,38.9955 15.1727\,39.0371 15.2385\,39.0363 15.2993\,39.007 15.3709\,38.9813 15.3891\,38.9811 15.4699\,38.9167 15.5089\,38.9095 15.5757\,38.8934 15.5941\,38.8442 15.5898\,38.8395 15.5693\,38.7469 15.5306\,38.7623 15.4617\,38.7415 15.4115\,38.7402 15.3183\,38.6813 15.2589))" +ASP,Alice Springs,Town of Alice Springs,Alice Springs,POINT(133.8667 -23.7),"POLYGON((133.726 -23.7761\,133.7572 -23.7829\,133.7563 -23.8134\,133.8458 -23.8134\,133.872 -23.7923\,133.8842 -23.8315\,133.9404 -23.8315\,133.9404 -23.781\,133.9207 -23.781\,133.9202 -23.7642\,133.9404 -23.7557\,133.9404 -23.6648\,133.9195 -23.6648\,133.9195 -23.6305\,133.8108 -23.6305\,133.8117 -23.7246\,133.726 -23.7476\,133.726 -23.7761))" +ASU,Silvio Pettirossi Int'l,Luque,Luque,POINT(-57.4872 -25.27),"POLYGON((-57.5013 -25.2042\,-57.5419 -25.2917\,-57.4731 -25.3246\,-57.4448 -25.3101\,-57.4474 -25.2876\,-57.4253 -25.2587\,-57.3929 -25.2572\,-57.3497 -25.2267\,-57.4006 -25.18\,-57.4651 -25.2191\,-57.5013 -25.2042))" +ASW,Aswan Int'l,أسوان,Aswān,POINT(32.8997 24.0889),"POLYGON((31.0981 22\,31.315 22\,31.365 22.1316\,31.4698 22.2249\,31.5062 22.1852\,31.4339 22.0856\,31.3989 22\,33.4131 22\,33.4691 25.3362\,32.6851 25.3369\,32.6473 25.2101\,32.6968 25.177\,32.6724 25.1101\,31.0981 22))" +ATH,Eleftherios Venizelos Int'l,Περιφερειακή Ενότητα Πειραιώς,Piraeus,POINT(23.6469 37.943),"POLYGON((23.5448 37.9731\,23.5808 37.9493\,23.5651 37.9296\,23.6056 37.8627\,23.6911 37.9624\,23.6439 38.0033\,23.5516 37.9921\,23.5448 37.9731))" +ATL,Hartsfield-Jackson Atlanta Int'l,Atlanta,Atlanta,POINT(-84.422 33.7628),"POLYGON((-84.5511 33.7208\,-84.5368 33.7268\,-84.5457 33.6808\,-84.5261 33.6731\,-84.5259 33.6567\,-84.5062 33.6542\,-84.4865 33.69\,-84.4429 33.7057\,-84.4154 33.6972\,-84.4183 33.6731\,-84.3975 33.6729\,-84.3998 33.6483\,-84.3502 33.6479\,-84.3486 33.7325\,-84.3293 33.7184\,-84.3286 33.7474\,-84.2896 33.7447\,-84.3474 33.7802\,-84.3481 33.8821\,-84.4026 33.8868\,-84.4498 33.8725\,-84.4558 33.8258\,-84.4951 33.8085\,-84.5153 33.7901\,-84.5154 33.7627\,-84.535 33.758\,-84.5357 33.7388\,-84.5148 33.7275\,-84.5058 33.7394\,-84.5011 33.7155\,-84.5357 33.7347\,-84.5511 33.7208)\,(-84.3823 33.8772\,-84.3824 33.8774\,-84.3823 33.8774\,-84.3823 33.8772))" +ATZ,Asyut,أسيوط,Asyūţ,POINT(31.1667 27.1833),"POLYGON((30.4557 27.7298\,30.8523 26.7702\,31.3427 26.838\,31.3778 26.8605\,31.3788 26.8888\,31.4521 26.9349\,31.4782 26.9133\,31.5351 26.8023\,32.1606 26.9843\,32.2505 27.0474\,32.6153 27.3743\,32.673 27.3657\,32.7441 27.5014\,32.7536 27.5571\,32.6539 27.9028\,32.4193 27.9098\,32.1171 27.8496\,31.6351 27.6954\,30.9251 27.6475\,30.8956 27.6117\,30.647 27.6316\,30.6878 27.7303\,30.4557 27.7298))" +AUH,Abu Dhabi Int'l,أبو ظبي,Abu Dhabi,POINT(54.3667 24.4667),"POLYGON((54.2972 24.4607\,54.4643 24.3838\,54.5242 24.3859\,54.527 24.3127\,54.5529 24.2879\,54.6012 24.3089\,54.6478 24.281\,54.7659 24.4278\,54.7465 24.4301\,54.7467 24.4607\,54.6743 24.4641\,54.6339 24.4874\,54.6286 24.5121\,54.5047 24.5642\,54.4744 24.6019\,54.4262 24.5473\,54.3542 24.5375\,54.2972 24.4607))" +AWZ,Ahwaz,شهر اهواز,Ahvāz,POINT(48.6692 31.3203),"POLYGON((48.5622 31.2809\,48.5835 31.2653\,48.6332 31.2682\,48.6327 31.2379\,48.6589 31.2699\,48.7292 31.256\,48.7949 31.1084\,48.8053 31.142\,48.7849 31.2636\,48.8022 31.3505\,48.7398 31.3746\,48.6176 31.3768\,48.6143 31.3109\,48.5622 31.2809))" +AYT,Antalya,Antalya,Antalya,POINT(30.7075 36.8874),"POLYGON((29.1917 36.235\,29.6147 36.1682\,29.7286 35.923\,30.1988 
36.059\,30.4983 35.98\,30.7643 36.262\,30.8322 36.647\,30.9817 36.657\,31.9207 36.335\,32.3951 35.924\,32.6053 36.1599\,32.6263 36.5108\,31.7817 37.3957\,31.3951 37.2681\,31.2006 37.4277\,30.4349 37.2065\,30.2244 37.3923\,29.8271 37.129\,29.6845 36.6387\,29.1917 36.235))" +BAH,Bahrain Int'l,محافظة العاصمة,Manama,POINT(50.5775 26.225),"POLYGON((50.3957 26.5344\,50.5134 26.3035\,50.5152 26.245\,50.502 26.2475\,50.503 26.2397\,50.5336 26.2156\,50.5366 26.1866\,50.6112 26.1242\,50.6207 26.153\,50.6512 26.1616\,50.635 26.2135\,50.5464 26.3143\,50.4387 26.5575\,50.3957 26.5344))" +BAIK,Baikonur Cosmodrome,Байқоңыр Қ.Ә.,Baikonur,POINT(63.3167 45.6167),"POLYGON((63.1815 45.6086\,63.2233 45.6136\,63.2767 45.5956\,63.3227 45.6081\,63.3365 45.6359\,63.3222 45.644\,63.3226 45.6465\,63.3299 45.6518\,63.3304 45.6541\,63.2869 45.6481\,63.2467 45.6261\,63.1908 45.6266\,63.1815 45.6086))" +BAQ,Ernesto Cortissoz Int'l,Perímetro Urbano Barranquilla,Barranquilla,POINT(-74.8019 10.9833),"POLYGON((-74.8509 10.9761\,-74.8293 10.9218\,-74.7588 10.9451\,-74.7655 10.9913\,-74.828 11.0453\,-74.8362 10.9826\,-74.8509 10.9761))" +BAX,Barnaul,Железнодорожный район,Barnaul,POINT(83.75 53.3333),"POLYGON((83.7097 53.3497\,83.7244 53.3249\,83.7793 53.3457\,83.75 53.3803\,83.7097 53.3497))" +BBI,Biju Patnaik,South East Zone,Bhubaneshwar,POINT(85.84 20.27),"POLYGON((85.8073 20.2165\,85.8327 20.2249\,85.8408 20.2122\,85.8794 20.2784\,85.8248 20.2953\,85.8319 20.2553\,85.8073 20.2165))" +BBU,Aeroportul National Bucuresti-Baneasa,Sector 3,Bucharest,POINT(26.1039 44.4325),"POLYGON((26.0956 44.4316\,26.1242 44.4082\,26.1822 44.3938\,26.2121 44.3951\,26.2256 44.4324\,26.1975 44.4426\,26.0956 44.4316))" +BCN,Barcelona,el Prat de Llobregat,El Prat de Llobregat,POINT(2.0953 41.3246),"POLYGON((2.0573 41.3025\,2.0748 41.2766\,2.1516 41.2954\,2.1549 41.2974\,2.1666 41.3088\,2.1676 41.3113\,2.145 41.2977\,2.1563 41.3206\,2.1188 41.3228\,2.0767 41.3483\,2.0573 41.3025))" +BDL,Bradley Int'l,Windsor Locks,Windsor Locks,POINT(-72.6544 41.9267),"POLYGON((-72.6968 41.9225\,-72.6889 41.9188\,-72.6754 41.9165\,-72.6627 41.9082\,-72.6458 41.9022\,-72.6396 41.8983\,-72.6353 41.9035\,-72.6217 41.9047\,-72.624 41.9329\,-72.6226 41.9361\,-72.614 41.9435\,-72.6876 41.9482\,-72.6968 41.9225))" +BDO,Husein Sastranegara Int'l,Kota Cimahi,Cimahi,POINT(107.5548 -6.8712),"POLYGON((107.5103 -6.8982\,107.519 -6.9189\,107.5642 -6.9328\,107.5564 -6.8896\,107.5761 -6.8908\,107.5518 -6.8294\,107.5103 -6.8982))" +BDQ,Vadodara,Vadodara Rural Taluka,Vadodara,POINT(73.2 22.3),"POLYGON((73.0467 22.3608\,73.0825 22.2666\,73.1594 22.2152\,73.1521 22.183\,73.1037 22.1446\,73.0984 22.1135\,73.1582 22.0905\,73.1904 22.122\,73.2665 22.1105\,73.2627 22.1251\,73.2918 22.1545\,73.2636 22.2143\,73.3004 22.2214\,73.2901 22.2368\,73.3093 22.2469\,73.3024 22.2753\,73.2808 22.2764\,73.2739 22.3113\,73.256 22.3136\,73.2736 22.3356\,73.2554 22.3385\,73.2864 22.3516\,73.2638 22.3549\,73.2508 22.4106\,73.2171 22.4064\,73.2201 22.4324\,73.1713 22.4299\,73.1671 22.4586\,73.1298 22.4474\,73.1261 22.4644\,73.0937 22.4656\,73.0654 22.4291\,73.073 22.3971\,73.0467 22.3608))" +BEG,Surcin,Сурчин,Surčin,POINT(20.2833 44.8),"POLYGON((20.222 44.7864\,20.2602 44.7837\,20.3154 44.7382\,20.351 44.7688\,20.3257 44.8343\,20.2767 44.8336\,20.222 44.7864))" +BEL,Val de Caes Int'l,Ananindeua,Ananindeua,POINT(-48.3719 -1.3658),"POLYGON((-48.4401 -1.3441\,-48.4214 -1.3693\,-48.4354 -1.3846\,-48.4299 -1.4043\,-48.3944 -1.3911\,-48.3906 -1.4087\,-48.3739 -1.403\,-48.3714 -1.4236\,-48.4 -1.4648\,-48.3474 
-1.4549\,-48.3324 -1.4214\,-48.3636 -1.4068\,-48.3674 -1.3697\,-48.3496 -1.3654\,-48.3386 -1.335\,-48.3529 -1.3118\,-48.3387 -1.2678\,-48.3536 -1.2631\,-48.3338 -1.2434\,-48.353 -1.221\,-48.3744 -1.2434\,-48.4076 -1.2279\,-48.4276 -1.2572\,-48.4222 -1.313\,-48.4401 -1.3441))" +BEN,Benina Int'l,بنغازي,Benghazi,POINT(20.0667 32.1167),"POLYGON((18.6509 32.7005\,19.3801 31.4111\,20.299 31.2317\,20.4175 31.0884\,20.5235 31.0255\,20.5938 31.007\,20.9341 31.0462\,21.3191 31.0351\,21.2888 31.2763\,21.2424 31.3558\,21.2154 31.5411\,21.0537 31.6735\,20.8087 31.9646\,20.7972 32.1202\,20.7713 32.1894\,20.6387 32.4084\,20.4858 32.7336\,20.4222 32.7005\,18.6509 32.7005))" +BEY,Beirut Int'l,محافظة بيروت,Beirut,POINT(35.5131 33.8869),"POLYGON((35.4668 33.8932\,35.4807 33.8679\,35.5105 33.8629\,35.5425 33.9086\,35.4668 33.8932))" +BFS,Belfast Int'l,Northern Ireland / Tuaisceart Éireann,Belfast,POINT(-5.93 54.5964),"POLYGON((-8.1775 54.4648\,-7.5474 54.1221\,-7.2789 54.1226\,-6.9816 54.4095\,-6.6238 54.0365\,-6.2847 54.1122\,-5.826 53.8702\,-5.2678 54.174\,-5.0778 54.4645\,-5.5711 55.0228\,-6.1105 55.3667\,-6.5787 55.4436\,-7.3913 55.0223\,-7.5347 54.747\,-7.9209 54.696\,-7.6973 54.6101\,-8.1775 54.4648))" +BGA,Palonegro,Tona,Bucaramanga,POINT(-73.0 7.1333),"POLYGON((-73.066 7.1474\,-73.0494 7.1343\,-73.028 7.1451\,-73.0026 7.1019\,-72.9699 7.1097\,-72.9503 7.0863\,-72.9373 7.095\,-72.8852 7.0775\,-72.8749 7.0539\,-72.8222 7.0522\,-72.8188 7.09\,-72.8383 7.0986\,-72.8586 7.1548\,-72.8315 7.2075\,-72.8558 7.2175\,-72.8746 7.2635\,-72.9565 7.2357\,-72.989 7.2509\,-73.0293 7.1982\,-73.0568 7.186\,-73.066 7.1474))" +BGF,Bangui M'Poko Int'l,Ombella M'Poko,Bimbo,POINT(18.5163 4.3313),"POLYGON((16.7543 5.7837\,16.5359 5.2712\,16.9651 5.3363\,17.1305 5.1474\,17.0319 5.0234\,17.4648 4.794\,17.6008 4.525\,18.1317 4.4482\,18.5964 3.8734\,18.524 4.4184\,18.7627 4.4036\,19.1044 4.9392\,18.8605 5.626\,18.6266 5.742\,18.4049 5.5541\,18.3701 5.7403\,17.9093 5.8898\,17.5808 5.7006\,16.7543 5.7837))" +BGI,Grantley Adams Int'l,Fort-de-France,Fort-de-France,POINT(-61.0667 14.6),"POLYGON((-61.103 14.6884\,-61.0816 14.6333\,-61.0921 14.5984\,-61.0365 14.5945\,-61.0359 14.64\,-61.0849 14.6735\,-61.0737 14.7032\,-61.0903 14.7078\,-61.103 14.6884))" +BGO,Bergen Flesland,Askøy,Askøy,POINT(5.15 60.4667),"POLYGON((4.8979 60.6319\,4.9216 60.5497\,4.9761 60.5092\,5.0573 60.4117\,5.1003 60.414\,5.1701 60.3818\,5.2803 60.4091\,5.2372 60.4954\,4.9976 60.5518\,4.9817 60.5607\,4.995 60.5782\,4.9582 60.5883\,4.9297 60.6266\,4.8979 60.6319))" +BGY,Orio Al Serio,Bergamo,Bergamo,POINT(9.67 45.695),"POLYGON((9.6194 45.7191\,9.6347 45.658\,9.6889 45.6665\,9.7137 45.6886\,9.687 45.7314\,9.6194 45.7191))" +BHO,Bairagarh,Bhopal,Bhopāl,POINT(77.4167 23.25),"POLYGON((77.2686 23.2423\,77.3301 23.2284\,77.3444 23.1861\,77.4124 23.1955\,77.4703 23.1435\,77.4894 23.1527\,77.4785 23.1897\,77.5238 23.2221\,77.5172 23.2617\,77.5294 23.2783\,77.5004 23.2851\,77.4791 23.2695\,77.4458 23.3251\,77.4172 23.3108\,77.3371 23.3275\,77.3036 23.2726\,77.3119 23.252\,77.2686 23.2423))" +BHQ,Broken Hill,Broken Hill City Council,Broken Hill,POINT(141.4667 -31.95),"POLYGON((141.3928 -31.9798\,141.4121 -32.0026\,141.463 -32.0025\,141.4785 -32.0192\,141.483 -32.0025\,141.5426 -32.0024\,141.5455 -31.9248\,141.5311 -31.9129\,141.5647 -31.9074\,141.5635 -31.885\,141.4127 -31.8864\,141.413 -31.972\,141.3928 -31.9798))" +BHX,Birmingham Int'l,Solihull,Solihull,POINT(-1.778 52.413),"POLYGON((-1.872 52.3676\,-1.8078 52.3666\,-1.7777 52.3477\,-1.7792 52.3645\,-1.746 52.355\,-1.7208 
52.3726\,-1.7178 52.3681\,-1.7208 52.3591\,-1.7186 52.3557\,-1.6484 52.3567\,-1.6079 52.3765\,-1.6022 52.416\,-1.6145 52.428\,-1.5935 52.4535\,-1.6243 52.4636\,-1.6414 52.4433\,-1.6772 52.4363\,-1.7464 52.5097\,-1.7939 52.5088\,-1.7558 52.4995\,-1.755 52.4563\,-1.8003 52.4583\,-1.8436 52.4105\,-1.8667 52.411\,-1.8455 52.3998\,-1.872 52.3676))" +BIO,Bilbao,Bilbao,Bilbao,POINT(-2.9236 43.2569),"POLYGON((-2.986 43.2461\,-2.9292 43.2137\,-2.9064 43.2263\,-2.9209 43.2423\,-2.8803 43.2653\,-2.9105 43.2858\,-2.9178 43.2738\,-2.9714 43.2901\,-2.9706 43.2586\,-2.986 43.2461))" +BJL,Yundum Int'l,Kanifing,Serekunda,POINT(-16.6667 13.4333),"POLYGON((-16.7193 13.4531\,-16.6701 13.4185\,-16.5891 13.4228\,-16.5858 13.4485\,-16.6157 13.4779\,-16.6578 13.4739\,-16.6674 13.4893\,-16.7193 13.4531))" +BJM,Bujumbura Int'l,Bujumbura Mairie,Bujumbura,POINT(29.3667 -3.3833),"POLYGON((29.2888 -3.3451\,29.3407 -3.3629\,29.3503 -3.3904\,29.3384 -3.432\,29.3687 -3.445\,29.3845 -3.3964\,29.4046 -3.4\,29.4046 -3.3442\,29.4218 -3.322\,29.3118 -3.305\,29.3182 -3.3474\,29.2888 -3.3451))" +BJX,Del Bajio Int'l,León,León de los Aldama,POINT(-101.6833 21.1167),"POLYGON((-101.8334 21.1373\,-101.8005 21.0809\,-101.8089 21.052\,-101.7799 21.0425\,-101.7838 21.0163\,-101.7304 21.0053\,-101.7384 20.9818\,-101.6684 20.9942\,-101.6714 20.951\,-101.7128 20.9545\,-101.7224 20.909\,-101.6885 20.893\,-101.6894 20.8646\,-101.6641 20.8984\,-101.6466 20.8915\,-101.6414 20.9291\,-101.5951 20.9254\,-101.5887 20.9385\,-101.5862 20.9083\,-101.5675 20.9052\,-101.5613 20.9443\,-101.5406 20.9442\,-101.5518 20.9579\,-101.5641 20.9504\,-101.5621 20.9789\,-101.5818 20.9769\,-101.5732 21.0295\,-101.4994 21.0104\,-101.4858 21.0263\,-101.4726 21.1522\,-101.3948 21.1289\,-101.3722 21.177\,-101.3912 21.2129\,-101.427 21.2278\,-101.4431 21.254\,-101.4984 21.2551\,-101.5016 21.2331\,-101.5183 21.2291\,-101.5373 21.2725\,-101.644 21.3302\,-101.6425 21.3009\,-101.6609 21.2872\,-101.6867 21.2941\,-101.702 21.2755\,-101.7597 21.2676\,-101.7811 21.1605\,-101.804 21.1663\,-101.8118 21.1412\,-101.8334 21.1373))" +BKI,Kota Kinabalu Int'l,Kota Kinabalu,Kota Kinabalu,POINT(116.0725 5.975),"POLYGON((116.04 5.916\,116.0871 5.9442\,116.195 5.9501\,116.2789 5.8578\,116.2362 5.952\,116.2667 5.979\,116.267 6.0247\,116.2789 6.0231\,116.2817 6.0419\,116.2128 6.0365\,116.2103 6.0728\,116.1794 6.0976\,116.1809 6.1185\,116.138 6.0988\,116.1392 6.1383\,116.0944 6.1201\,116.085 6.1072\,116.1018 6.0779\,116.1139 6.0986\,116.1324 6.091\,116.1285 6.0581\,116.1099 6.0513\,116.1102 5.9979\,116.0809 5.9981\,116.04 5.9567\,116.04 5.916))" +BKO,Bamako Sénou,Bamako,Bamako,POINT(-7.9922 12.6458),"POLYGON((-8.045 12.6748\,-8.0678 12.5619\,-8.0355 12.5664\,-8.0383 12.5967\,-7.9881 12.5276\,-7.9202 12.5183\,-7.9287 12.5854\,-7.906 12.6093\,-7.9006 12.6459\,-7.9141 12.6801\,-7.9313 12.6828\,-7.9467 12.7137\,-7.9819 12.6811\,-8.019 12.6782\,-8.0219 12.6597\,-8.045 12.6748))" +BLR,Bengaluru Int'l,Bangalore North,Bangalore,POINT(77.5917 12.9789),"POLYGON((77.3756 13.0288\,77.3718 12.9921\,77.4434 12.981\,77.4424 12.9589\,77.4619 12.9486\,77.5508 12.9501\,77.5549 12.9317\,77.5987 12.9373\,77.5967 12.9547\,77.6204 12.9512\,77.6645 12.9157\,77.642 12.9499\,77.6433 13.0412\,77.592 13.0575\,77.5837 13.0461\,77.5355 13.0515\,77.5389 13.0331\,77.5343 13.0349\,77.5231 13.0461\,77.5408 13.0907\,77.4684 13.0877\,77.4753 13.1211\,77.4566 13.1289\,77.4607 13.1608\,77.4469 13.1656\,77.3929 13.1176\,77.4288 13.086\,77.4237 13.0703\,77.3837 13.0674\,77.3891 13.0265\,77.3756 13.0288))" +BLZ,Chileka 
Int'l,Blantyre,Blantyre,POINT(35.0058 -15.7861),"POLYGON((34.7195 -15.6994\,34.7381 -15.7517\,34.7357 -15.8168\,34.873 -15.8292\,34.8807 -15.8607\,34.8548 -15.9025\,34.8694 -15.9205\,34.8465 -15.9661\,34.8886 -15.9683\,34.888 -16.0183\,34.9269 -16.005\,34.9223 -15.9943\,34.9498 -15.9863\,34.9624 -15.947\,34.9995 -15.9162\,35.026 -15.927\,35.0641 -15.9076\,35.0912 -15.8721\,35.081 -15.8481\,35.1066 -15.8489\,35.1262 -15.8192\,35.1064 -15.7598\,35.1247 -15.7404\,35.1112 -15.7149\,35.1247 -15.6915\,35.1083 -15.6853\,35.1212 -15.6483\,35.0967 -15.6205\,35.1307 -15.534\,35.1032 -15.5454\,35.0926 -15.5359\,35.1338 -15.4727\,35.1034 -15.4741\,35.0691 -15.444\,35.054 -15.401\,35.0223 -15.3891\,35.0029 -15.3551\,34.9253 -15.3616\,34.8666 -15.4376\,34.828 -15.5314\,34.7503 -15.5817\,34.7635 -15.6614\,34.7195 -15.6994))" +BMA,Bromma,Stockholms kommun,Stockholm,POINT(18.0686 59.3294),"POLYGON((17.7607 59.3918\,17.7963 59.3551\,17.9548 59.3107\,17.8665 59.2862\,17.8779 59.2681\,17.9479 59.2878\,17.9634 59.2685\,18.108 59.2273\,18.1337 59.2424\,18.1937 59.2298\,18.2001 59.2475\,18.1497 59.2658\,18.1054 59.3137\,18.1603 59.3208\,18.1605 59.3346\,18.0617 59.3753\,18.0364 59.3714\,18.049 59.3568\,18.0248 59.3393\,17.9814 59.3451\,17.9277 59.3746\,17.976 59.3996\,17.9028 59.4401\,17.8884 59.4302\,17.8962 59.4118\,17.8399 59.3813\,17.8172 59.399\,17.7607 59.3918))" +BME,Broome Int'l,Shire Of Broome,Broome,POINT(122.2361 -17.9619),"POLYGON((120.044 -19.8493\,120.0641 -20.07\,120.6669 -20.07\,120.6848 -19.875\,121.1139 -19.9066\,121.1139 -19.7947\,123.566 -19.7941\,123.566 -18.7541\,123.4042 -18.7541\,123.5157 -18.4982\,123.3486 -18.4982\,123.3486 -18.2722\,123.1483 -18.2721\,123.1483 -17.8151\,122.9961 -17.8151\,122.9961 -17.5373\,123.1652 -17.5373\,123.0289 -17.2331\,123.4863 -17.1828\,123.2286 -16.6179\,123.2672 -16.3586\,123.0704 -16.1859\,122.1693 -17.1045\,122.1005 -18.2591\,121.6622 -18.529\,121.3442 -19.2019\,120.9042 -19.5853\,120.044 -19.8493))" +BNA,Nashville Int'l,Nashville-Davidson,Nashville,POINT(-86.7842 36.1715),"POLYGON((-86.9096 36.3899\,-87.0404 35.9892\,-86.6144 35.9678\,-86.5156 36.1006\,-86.7548 36.4055\,-86.9096 36.3899)\,(-86.7882 36.0373\,-86.8511 36.1242\,-86.7647 36.1016\,-86.7882 36.0373)\,(-86.7734 36.1124\,-86.757 36.1246\,-86.7593 36.1116\,-86.7734 36.1124))" +BND,Bandar Abbass Int'l,بخش مرکزی شهرستان بندرعباس,Bandar ‘Abbās,POINT(56.2667 27.1833),"POLYGON((55.7358 27.0913\,55.7595 27.0597\,55.7598 26.9624\,55.9575 26.9857\,56.1752 27.0873\,56.3218 27.1209\,56.4725 27.1313\,56.5307 27.1209\,56.5373 27.1746\,56.4957 27.2923\,56.4979 27.4884\,56.5309 27.5344\,56.6342 27.605\,56.6559 27.6905\,56.7222 27.7182\,56.6217 27.9149\,56.5118 27.8981\,56.4999 27.9085\,56.5082 27.9643\,56.0196 27.9246\,56.1001 27.8673\,56.197 27.8345\,56.3245 27.6837\,56.4254 27.6642\,56.2507 27.5942\,56.2804 27.5559\,56.2879 27.4931\,56.2201 27.4834\,56.1515 27.4267\,55.9955 27.3839\,55.9811 27.3277\,56.0262 27.3147\,56.0445 27.2801\,56.0201 27.232\,55.9311 27.1963\,55.8823 27.2128\,55.7439 27.196\,55.7489 27.1298\,55.7358 27.0913))" +BNE,Brisbane Int'l,Brisbane City,Brisbane,POINT(153.0281 -27.4678),"POLYGON((152.6797 -27.3723\,152.7432 -27.5671\,153.1787 -27.6051\,153.2022 -27.3529\,153.0668 -27.2815\,152.6797 -27.3723))" +BNI,Benin,Oredo,Benin City,POINT(5.6222 6.3333),"POLYGON((5.4622 6.1\,5.5149 6.094\,5.5707 6.2072\,5.6214 6.2262\,5.6446 6.3705\,5.5966 6.3497\,5.5288 6.2772\,5.4909 6.2111\,5.4622 6.1))" +BOD,Bordeaux,Bordeaux,Bordeaux,POINT(-0.58 44.84),"POLYGON((-0.6387 44.8592\,-0.6029 
44.8184\,-0.5894 44.8253\,-0.5739 44.8108\,-0.5347 44.8244\,-0.5481 44.8357\,-0.5354 44.8891\,-0.5474 44.9162\,-0.5805 44.9088\,-0.5758 44.866\,-0.5941 44.8532\,-0.631 44.8717\,-0.6387 44.8592))" +BOG,Eldorado Int'l,Bogotá,Bogotá,POINT(-74.0722 4.7111),"POLYGON((-74.2235 4.6252\,-74.1837 4.5965\,-74.1772 4.5537\,-74.1594 4.561\,-74.173 4.5413\,-74.1516 4.5386\,-74.1566 4.5267\,-74.1319 4.5368\,-74.1371 4.5475\,-74.1366 4.5521\,-74.1332 4.5563\,-74.1213 4.487\,-74.0963 4.4856\,-74.1094 4.5389\,-74.0969 4.5328\,-74.1037 4.5455\,-74.0985 4.543\,-74.0938 4.5282\,-74.0939 4.5033\,-74.0851 4.4976\,-74.0828 4.5576\,-74.068 4.5637\,-74.0592 4.6303\,-74.0237 4.6891\,-74.0231 4.7346\,-74.0125 4.7333\,-74.0338 4.8011\,-74.0254 4.8231\,-74.0531 4.8333\,-74.0586 4.8154\,-74.0782 4.8225\,-74.0412 4.8076\,-74.0632 4.7711\,-74.0756 4.7586\,-74.1208 4.7606\,-74.1282 4.7268\,-74.1581 4.7254\,-74.1559 4.7086\,-74.1769 4.7027\,-74.1584 4.6626\,-74.2211 4.6443\,-74.2235 4.6252))" +BOI,Boise Air Terminal,Ada County,Boise,POINT(-116.2308 43.6005),"POLYGON((-116.5134 43.4593\,-116.5119 43.2905\,-116.4331 43.2963\,-116.4102 43.2827\,-116.3769 43.2376\,-116.3838 43.1909\,-116.308 43.1228\,-115.9787 43.1134\,-115.9757 43.5914\,-116.282 43.8074\,-116.5125 43.8072\,-116.5131 43.6342\,-116.4735 43.6341\,-116.4736 43.4592\,-116.5134 43.4593))" +BOJ,Bourgas,Бургас,Burgas,POINT(27.4702 42.503),"POLYGON((27.2089 42.5489\,27.4469 42.2956\,27.5705 42.4442\,27.5109 42.4286\,27.4427 42.4615\,27.4963 42.7041\,27.2089 42.5489))" +BOM,Chhatrapati Shivaji Int'l,Mumbai Suburban,Mumbai,POINT(72.8775 19.0761),"POLYGON((72.7837 19.2649\,72.7817 19.1945\,72.7966 19.1949\,72.7811 19.1652\,72.7872 19.1301\,72.8052 19.1388\,72.8263 19.0964\,72.8172 19.043\,72.8793 19.0478\,72.8873 18.9919\,72.9123 18.9969\,72.9592 19.0463\,72.9429 19.1148\,72.9817 19.1704\,72.9398 19.189\,72.9251 19.216\,72.9102 19.2076\,72.9055 19.2492\,72.8626 19.2685\,72.8394 19.2665\,72.8272 19.245\,72.7837 19.2649))" +BOS,Gen E L Logan Int'l,Revere,Revere,POINT(-71.004 42.4189),"POLYGON((-71.0332 42.4142\,-70.9844 42.3887\,-70.9592 42.4077\,-70.9491 42.4437\,-71.0041 42.4299\,-71.0188 42.4501\,-71.0332 42.4142))" +BRC,Teniente Luis Candelaria Int'l,San Carlos de Bariloche,San Carlos de Bariloche,POINT(-71.3 -41.15),"POLYGON((-71.5881 -41.0781\,-71.5144 -41.1169\,-71.5196 -41.1395\,-71.4507 -41.1442\,-71.4863 -41.1737\,-71.4395 -41.1993\,-71.4097 -41.1611\,-71.3916 -41.186\,-71.3539 -41.1884\,-71.3438 -41.2062\,-71.3105 -41.1827\,-71.2431 -41.1859\,-71.2432 -41.1651\,-71.1821 -41.1475\,-71.1374 -41.1132\,-71.1511 -41.0838\,-71.1884 -41.0881\,-71.19 -41.1016\,-71.2604 -41.1286\,-71.3984 -41.1211\,-71.4912 -41.059\,-71.4217 -41.0913\,-71.4717 -41.0465\,-71.521 -41.042\,-71.5296 -41.054\,-71.5607 -41.0371\,-71.5823 -41.0465\,-71.5641 -41.0717\,-71.5881 -41.0781))" +BRE,Bremen,Bremen,Bremen,POINT(8.8 53.0833),"POLYGON((8.4816 53.2265\,8.5339 53.1869\,8.6199 53.1669\,8.6665 53.0917\,8.7039 53.0815\,8.7096 53.046\,8.7308 53.0338\,8.7706 53.0531\,8.8221 53.0209\,8.8668 53.0218\,8.862 53.0356\,8.8663 53.0407\,8.9158 53.011\,8.9803 53.0468\,8.961 53.0852\,8.9907 53.0966\,8.9472 53.1162\,8.9833 53.1257\,8.9448 53.1521\,8.9125 53.1326\,8.8632 53.1314\,8.8293 53.1643\,8.7762 53.1582\,8.6994 53.1854\,8.6594 53.1772\,8.6279 53.1983\,8.5934 53.1842\,8.5802 53.1931\,8.5991 53.2128\,8.554 53.2078\,8.5175 53.2285\,8.4816 53.2265))" +BRM,Jacinto Lara Int'l,Municipio Iribarren,Barquisimeto,POINT(-69.3467 10.0678),"POLYGON((-69.6762 10.2353\,-69.4877 9.7904\,-69.3423 9.7278\,-69.3125 
10.0196\,-69.1307 10.1413\,-69.3116 10.2651\,-69.2427 10.4824\,-69.4462 10.5226\,-69.6762 10.2353))" +BRO,Brownsville-South Padre Island Int'l,Matamoros,Heroica Matamoros,POINT(-97.5042 25.8797),"POLYGON((-97.9426 25.3853\,-97.5162 25.0425\,-97.1474 25.9563\,-97.4056 25.8376\,-97.9022 26.061\,-97.7366 25.504\,-97.9426 25.3853))" +BRS,Bristol Int'l,Cardiff,Caerdydd,POINT(-3.1792 51.4817),"POLYGON((-3.3438 51.5299\,-3.3366 51.5088\,-3.2984 51.4999\,-3.271 51.4651\,-3.2189 51.4749\,-3.1597 51.4428\,-3.0774 51.4943\,-3.0904 51.5099\,-3.0689 51.5202\,-3.1637 51.5605\,-3.2616 51.5378\,-3.2744 51.5512\,-3.3101 51.549\,-3.3438 51.5299))" +BRU,Brussels,Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest,Brussels,POINT(4.3525 50.8467),"POLYGON((4.2438 50.8196\,4.3065 50.8124\,4.3312 50.7755\,4.3829 50.7637\,4.4823 50.793\,4.4472 50.8083\,4.4767 50.8204\,4.4671 50.8446\,4.4209 50.8677\,4.4371 50.8788\,4.4329 50.8947\,4.4035 50.9139\,4.3775 50.897\,4.2938 50.889\,4.2829 50.8378\,4.2438 50.8196))" +BSB,Juscelino Kubitschek Int'l,Plano Piloto,Brasília,POINT(-47.8828 -15.7939),"POLYGON((-48.0896 -15.6789\,-48.0822 -15.7312\,-48.0474 -15.7538\,-48.0042 -15.755\,-47.9781 -15.7828\,-47.9697 -15.7526\,-47.9169 -15.7401\,-47.9385 -15.7771\,-47.9136 -15.785\,-47.9137 -15.7981\,-47.9484 -15.8101\,-47.9533 -15.8405\,-47.9323 -15.8514\,-47.8553 -15.8274\,-47.8682 -15.8446\,-47.8716 -15.8589\,-47.8255 -15.8326\,-47.8093 -15.7932\,-47.7837 -15.7972\,-47.7859 -15.7823\,-47.8065 -15.7876\,-47.8161 -15.7514\,-47.8386 -15.7342\,-47.8589 -15.7246\,-47.8277 -15.7507\,-47.8319 -15.7737\,-47.8811 -15.7301\,-47.9097 -15.7285\,-47.8597 -15.6881\,-48.0044 -15.5786\,-48.0744 -15.6285\,-48.0896 -15.6789))" +BSR,Basrah Int'l,ناحية مرکز قضاء البصرة,Al Başrah,POINT(47.81 30.515),"POLYGON((47.7192 30.5247\,47.7892 30.3816\,47.8777 30.4232\,47.847 30.4464\,47.8856 30.4832\,47.8752 30.4893\,47.859 30.5033\,47.8304 30.5418\,47.8224 30.5484\,47.8153 30.553\,47.7957 30.5611\,47.7883 30.5658\,47.7785 30.5761\,47.7531 30.5502\,47.7192 30.5247))" +BTS,Bratislava-M.R. 
Štefánik,okres Bratislava I,Bratislava,POINT(17.1097 48.1439),"POLYGON((17.0721 48.171\,17.0739 48.1424\,17.1324 48.1457\,17.1252 48.1561\,17.0721 48.171))" +BTV,Burlington Int'l,Burlington,South Burlington,POINT(-73.2202 44.4622),"POLYGON((-73.277 44.5061\,-73.2315 44.4469\,-73.19 44.4541\,-73.1938 44.474\,-73.1761 44.4847\,-73.2418 44.529\,-73.2648 44.5246\,-73.266 44.5397\,-73.277 44.5061))" +BUD,Ferihegy,Budapest,Budapest,POINT(19.0514 47.4925),"POLYGON((18.9251 47.5441\,18.9358 47.5433\,18.9269 47.5222\,18.9448 47.5175\,18.9359 47.4915\,18.9827 47.4608\,18.9699 47.4511\,18.9762 47.4301\,18.957 47.4186\,18.9624 47.3726\,19.0836 47.4049\,19.1405 47.3497\,19.1797 47.3611\,19.2317 47.4242\,19.247 47.4192\,19.3349 47.4632\,19.3142 47.493\,19.3187 47.5154\,19.2638 47.5083\,19.2449 47.5369\,19.1903 47.539\,19.1381 47.5998\,19.0993 47.6131\,19.0746 47.5891\,19.0767 47.6069\,19.0382 47.6096\,19.0253 47.6046\,19.0345 47.5869\,18.9426 47.5789\,18.9288 47.5724\,18.9408 47.5508\,18.9251 47.5441))" +BUF,Greater Buffalo Int'l,Town of Cheektowaga,Cheektowaga,POINT(-78.7466 42.9082),"POLYGON((-78.7999 42.8717\,-78.697 42.8638\,-78.697 42.9512\,-78.799 42.9493\,-78.7999 42.8717))" +BUQ,Bulawayo,Bulawayo,Bulawayo,POINT(28.58 -20.17),"POLYGON((28.3823 -20.1551\,28.3918 -20.1704\,28.4423 -20.1805\,28.436 -20.1599\,28.4903 -20.1734\,28.4821 -20.1918\,28.5126 -20.2009\,28.4951 -20.2372\,28.5396 -20.2606\,28.5522 -20.2228\,28.6632 -20.2368\,28.6948 -20.2051\,28.6522 -20.1538\,28.677 -20.1146\,28.6543 -20.1099\,28.6654 -20.0748\,28.6497 -20.075\,28.6705 -20.0166\,28.6077 -19.9596\,28.5533 -20.0611\,28.5213 -20.0361\,28.5127 -20.0585\,28.4649 -20.0736\,28.4094 -20.0233\,28.4149 -20.1192\,28.3966 -20.1352\,28.4175 -20.1351\,28.4234 -20.1529\,28.3823 -20.1551))" +BVB,Boa Vista Int'l,Boa Vista,Boa Vista,POINT(-60.6714 2.8194),"POLYGON((-61.0005 2.6817\,-60.9173 2.6278\,-60.9154 2.5655\,-60.9446 2.5209\,-60.8937 2.5076\,-60.9147 2.4681\,-60.892 2.476\,-60.8582 2.4494\,-60.8696 2.4352\,-60.8367 2.435\,-60.824 2.547\,-60.775 2.644\,-60.6985 2.7331\,-60.6444 2.8404\,-60.6057 2.8519\,-60.539 2.9161\,-60.4922 3.0219\,-60.448 3.046\,-60.373 3.1657\,-60.412 3.2592\,-60.4084 3.2878\,-60.3457 3.3185\,-60.3467 3.3406\,-60.321 3.3715\,-60.3213 3.4182\,-60.287 3.4482\,-60.3384 3.5454\,-60.3602 3.5416\,-60.4283 3.5862\,-60.4519 3.5799\,-60.483 3.606\,-60.514 3.543\,-60.528 3.555\,-60.552 3.519\,-60.5534 3.4776\,-60.578 3.479\,-60.602 3.457\,-60.594 3.402\,-60.743 3.499\,-60.887 3.479\,-60.909 3.464\,-60.946 3.48\,-61 3.448\,-61.0005 2.6817))" +BWI,Baltimore-Washington Int'l Thurgood Marshall,Baltimore,Baltimore,POINT(-76.6144 39.3051),"POLYGON((-76.7113 39.3719\,-76.7112 39.2778\,-76.6116 39.2344\,-76.5837 39.2081\,-76.5497 39.1972\,-76.5299 39.2096\,-76.5297 39.372\,-76.7113 39.3719))" +BWN,Brunei Int'l,Brunei-Muara,Bandar Seri Begawan,POINT(114.9422 4.8903),"POLYGON((114.6611 5.0923\,114.7952 4.8889\,114.8043 4.8565\,114.7721 4.7663\,114.7892 4.7305\,114.8239 4.745\,114.8508 4.7914\,114.8963 4.8161\,114.9713 4.8082\,114.988 4.8719\,115.0153 4.8941\,115.0669 4.8708\,115.0859 4.9391\,115.1322 4.9622\,115.1642 5.0283\,115.2384 5.099\,115.1078 5.1172\,115.0647 5.1556\,114.9289 5.1508\,114.8897 5.1991\,114.7248 5.1395\,114.6611 5.0923))" +BYK,Bouaké,Gbêkê,Bouaké,POINT(-5.0167 7.6833),"POLYGON((-5.7373 7.4845\,-5.6468 7.25\,-5.296 7.2334\,-4.8858 7.5728\,-4.5021 7.6175\,-4.5339 7.8307\,-4.6239 8.031\,-5.1491 7.9551\,-5.4012 8.1651\,-5.7373 7.4845))" +BZE,Philip S. W. 
Goldson Int'l,Belize City,Belize City,POINT(-88.1886 17.4986),"POLYGON((-88.2834 17.5535\,-88.2426 17.5362\,-88.2409 17.5125\,-88.2245 17.5081\,-88.2621 17.4665\,-88.2307 17.4897\,-88.2077 17.4736\,-88.1814 17.4914\,-88.1993 17.5204\,-88.2265 17.5162\,-88.2393 17.5415\,-88.2834 17.5535))" +CAE,Columbia Metro,Lexington County,West Columbia,POINT(-81.0936 33.9932),"POLYGON((-81.5752 33.8751\,-81.5044 33.809\,-81.4318 33.7748\,-81.4111 33.733\,-81.367 33.7295\,-81.1873 33.6536\,-80.9253 33.7556\,-80.9606 33.7788\,-81.0142 33.781\,-81.0363 33.7451\,-81.0584 33.7469\,-81.0418 33.8592\,-81.0125 33.8802\,-81.0219 33.8879\,-81.0087 33.9043\,-81.0303 33.9152\,-81.017 33.9306\,-81.0486 33.9924\,-81.1823 34.0914\,-81.2777 34.1007\,-81.3151 34.1461\,-81.2996 34.1639\,-81.3114 34.1856\,-81.3392 34.1974\,-81.404 34.1777\,-81.4355 34.1285\,-81.3897 34.0754\,-81.4657 34.088\,-81.5752 33.8751))" +CAI,Cairo Int'l,الجيزة,Giza,POINT(31.2118 29.987),"POLYGON((27.3106 27.738\,28.6251 27.7403\,28.8226 27.9798\,29.2392 28.1312\,29.585 28.3447\,29.7592 28.6486\,29.7797 28.8026\,29.8506 29.0924\,30.5633 29.6264\,30.6968 29.6997\,31.0235 29.7163\,31.123 29.4359\,31.2118 29.4362\,31.2351 29.3674\,31.2029 29.3066\,31.231 29.2522\,31.2122 29.1866\,31.8445 29.0106\,31.9063 29.7483\,31.7318 29.7693\,31.4643 29.7533\,31.2867 29.766\,31.2958 29.8008\,31.2829 29.9126\,31.2711 29.9422\,31.2351 29.9649\,31.2203 30.0136\,31.2146 30.0659\,31.2357 30.1225\,31.1715 30.1389\,31.0686 30.219\,31.0137 30.1972\,30.9887 30.208\,30.9548 30.283\,30.9131 30.2864\,30.9163 30.3341\,30.8742 30.3401\,30.8359 30.3263\,30.7866 30.2732\,30.3112 29.9203\,29.5704 29.556\,28.7293 28.8197\,27.7856 28.6355\,27.3106 27.738))" +CAN,Guangzhou Baiyun Int'l,越秀区,Guangzhou,POINT(113.26 23.13),"POLYGON((113.2351 23.1711\,113.2489 23.11\,113.3168 23.1116\,113.3008 23.1621\,113.2566 23.1505\,113.2351 23.1711))" +CAS,Casablanca-Anfa,Province de Médiouna إقليم مديونة,Mediouna,POINT(-7.51 33.45),"POLYGON((-7.5997 33.4739\,-7.5866 33.4628\,-7.5957 33.4558\,-7.5235 33.4108\,-7.4572 33.4512\,-7.3534 33.4679\,-7.373 33.4728\,-7.3788 33.5237\,-7.4168 33.5617\,-7.4528 33.5567\,-7.4781 33.5808\,-7.484 33.5652\,-7.5149 33.5684\,-7.5432 33.5514\,-7.5344 33.5324\,-7.5582 33.5204\,-7.543 33.5021\,-7.5997 33.4739))" +CAY,Cayenne – Rochambeau,Cayenne,Cayenne,POINT(-52.33 4.933),"POLYGON((-52.3402 4.9371\,-52.3179 4.8907\,-52.3102 4.9158\,-52.2808 4.9312\,-52.304 4.9516\,-52.3402 4.9371))" +CBR,Canberra Int'l,District of Canberra Central,Canberra,POINT(149.1269 -35.2931),"POLYGON((149.0702 -35.2981\,149.092 -35.3129\,149.0872 -35.3264\,149.1143 -35.3244\,149.1244 -35.3479\,149.1616 -35.3304\,149.1907 -35.3397\,149.1514 -35.275\,149.1803 -35.2513\,149.1612 -35.2258\,149.0883 -35.2578\,149.0879 -35.2851\,149.0702 -35.2981))" +CCP,Carriel Sur Int'l,Talcahuano,Talcahuano,POINT(-73.1219 -36.7167),"POLYGON((-73.1619 -36.7116\,-73.1465 -36.7337\,-73.1266 -36.732\,-73.1522 -36.7624\,-73.0662 -36.7955\,-73.0157 -36.7398\,-73.1072 -36.7215\,-73.0906 -36.6301\,-73.107 -36.615\,-73.1335 -36.6414\,-73.138 -36.6954\,-73.1619 -36.7116))" +CCS,Simón Bolivar Int'l,Parroquia Catia La Mar,Catia La Mar,POINT(-67.0333 10.6),"POLYGON((-67.0954 10.5693\,-67.0763 10.5585\,-67.0861 10.5299\,-67.0661 10.511\,-67.0528 10.538\,-67.0074 10.565\,-67.0328 10.6216\,-67.0883 10.5952\,-67.0954 10.5693))" +CCU,Netaji Subhash Chandra Bose Int'l,Kolkata,Kolkāta,POINT(88.37 22.5675),"POLYGON((88.237 22.5577\,88.2559 22.5411\,88.287 22.5445\,88.295 22.5116\,88.2821 22.4935\,88.2641 22.4974\,88.2854 22.4613\,88.3212 
22.4671\,88.3168 22.4794\,88.3721 22.4503\,88.4143 22.4772\,88.4072 22.5001\,88.4222 22.5225\,88.4482 22.5072\,88.459 22.523\,88.4128 22.5553\,88.3925 22.6287\,88.374 22.6321\,88.3605 22.6307\,88.3638 22.6135\,88.3187 22.5508\,88.237 22.5577))" +CDG,Charles de Gaulle Int'l,Aulnay-sous-Bois,Aulnay-sous-Bois,POINT(2.4906 48.9386),"POLYGON((2.4593 48.9551\,2.4842 48.936\,2.4794 48.9184\,2.5181 48.9251\,2.5053 48.9357\,2.5241 48.9505\,2.4927 48.9748\,2.4593 48.9551))" +CEB,Mactan-Cebu Int'l,Lapu-Lapu,Lapu-Lapu City,POINT(123.9488 10.3127),"POLYGON((123.9145 10.2809\,123.9259 10.2585\,123.9552 10.2796\,123.9715 10.2271\,123.9869 10.2329\,124.0056 10.216\,124.02 10.1789\,124.0757 10.225\,124.2731 10.2848\,124.3655 10.385\,124.2875 10.4408\,124.1522 10.4325\,124.0904 10.3803\,123.9413 10.3156\,123.9145 10.2809))" +CEK,Chelyabinsk,Советский район,Chelyabinsk,POINT(61.4 55.15),"POLYGON((61.1436 55.0535\,61.1665 55.0402\,61.2212 55.0368\,61.3193 55.0781\,61.3395 55.0399\,61.326 55.0365\,61.357 55.0167\,61.3845 55.022\,61.3914 54.991\,61.4237 55.0391\,61.4655 55.0491\,61.4069 55.1074\,61.4277 55.1611\,61.4008 55.1603\,61.3616 55.1182\,61.2983 55.1241\,61.2442 55.0889\,61.2142 55.0465\,61.1436 55.0535))" +CEQ,Mandelieu,Mandelieu-la-Napoule,Mandelieu-la-Napoule,POINT(6.9381 43.5464),"POLYGON((6.8786 43.5292\,6.8963 43.5275\,6.884 43.5026\,6.9037 43.495\,6.9498 43.5262\,6.9559 43.5639\,6.9031 43.565\,6.8786 43.5292))" +CFB,Cabo Frio Int'l,Cabo Frio,Cabo Frio,POINT(-42.0189 -22.8789),"POLYGON((-42.1222 -22.9083\,-42.0885 -22.9328\,-42.0373 -22.9331\,-42.0202 -22.8887\,-41.9796 -22.8735\,-41.9864 -22.8533\,-41.9697 -22.8223\,-42.0266 -22.7943\,-42.0412 -22.8657\,-42.0996 -22.862\,-42.1222 -22.9083))" +CFU,Corfu Int'l (Ioannis Kapodistrias),Δήμος Κεντρικής Κέρκυρας και Διαποντίων Νήσων,Kérkyra,POINT(19.9214 39.6239),"POLYGON((19.673 39.6806\,19.7292 39.6741\,19.7206 39.6537\,19.7408 39.6248\,19.8193 39.5844\,19.8592 39.5291\,19.8966 39.5124\,19.9236 39.5278\,19.901 39.599\,19.9248 39.5937\,19.9327 39.6249\,19.8392 39.6591\,19.8626 39.6647\,19.8414 39.6764\,19.8397 39.7\,19.8868 39.7237\,19.8854 39.7395\,19.8369 39.7315\,19.8041 39.751\,19.7936 39.7245\,19.7532 39.7205\,19.7174 39.6935\,19.6824 39.7137\,19.673 39.6806))" +CGB,Marechal Rondon Int'l,Várzea Grande,Várzea Grande,POINT(-56.1333 -15.65),"POLYGON((-56.4249 -15.4666\,-56.3914 -15.5229\,-56.2151 -15.617\,-56.213 -15.6345\,-56.264 -15.6841\,-56.2455 -15.7598\,-56.1927 -15.7775\,-56.1725 -15.7535\,-56.1539 -15.7528\,-56.1459 -15.7697\,-56.1113 -15.7424\,-56.1136 -15.6947\,-56.0733 -15.6826\,-56.0702 -15.6443\,-56.1042 -15.6159\,-56.1298 -15.6281\,-56.1421 -15.5613\,-56.1638 -15.5589\,-56.193 -15.5232\,-56.1841 -15.5086\,-56.246 -15.459\,-56.2324 -15.4142\,-56.2469 -15.3624\,-56.2871 -15.3428\,-56.3559 -15.413\,-56.3867 -15.4119\,-56.4249 -15.4666))" +CGH,Congonhas Int'l,Região Metropolitana de São Paulo,São Paulo,POINT(-46.6333 -23.55),"POLYGON((-47.2087 -23.9648\,-46.6502 -24.0015\,-45.6949 -23.6304\,-46.236 -23.186\,-46.4167 -23.3138\,-46.8029 -23.2574\,-47.0419 -23.3515\,-46.9842 -23.8027\,-47.2087 -23.9648))" +CGK,Soekarno-Hatta Int'l,Jakarta Pusat,Jakarta,POINT(106.8275 -6.175),"POLYGON((106.7918 -6.2088\,106.7976 -6.2293\,106.8227 -6.2026\,106.8544 -6.2063\,106.875 -6.1924\,106.882 -6.1623\,106.8213 -6.1368\,106.8285 -6.1626\,106.8014 -6.1572\,106.8107 -6.1886\,106.7918 -6.2088))" +CGN,Cologne/Bonn,Innenstadt,Cologne,POINT(6.9528 50.9364),"POLYGON((6.9247 50.9362\,6.966 50.9156\,6.9956 50.9494\,6.9612 50.9592\,6.9247 50.9362))" +CGO,Zhengzhou 
Xinzheng Int'l,二七区,Zhengzhou,POINT(113.6605 34.7492),"POLYGON((113.4973 34.6508\,113.5334 34.6142\,113.5533 34.6365\,113.5687 34.6062\,113.5965 34.6019\,113.5965 34.6293\,113.6559 34.6434\,113.6536 34.6685\,113.6764 34.6848\,113.6613 34.7039\,113.6591 34.7645\,113.6252 34.7681\,113.6191 34.7217\,113.6008 34.7053\,113.5124 34.7184\,113.4973 34.6508))" +CGP,Chittagong,চট্টগ্রাম জেলা,Chattogram,POINT(91.8325 22.335),"POLYGON((91.2596 21.9837\,91.9207 21.8562\,92.2167 21.9954\,92.1046 22.5859\,91.9713 22.5021\,91.7206 22.9875\,91.4314 22.8496\,91.2596 21.9837))" +CGQ,Changchun Longjia Int'l,西新镇,Changchun,POINT(125.2 43.9),"POLYGON((125.087 43.9029\,125.105 43.8714\,125.1371 43.8573\,125.1661 43.8738\,125.1759 43.857\,125.2248 43.9112\,125.1513 43.8954\,125.1179 43.9316\,125.087 43.9029))" +CGR,Campo Grande Int'l,Campo Grande,Campo Grande,POINT(-54.615 -20.4839),"POLYGON((-54.8543 -20.6898\,-54.831 -20.724\,-54.8228 -20.7772\,-54.816 -20.739\,-54.766 -20.692\,-54.7226 -20.7123\,-54.7212 -20.7444\,-54.6953 -20.7715\,-54.7086 -20.793\,-54.6952 -20.8173\,-54.7062 -20.8203\,-54.7105 -20.8299\,-54.6946 -20.8263\,-54.7058 -20.8337\,-54.6949 -20.8598\,-54.6743 -20.87\,-54.6834 -20.8897\,-54.6371 -20.8895\,-54.6201 -20.9117\,-54.6054 -20.9025\,-54.5494 -20.9607\,-54.5237 -20.8612\,-54.4222 -20.7803\,-54.4145 -20.7335\,-54.3245 -20.7904\,-54.2945 -20.8693\,-54.2378 -20.9397\,-54.1873 -20.8699\,-54.188 -20.7607\,-54.2161 -20.7175\,-54.177 -20.601\,-54.1956 -20.5405\,-54.1815 -20.4693\,-54.2425 -20.4957\,-54.3359 -20.4784\,-54.445 -20.3971\,-54.533 -20.3861\,-54.5735 -20.2909\,-54.602 -20.2975\,-54.6281 -20.3531\,-54.6873 -20.3959\,-54.7034 -20.3871\,-54.702 -20.3638\,-54.769 -20.3191\,-54.7616 -20.4817\,-54.7852 -20.5015\,-54.794 -20.568\,-54.765 -20.623\,-54.8543 -20.6898))" +CHC,Christchurch Int'l,Selwyn District,Rolleston,POINT(172.3833 -43.5833),"POLYGON((171.1135 -43.0957\,172.195 -43.904\,172.6056 -43.75\,172.439 -43.4685\,171.9396 -43.2742\,172.2502 -42.8666\,171.9073 -42.7408\,171.1135 -43.0957))" +CJB,Coimbatore,Coimbatore North,Coimbatore,POINT(76.9558 11.0168),"POLYGON((76.7389 11.1044\,76.7546 11.0831\,76.7465 11.055\,76.7774 11.0424\,76.7985 11.0566\,76.8616 11.0526\,76.9163 11.0365\,76.9192 11.0229\,76.8845 11.0216\,76.8731 11.0012\,76.9553 11.016\,76.9763 10.9849\,76.9623 10.9756\,76.981 10.9719\,76.9949 10.9888\,76.9879 10.9891\,76.9856 10.997\,77.0042 10.9963\,76.9884 11.0296\,77.0481 11.0271\,77.0613 11.0832\,77.1137 11.1152\,77.1329 11.1104\,77.1368 11.1293\,77.12 11.1275\,77.0967 11.1717\,77.1234 11.2007\,77.1496 11.2014\,77.1691 11.283\,77.1169 11.2967\,77.1489 11.306\,77.1507 11.3302\,77.1373 11.343\,77.0939 11.3307\,77.0744 11.3411\,77.0688 11.32\,77.0483 11.3159\,77.044 11.285\,77.0076 11.2645\,77.0077 11.2333\,76.9471 11.231\,76.94 11.2102\,76.8329 11.1752\,76.8281 11.1622\,76.8462 11.1478\,76.7662 11.1303\,76.7389 11.1044))" +CJJ,Cheongju Int'l,상당구,Cheongju,POINT(127.4833 36.6333),"POLYGON((127.4452 36.515\,127.4838 36.4775\,127.4806 36.4553\,127.5035 36.4548\,127.4898 36.4343\,127.5091 36.4073\,127.5765 36.4904\,127.5652 36.5305\,127.6125 36.5518\,127.6357 36.6001\,127.6946 36.5884\,127.7288 36.6023\,127.7467 36.5855\,127.7695 36.604\,127.7094 36.654\,127.7271 36.6825\,127.6645 36.73\,127.6507 36.702\,127.6178 36.7075\,127.4829 36.6448\,127.499 36.5883\,127.4738 36.5812\,127.4674 36.5621\,127.4873 36.547\,127.4465 36.5499\,127.4452 36.515))" +CJS,Ciudad Juarez Int'l,Juárez,Juárez,POINT(-106.487 31.7386),"POLYGON((-106.9516 31.3359\,-106.9225 31.2369\,-106.8822 
31.2341\,-106.8633 31.186\,-106.8219 31.1456\,-106.8213 31.12\,-106.7458 31.1379\,-106.7456 31.1603\,-106.659 31.1743\,-106.6147 31.152\,-106.5015 31.1525\,-106.5074 31.1873\,-106.3908 31.1876\,-106.3968 31.159\,-106.374 31.1547\,-106.3669 31.1871\,-106.3103 31.1537\,-106.2643 31.1766\,-106.2646 31.2546\,-106.2804 31.254\,-106.2804 31.272\,-106.2636 31.2731\,-106.2552 31.3231\,-106.2665 31.3227\,-106.2701 31.3706\,-106.281 31.3712\,-106.2595 31.4339\,-106.2349 31.4102\,-106.1811 31.4549\,-106.2091 31.4685\,-106.2465 31.5417\,-106.2809 31.5623\,-106.3496 31.6967\,-106.4154 31.7508\,-106.4534 31.7646\,-106.4887 31.7481\,-106.5282 31.7839\,-106.8676 31.7838\,-106.8308 31.6796\,-106.8775 31.6224\,-106.749 31.6034\,-106.75 31.4959\,-106.6888 31.432\,-106.7418 31.4003\,-106.8485 31.4012\,-106.8469 31.3718\,-106.9516 31.3359))" +CJU,Jeju Int'l,제주시,Jeju,POINT(126.5219 33.5097),"POLYGON((126.1357 33.3132\,126.1558 33.2761\,126.1919 33.2985\,126.2442 33.2735\,126.2447 33.2883\,126.3414 33.3438\,126.4332 33.3399\,126.4456 33.3588\,126.587 33.3742\,126.7049 33.4206\,126.7607 33.4217\,126.8845 33.4807\,127.0588 33.4751\,126.9486 33.599\,126.6808 33.6488\,126.3258 33.5492\,126.2132 33.4639\,126.1411 33.3522\,126.1357 33.3132))" +CKG,Chongqing Jiangbei Int'l,渝中区,Chongqing,POINT(106.5069 29.55),"POLYGON((106.4779 29.5515\,106.5202 29.5302\,106.5773 29.5494\,106.5886 29.5676\,106.4779 29.5515))" +//CKY,Conakry,Conakry,Conakry,POINT(-13.7122 9.5092),"POLYGON((-13.7279 9.5097\,-13.712 9.5282\,-13.7045 9.5194\,-13.7237 9.5087\,-13.7081 9.5034\,-13.6375 9.5605\,-13.5969 9.5727\,-13.556 9.5069\,-13.5215 9.4898\,-13.4863 9.5512\,-13.4738 9.6496\,-13.5654 9.6887\,-13.6081 9.7534\,-13.6246 9.6347\,-13.6543 9.6088\,-13.6665 9.5609\,-13.6962 9.5238\,-13.6989 9.521\,-13.7015 9.5201\,-13.6989 9.5242\,-13.7046 9.5223\,-13.7093 9.5271\,-13.7109 9.5279\,-13.7131 9.5285\,-13.7169 9.529\,-13.7279 9.5097))" +//CLE,Hopkins Int'l,Akron,Akron,POINT(-81.5219 41.0798),"POLYGON((-81.621 41.1446\,-81.5723 41.0195\,-81.5028 40.9977\,-81.4051 41.0569\,-81.621 41.1446)\,(-81.5899 41.1406\,-81.5874 41.1431\,-81.5874 41.1404\,-81.5899 41.1406)\,(-81.5811 41.1374\,-81.5806 41.1383\,-81.5806 41.1374\,-81.5811 41.1374)\,(-81.577 41.1404\,-81.5774 41.1409\,-81.5769 41.141\,-81.577 41.1404)\,(-81.5761 41.1388\,-81.5768 41.141\,-81.5761 41.141\,-81.5761 41.1388)\,(-81.5719 41.1412\,-81.5651 41.1428\,-81.564 41.1408\,-81.5719 41.1412)\,(-81.5361 41.0265\,-81.5389 41.0285\,-81.536 41.0285\,-81.5361 41.0265)\,(-81.5266 41.0253\,-81.5171 41.0285\,-81.5166 41.025\,-81.5266 41.0253)\,(-81.5141 41.0254\,-81.5152 41.0258\,-81.514 41.0258\,-81.5141 41.0254)\,(-81.5109 41.0191\,-81.5121 41.0178\,-81.5121 41.0198\,-81.5109 41.0191)\,(-81.5109 41.0166\,-81.5082 41.0179\,-81.5082 41.0166\,-81.5109 41.0166)\,(-81.4215 41.0548\,-81.4219 41.0628\,-81.4205 41.0555\,-81.4215 41.0548)\,(-81.4261 41.0625\,-81.4257 41.0645\,-81.4244 41.0643\,-81.4261 41.0625))" +CLJ,Someseni,Cluj-Napoca,Cluj-Napoca,POINT(23.5833 46.7667),"POLYGON((23.4992 46.7545\,23.5345 46.7552\,23.5292 46.7227\,23.5406 46.7145\,23.5293 46.7091\,23.5592 46.69\,23.5924 46.7119\,23.5881 46.7321\,23.7073 46.7379\,23.6868 46.7699\,23.7183 46.8083\,23.6824 46.8369\,23.631 46.8415\,23.6211 46.8602\,23.5701 46.8567\,23.5766 46.8437\,23.5422 46.8345\,23.5506 46.8145\,23.5314 46.8011\,23.5491 46.7857\,23.5204 46.7887\,23.5014 46.7739\,23.5306 46.7618\,23.4992 46.7545))" +CLO,Alfonso Bonilla Aragón Int'l,Perímetro Urbano Santiago de Cali,Cali,POINT(-76.5222 3.4206),"POLYGON((-76.5928 3.4663\,-76.579 
3.4497\,-76.5528 3.4402\,-76.5684 3.417\,-76.5558 3.4063\,-76.5612 3.3708\,-76.5423 3.361\,-76.5513 3.3329\,-76.525 3.3349\,-76.5129 3.389\,-76.4832 3.4142\,-76.4674 3.4\,-76.4615 3.4282\,-76.4839 3.4952\,-76.4913 3.5062\,-76.5088 3.4903\,-76.5284 3.4975\,-76.5465 3.4545\,-76.5928 3.4663))" +//CLT,Douglas Int'l,Gastonia,Gastonia,POINT(-81.1854 35.2494),"POLYGON((-81.3213 35.2032\,-81.2851 35.2013\,-81.2667 35.2375\,-81.0829 35.2164\,-81.2068 35.3119\,-81.3213 35.2032)\,(-81.2496 35.2892\,-81.2488 35.2904\,-81.2484 35.2892\,-81.2496 35.2892)\,(-81.1895 35.2348\,-81.1906 35.2325\,-81.1908 35.2338\,-81.1895 35.2348)\,(-81.1851 35.2319\,-81.1845 35.2336\,-81.1832 35.2319\,-81.1851 35.2319)\,(-81.178 35.2322\,-81.1768 35.2344\,-81.1749 35.2324\,-81.178 35.2322)\,(-81.1669 35.2118\,-81.1673 35.2133\,-81.1665 35.212\,-81.1669 35.2118)\,(-81.1538 35.2015\,-81.1503 35.2072\,-81.1482 35.2073\,-81.1538 35.2015))" +CMB,Katunayake Int'l,Gampaha District,Negombo,POINT(79.8386 7.2111),"POLYGON((79.6162 7.199\,79.6553 7.025\,79.8805 6.9812\,79.8792 6.9583\,79.8992 6.9476\,79.9861 6.9354\,80.0135 6.947\,80.0203 6.9247\,80.0555 6.9074\,80.0933 6.9124\,80.0986 6.9517\,80.124 6.9743\,80.1812 6.9814\,80.1629 7.0587\,80.2127 7.132\,80.1627 7.1269\,80.1454 7.1883\,80.1824 7.1941\,80.2003 7.2494\,80.1306 7.3283\,80.0592 7.2839\,80.0068 7.3212\,79.9459 7.2739\,79.9249 7.2847\,79.9273 7.2719\,79.9079 7.2653\,79.9085 7.2804\,79.8758 7.2827\,79.8599 7.2699\,79.6283 7.273\,79.6162 7.199))" +CMG,Corumbá Int'l,Corumbá,Corumbá,POINT(-57.6528 -19.0089),"POLYGON((-57.8863 -19.2475\,-57.2268 -19.2455\,-57.4312 -19.0025\,-57.313 -18.5291\,-57.4859 -18.0487\,-57.691 -19.0109\,-57.8863 -19.2475)\,(-57.6776 -19.1374\,-57.6142 -18.9994\,-57.4662 -19.0275\,-57.5229 -19.2194\,-57.6776 -19.1374))" +CMH,Port Columbus Int'l,Jefferson Township,Gahanna,POINT(-82.8637 40.0251),"POLYGON((-82.8645 40.0249\,-82.8041 39.9892\,-82.7994 40.008\,-82.7713 40.0045\,-82.7675 40.051\,-82.8173 40.0533\,-82.8108 40.039\,-82.844 40.045\,-82.8645 40.0249))" +CMN,Mohamed V Int'l,Province de Médiouna إقليم مديونة,Mediouna,POINT(-7.51 33.45),"POLYGON((-7.5997 33.4739\,-7.5866 33.4628\,-7.5957 33.4558\,-7.5235 33.4108\,-7.4572 33.4512\,-7.3534 33.4679\,-7.373 33.4728\,-7.3788 33.5237\,-7.4168 33.5617\,-7.4528 33.5567\,-7.4781 33.5808\,-7.484 33.5652\,-7.5149 33.5684\,-7.5432 33.5514\,-7.5344 33.5324\,-7.5582 33.5204\,-7.543 33.5021\,-7.5997 33.4739))" +CMW,Ignacio Agramonte,Ciudad de Camagüey,Camagüey,POINT(-77.9075 21.3839),"POLYGON((-77.978 21.3932\,-77.9534 21.3612\,-77.9318 21.3654\,-77.9262 21.3337\,-77.9117 21.3294\,-77.9141 21.3466\,-77.8956 21.3516\,-77.8531 21.3371\,-77.8861 21.3965\,-77.8691 21.3992\,-77.871 21.412\,-77.8292 21.4284\,-77.8278 21.4336\,-77.8636 21.4318\,-77.8938 21.4078\,-77.9062 21.4234\,-77.9545 21.418\,-77.978 21.3932))" +CNF,Tancredo Neves Int'l,Belo Horizonte,Belo Horizonte,POINT(-43.9333 -19.9167),"POLYGON((-44.0633 -19.9754\,-44.0101 -20.0391\,-44.0182 -20.0543\,-44.0028 -20.0579\,-43.9813 -20.0139\,-43.8697 -19.9278\,-43.9111 -19.8745\,-43.8572 -19.8563\,-43.8616 -19.8265\,-43.8989 -19.8024\,-43.9444 -19.7975\,-43.9458 -19.7772\,-43.964 -19.7915\,-43.9897 -19.7845\,-44.0202 -19.8377\,-44.0084 -19.8536\,-44.0283 -19.915\,-44.0062 -19.9552\,-44.036 -19.9837\,-44.0633 -19.9754))" +CNS,Cairns Int'l,Cairns Regional,Cairns,POINT(145.78 -16.92),"POLYGON((145.6146 -17.0515\,145.8351 -17.4974\,146.125 -17.403\,145.964 -16.8249\,145.6335 -16.6985\,145.6146 -17.0515)\,(145.9648 -17.1705\,145.9184 -16.8642\,145.9579 
-16.8968\,145.9648 -17.1705)\,(145.8947 -16.8862\,145.8952 -16.8841\,145.8958 -16.8874\,145.8947 -16.8862)\,(145.9009 -16.9942\,145.9009 -16.9934\,145.9017 -16.9941\,145.9009 -16.9942))" +CNX,Chiang Mai Int'l,จังหวัดเชียงใหม่,Chiang Mai,POINT(98.9986 18.7953),"POLYGON((98.0808 18.8236\,98.2125 18.4494\,98.0861 17.9967\,98.4292 17.2494\,98.5694 17.2953\,98.4933 17.8068\,98.8983 17.7453\,98.7037 18.435\,99.1428 18.7042\,99.3536 18.5794\,99.2617 19.6455\,99.5657 20.1239\,99.0778 20.102\,99.0147 19.7937\,98.4677 19.6949\,98.6487 19.0805\,98.3466 19.0303\,98.1858 19.1644\,98.0808 18.8236))" +COK,Cochin Int'l,Kanayannur,Kochi,POINT(76.28 9.97),"POLYGON((76.2395 10.0712\,76.2461 9.975\,76.2684 9.9788\,76.2808 9.9624\,76.3017 9.8903\,76.3531 9.8835\,76.3726 9.8367\,76.3885 9.8443\,76.4279 9.8245\,76.469 9.86\,76.46 9.887\,76.4242 9.8991\,76.4161 9.9265\,76.3801 9.9425\,76.356 9.9768\,76.3796 10.0372\,76.3458 10.0779\,76.3081 10.0621\,76.3017 10.0433\,76.2843 10.0725\,76.2771 10.0592\,76.2395 10.0712))" +COO,Cotonou Cadjehon,Cotonou,Cotonou,POINT(2.4333 6.3667),"POLYGON((2.3589 6.3987\,2.361 6.3476\,2.4285 6.3373\,2.4196 6.3479\,2.4958 6.3596\,2.4783 6.3887\,2.4433 6.397\,2.3589 6.3987))" +COR,Ingeniero Ambrosio L.V. Taravella Int'l,Municipio de Villa Allende,Villa Allende,POINT(-64.3 -31.3),"POLYGON((-64.3248 -31.2757\,-64.3129 -31.308\,-64.2206 -31.3085\,-64.2699 -31.2993\,-64.2842 -31.2649\,-64.3053 -31.2809\,-64.3248 -31.2757))" +CPE,Ign. Alberto Ongay Int'l,Municipio de Campeche,Campeche,POINT(-90.5306 19.85),"POLYGON((-90.67 19.7354\,-90.3101 19.4546\,-90.2911 19.2548\,-90.1214 19.3762\,-89.8653 19.2939\,-90.0313 19.7744\,-90.2382 19.9101\,-90.449 19.9612\,-90.67 19.7354))" +CPH,Copenhagen,Københavns Kommune,Copenhagen,POINT(12.5683 55.6761),"POLYGON((12.453 55.7122\,12.4785 55.6563\,12.5332 55.6318\,12.5955 55.6802\,12.5963 55.7125\,12.6398 55.7224\,12.6022 55.7257\,12.5905 55.7097\,12.5659 55.7327\,12.453 55.7122)\,(12.4913 55.6791\,12.5381 55.6977\,12.5573 55.6817\,12.5233 55.6665\,12.4913 55.6791))" +CPT,Cape Town Int'l,City of Cape Town,Mitchells Plain,POINT(18.6181 -34.0506),"POLYGON((18.3072 -34.0395\,18.4724 -34.3583\,18.4684 -34.1095\,18.7837 -34.087\,18.8502 -34.2703\,19.0053 -34.0743\,18.7444 -34.0248\,18.8219 -33.6418\,18.4555 -33.4713\,18.3413 -33.5586\,18.4879 -33.8779\,18.3072 -34.0395))" +CRK,Clark Int'l,Angeles,Angeles City,POINT(120.5847 15.1472),"POLYGON((120.4781 15.1569\,120.5647 15.1359\,120.5972 15.1082\,120.6044 15.1287\,120.6354 15.1372\,120.6372 15.1604\,120.6345 15.1794\,120.5695 15.1786\,120.5516 15.1627\,120.4946 15.1731\,120.4781 15.1569))" +CRL,Gosselies,Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest,Brussels,POINT(4.3525 50.8467),"POLYGON((4.2438 50.8196\,4.3065 50.8124\,4.3312 50.7755\,4.3829 50.7637\,4.4823 50.793\,4.4472 50.8083\,4.4767 50.8204\,4.4671 50.8446\,4.4209 50.8677\,4.4371 50.8788\,4.4329 50.8947\,4.4035 50.9139\,4.3775 50.897\,4.2938 50.889\,4.2829 50.8378\,4.2438 50.8196))" +CRP,Corpus Christi Int'l,Corpus Christi,Corpus Christi,POINT(-97.3767 27.7254),"POLYGON((-97.6827 27.8543\,-97.2076 27.4836\,-96.9611 27.8848\,-97.1134 27.746\,-97.2637 27.8807\,-97.6827 27.8543)\,(-97.6102 27.882\,-97.6144 27.8927\,-97.6121 27.8922\,-97.6102 27.8905\,-97.6102 27.882)\,(-97.6103 27.8716\,-97.6107 27.8756\,-97.6088 27.8707\,-97.6103 27.8716)\,(-97.4839 27.8055\,-97.4205 27.8196\,-97.3833 27.8379\,-97.4238 27.8021\,-97.4839 27.8055)\,(-97.5434 27.8325\,-97.5415 27.8321\,-97.5416 27.8314\,-97.5434 27.8325)\,(-97.5374 27.8289\,-97.5361 
27.8307\,-97.5362 27.8281\,-97.5374 27.8289)\,(-97.5203 27.8223\,-97.5217 27.8233\,-97.5203 27.8231\,-97.5203 27.8223)\,(-97.5131 27.8215\,-97.5128 27.8217\,-97.5128 27.8215\,-97.5131 27.8215))" +CSG,Centre Spatial Guyanais,Kourou,Kourou,POINT(-52.6499 5.16),"POLYGON((-52.9815 5.0655\,-52.8889 4.6241\,-52.7781 4.5108\,-52.5613 5.1108\,-52.8069 5.3159\,-52.9815 5.0655))" +CSX,Changsha Huanghua Int'l,芦淞区,Zhuzhou,POINT(113.1469 27.8407),"POLYGON((113.1087 27.7807\,113.1143 27.76\,113.1345 27.7725\,113.1687 27.7554\,113.1826 27.7734\,113.2145 27.7692\,113.2109 27.7834\,113.2462 27.7595\,113.2663 27.766\,113.2793 27.7532\,113.2974 27.7659\,113.3236 27.7474\,113.356 27.795\,113.3533 27.8512\,113.2912 27.8818\,113.2057 27.8341\,113.1556 27.8311\,113.1312 27.8607\,113.1216 27.8509\,113.1404 27.8367\,113.1409 27.8075\,113.1087 27.7807))" +CTA,Catania Fontanarossa,Catania,Catania,POINT(15.0903 37.5),"POLYGON((14.9359 37.408\,14.9823 37.3737\,15.0265 37.3785\,15.0329 37.3622\,15.0916 37.3583\,15.0856 37.4785\,15.1257 37.5346\,15.1183 37.548\,15.1009 37.5344\,15.061 37.5388\,15.0406 37.562\,15.0565 37.5339\,15.0192 37.4946\,15.0263 37.4662\,14.978 37.4527\,14.9465 37.4647\,14.9359 37.408))" +CTG,Rafael Nunez,Turbaco,Turbaco,POINT(-75.3333 10.35),"POLYGON((-75.4736 10.3655\,-75.4639 10.3278\,-75.3878 10.274\,-75.3824 10.2923\,-75.3399 10.2996\,-75.2759 10.3582\,-75.3432 10.4182\,-75.4475 10.395\,-75.4736 10.3655))" +CTM,Chetumal Int'l,Chetumal,Chetumal,POINT(-88.3053 18.5036),"POLYGON((-88.3577 18.5122\,-88.3369 18.4917\,-88.2861 18.4939\,-88.2612 18.5463\,-88.2944 18.5377\,-88.2787 18.5529\,-88.2814 18.5635\,-88.286 18.5475\,-88.3016 18.5534\,-88.3077 18.5334\,-88.3577 18.5122))" +CTU,Chengdushuang Liu,青羊区,Chengdu,POINT(104.0633 30.66),"POLYGON((103.8943 30.6826\,103.9276 30.6611\,104.0577 30.6494\,104.0872 30.6725\,104.0605 30.6849\,104.0443 30.6655\,104.0015 30.6892\,104.0057 30.7027\,103.9859 30.7089\,103.9789 30.6949\,103.9414 30.7267\,103.8943 30.6826))" +CUL,Federal de Bachigualato Int'l,Culiacán Rosales,Culiacán,POINT(-107.3939 24.8069),"POLYGON((-107.5182 24.775\,-107.4815 24.755\,-107.4655 24.7686\,-107.4714 24.7583\,-107.457 24.7527\,-107.4364 24.7724\,-107.4453 24.7438\,-107.4404 24.7573\,-107.4431 24.7467\,-107.4131 24.7413\,-107.405 24.7613\,-107.392 24.7442\,-107.405 24.7352\,-107.3878 24.7254\,-107.3768 24.7564\,-107.3504 24.7392\,-107.3534 24.7727\,-107.3403 24.7633\,-107.3441 24.7913\,-107.328 24.7984\,-107.3269 24.8084\,-107.357 24.8098\,-107.3564 24.8289\,-107.3894 24.8584\,-107.3739 24.8763\,-107.3774 24.8853\,-107.3962 24.882\,-107.404 24.8324\,-107.4258 24.8538\,-107.4338 24.8232\,-107.4645 24.8377\,-107.4606 24.8159\,-107.4323 24.8072\,-107.4687 24.8115\,-107.4713 24.8024\,-107.4809 24.8042\,-107.4822 24.7987\,-107.4432 24.8012\,-107.4565 24.7939\,-107.4457 24.785\,-107.5182 24.775)\,(-107.4455 24.7498\,-107.4448 24.7498\,-107.4448 24.7496\,-107.4455 24.7498)\,(-107.3409 24.7974\,-107.339 24.8038\,-107.3364 24.7984\,-107.3409 24.7974))" +CUN,Cancún,Cancún,Cancún,POINT(-86.8475 21.1606),"POLYGON((-86.9712 21.1024\,-86.9209 21.0603\,-86.9156 21.1271\,-86.9111 21.0936\,-86.9002 21.1309\,-86.8971 21.0665\,-86.8949 21.1282\,-86.8835 21.1117\,-86.8591 21.1307\,-86.8632 21.1007\,-86.8413 21.0961\,-86.8391 21.0737\,-86.8196 21.156\,-86.8038 21.1589\,-86.792 21.1335\,-86.7626 21.1337\,-86.7701 21.1215\,-86.7618 21.131\,-86.751 21.1368\,-86.7483 21.1356\,-86.7607 21.1134\,-86.7702 21.1193\,-86.7883 21.0708\,-86.7872 21.0298\,-86.7406 21.1376\,-86.7871 21.1453\,-86.8031 21.1601\,-86.8035 
21.2077\,-86.8195 21.1831\,-86.818 21.2\,-86.8396 21.213\,-86.8631 21.1963\,-86.8847 21.2048\,-86.8778 21.1928\,-86.8966 21.1709\,-86.925 21.1679\,-86.9191 21.155\,-86.9355 21.1484\,-86.9225 21.1266\,-86.9516 21.112\,-86.9566 21.1282\,-86.9712 21.1024))" +CUU,General R F Villalobos Int'l,Municipio de Chihuahua,Chihuahua,POINT(-106.0889 28.6353),"POLYGON((-106.4821 29.5672\,-106.5983 28.7834\,-106.2176 28.3939\,-106.2978 28.1362\,-105.9745 28.1365\,-105.7028 28.342\,-106.2081 29.0149\,-106.2233 29.1316\,-106.0372 29.1519\,-105.929 29.7577\,-106.5943 29.7428\,-106.4821 29.5672))" +CUZ,Velazco Astete Int'l,Cusco,Cusco,POINT(-71.9722 -13.525),"POLYGON((-72.168 -13.5939\,-72.1322 -13.6393\,-72.1147 -13.6284\,-72.0917 -13.6492\,-72.0461 -13.6387\,-72.0221 -13.6134\,-71.9284 -13.6154\,-71.9156 -13.5936\,-71.8432 -13.6307\,-71.8199 -13.6077\,-71.8356 -13.5908\,-71.8159 -13.5762\,-71.8209 -13.5206\,-71.8046 -13.5083\,-71.8465 -13.4866\,-71.8838 -13.4978\,-71.9317 -13.4676\,-71.9292 -13.4361\,-71.9802 -13.4295\,-72.0182 -13.4748\,-72.0514 -13.4898\,-72.0615 -13.5279\,-72.1057 -13.554\,-72.1587 -13.5438\,-72.168 -13.5939))" +CVG,Greater Cincinnati Int'l,Cincinnati,Cincinnati,POINT(-84.506 39.1413),"POLYGON((-84.5868 39.0839\,-84.6885 39.1023\,-84.6199 39.0734\,-84.3697 39.0737\,-84.4607 39.2178\,-84.5794 39.2048\,-84.5868 39.0839)\,(-84.4838 39.163\,-84.5057 39.1798\,-84.4817 39.1892\,-84.4838 39.163)\,(-84.4761 39.2055\,-84.4922 39.2104\,-84.4744 39.2099\,-84.4761 39.2055)\,(-84.4429 39.1448\,-84.4588 39.1782\,-84.4346 39.1664\,-84.4429 39.1448)\,(-84.4637 39.1984\,-84.4681 39.2016\,-84.4634 39.2026\,-84.4637 39.1984))" +CWB,Afonso Pena Int'l,Curitiba,Curitiba,POINT(-49.2711 -25.4297),"POLYGON((-49.3891 -25.4298\,-49.372 -25.5134\,-49.3379 -25.5158\,-49.3577 -25.6435\,-49.2701 -25.61\,-49.2222 -25.561\,-49.2259 -25.538\,-49.1843 -25.4769\,-49.2034 -25.4352\,-49.1949 -25.3785\,-49.2297 -25.3497\,-49.2737 -25.345\,-49.3027 -25.387\,-49.3415 -25.3467\,-49.3891 -25.4298))" +CWL,Cardiff,Cardiff,Caerdydd,POINT(-3.1792 51.4817),"POLYGON((-3.3438 51.5299\,-3.3366 51.5088\,-3.2984 51.4999\,-3.271 51.4651\,-3.2189 51.4749\,-3.1597 51.4428\,-3.0774 51.4943\,-3.0904 51.5099\,-3.0689 51.5202\,-3.1637 51.5605\,-3.2616 51.5378\,-3.2744 51.5512\,-3.3101 51.549\,-3.3438 51.5299))" +CYB,Gerrard Smith,Ciudad de Ciego de Ávila,Ciego de Ávila,POINT(-78.7631 21.8481),"POLYGON((-78.799 21.8577\,-78.7793 21.8247\,-78.724 21.8352\,-78.7654 21.8767\,-78.7649 21.8581\,-78.799 21.8577))" +CZL,Mohamed Boudiaf Int'l,Constantine ⵇⵙⴻⵟⵉⵏⴰ قسنطينة,Constantine,POINT(6.6147 36.365),"POLYGON((6.5284 36.3616\,6.5586 36.3288\,6.5554 36.2882\,6.575 36.307\,6.6241 36.2589\,6.6371 36.2686\,6.6176 36.2899\,6.6564 36.2706\,6.651 36.2911\,6.6969 36.319\,6.6778 36.3461\,6.6808 36.3759\,6.7249 36.3935\,6.7651 36.4478\,6.7317 36.4268\,6.63 36.4235\,6.6097 36.3838\,6.529 36.3896\,6.5284 36.3616))" +CZM,Cozumel Int'l,San Miguel de Cozumel,Cozumel,POINT(-86.9493 20.5104),"POLYGON((-86.9762 20.4747\,-86.9709 20.4758\,-86.9637 20.4951\,-86.9659 20.474\,-86.9293 20.476\,-86.928 20.5051\,-86.9083 20.5089\,-86.92 20.5143\,-86.9154 20.5401\,-86.9349 20.5396\,-86.9141 20.5555\,-86.9279 20.5501\,-86.9378 20.5368\,-86.9467 20.5149\,-86.9606 20.5018\,-86.9762 20.4747))" +DAC,Zia Int'l Dhaka,ঢাকা মহানগর,Dhaka,POINT(90.3889 23.7639),"POLYGON((90.3298 23.7508\,90.3638 23.7101\,90.4004 23.71\,90.4494 23.6681\,90.5129 23.7092\,90.4713 23.7816\,90.4882 23.8262\,90.4625 23.8474\,90.4718 23.8671\,90.4577 23.8947\,90.3932 23.8803\,90.3848 23.8981\,90.3615 
23.8927\,90.3372 23.8469\,90.3442 23.7685\,90.3298 23.7508))" +DAD,Da Nang,Thị xã Điện Bàn,Quảng Hà,POINT(108.2667 15.9333),"POLYGON((108.1282 15.8903\,108.1684 15.8334\,108.2368 15.8474\,108.2468 15.8307\,108.2596 15.8573\,108.3103 15.8686\,108.2928 15.8987\,108.33 15.9217\,108.2896 15.9674\,108.2494 15.9715\,108.2107 15.9428\,108.2062 15.9617\,108.1843 15.9653\,108.1789 15.9398\,108.1512 15.9502\,108.1282 15.8903))" +DAL,Dallas Love Field,Dallas County,Irving,POINT(-96.9702 32.8583),"POLYGON((-97.0384 32.5487\,-96.5233 32.5453\,-96.5169 32.9824\,-97.0311 32.9897\,-97.0384 32.5487))" +DAM,Damascus Int'l,ناحية ببيلا,Qabr as Sitt,POINT(36.3361 33.4472),"POLYGON((36.2706 33.4534\,36.2628 33.4151\,36.3557 33.3901\,36.354 33.3774\,36.4235 33.3881\,36.3935 33.4027\,36.3907 33.4401\,36.3256 33.4897\,36.3192 33.4448\,36.2706 33.4534))" +DAR,Julius Nyerere Int'l,Ilala Municipal,Dar es Salaam,POINT(39.2803 -6.8161),"POLYGON((39.0222 -7.0258\,39.0387 -7.05\,39.0684 -7.0314\,39.0697 -7.0573\,39.1131 -7.0731\,39.1083 -7.0867\,39.1151 -7.0939\,39.1516 -7.0197\,39.1744 -7.011\,39.17 -6.9953\,39.2363 -6.9128\,39.2106 -6.8838\,39.245 -6.8641\,39.245 -6.8412\,39.2991 -6.8116\,39.2756 -6.7931\,39.243 -6.8238\,39.181 -6.8098\,39.1463 -6.8325\,39.1209 -6.8304\,39.1088 -6.9151\,39.0881 -6.9231\,39.0941 -6.9334\,39.0596 -6.9731\,39.0745 -6.9796\,39.0652 -7.014\,39.0509 -7.0283\,39.0222 -7.0258))" +DAV,Enrique Malek Int'l,Distrito David,David,POINT(-82.4333 8.4333),"POLYGON((-82.5596 8.7703\,-82.5238 8.5571\,-82.5318 8.5088\,-82.519 8.4666\,-82.5354 8.4555\,-82.5228 8.4017\,-82.4432 8.2916\,-82.4514 8.2777\,-82.4287 8.2724\,-82.4304 8.2537\,-82.3954 8.2239\,-82.3208 8.2344\,-82.238 8.2959\,-82.2087 8.3779\,-82.2235 8.4123\,-82.2054 8.4276\,-82.3001 8.4351\,-82.2842 8.4578\,-82.3228 8.5693\,-82.3529 8.6168\,-82.3738 8.6185\,-82.3954 8.648\,-82.3798 8.6203\,-82.4134 8.5941\,-82.3653 8.4887\,-82.3993 8.5095\,-82.41 8.4736\,-82.4375 8.4854\,-82.4466 8.5213\,-82.5102 8.6085\,-82.5379 8.6893\,-82.5343 8.7446\,-82.5596 8.7703))" +DAY,James M. 
Cox Dayton Int'l,Vandalia,Vandalia,POINT(-84.193 39.8791),"POLYGON((-84.2555 39.8794\,-84.1885 39.8695\,-84.1897 39.8417\,-84.2086 39.8398\,-84.176 39.8346\,-84.1576 39.923\,-84.1893 39.902\,-84.198 39.9116\,-84.2037 39.8906\,-84.2555 39.8794))" +DBO,Dubbo,Dubbo,Dubbo,POINT(148.6011 -32.2569),"POLYGON((148.8489 -32.1043\,148.3757 -32.2568\,148.5186 -32.2499\,148.3809 -32.381\,148.4594 -32.496\,148.7541 -32.4191\,148.8489 -32.1043))" +DCA,Washington Nat'l,Charles County,Waldorf,POINT(-76.9194 38.6085),"POLYGON((-77.3235 38.4337\,-77.3051 38.3753\,-77.2617 38.3319\,-77.1652 38.345\,-77.1386 38.3677\,-77.0852 38.3683\,-77.0414 38.4005\,-77.0116 38.3745\,-77.0269 38.3033\,-76.9621 38.257\,-76.949 38.2084\,-76.8689 38.1713\,-76.8209 38.2705\,-76.8537 38.3356\,-76.8486 38.3639\,-76.8714 38.39\,-76.8223 38.4289\,-76.7744 38.5092\,-76.7118 38.5101\,-76.6841 38.4951\,-76.6625 38.5253\,-76.7404 38.5585\,-76.7476 38.6174\,-76.8627 38.6585\,-77.0021 38.6548\,-77.0478 38.6162\,-77.083 38.7073\,-77.1328 38.674\,-77.1304 38.6349\,-77.2464 38.5933\,-77.3102 38.4953\,-77.3235 38.4337))" +DEL,Indira Gandhi Int'l,Najafgarh,Najafgarh,POINT(76.9798 28.6092),"POLYGON((76.9065 28.6236\,76.9385 28.5937\,76.9358 28.5105\,76.9909 28.5136\,76.9693 28.5482\,76.9892 28.554\,77.0298 28.652\,76.9711 28.6581\,76.9353 28.6184\,76.9065 28.6236))" +//DEN,Denver Int'l,Denver,Denver,POINT(-104.8758 39.762),"POLYGON((-105.1099 39.6271\,-104.8853 39.6241\,-104.5997 39.8991\,-105.0643 39.7911\,-105.1099 39.6271)\,(-105.064 39.6534\,-105.0649 39.6538\,-105.064 39.6538\,-105.064 39.6534)\,(-104.9282 39.6966\,-104.9407 39.7111\,-104.927 39.7082\,-104.9282 39.6966)\,(-104.9207 39.6682\,-104.9176 39.673\,-104.9131 39.6562\,-104.9207 39.6682)\,(-104.9291 39.7013\,-104.9293 39.7021\,-104.9281 39.7013\,-104.9291 39.7013)\,(-104.9027 39.6857\,-104.8988 39.6857\,-104.8988 39.6834\,-104.9027 39.6857))" +DET,Detroit City,Detroit,Detroit,POINT(-83.1024 42.3834),"POLYGON((-83.288 42.4427\,-83.264 42.3417\,-83.2154 42.3289\,-83.1966 42.3509\,-83.1477 42.3519\,-83.1567 42.3278\,-83.1405 42.2977\,-83.1671 42.2896\,-83.1609 42.2552\,-83.1375 42.2828\,-83.0983 42.2867\,-83.0634 42.3179\,-82.924 42.352\,-82.9469 42.387\,-82.9217 42.3953\,-82.9104 42.419\,-82.9514 42.4358\,-82.9409 42.4504\,-83.288 42.4427)\,(-83.1219 42.4175\,-83.0425 42.4044\,-83.0406 42.3848\,-83.0547 42.3796\,-83.0738 42.3987\,-83.1023 42.3882\,-83.1219 42.4175))" +DFW,Dallas-Ft. Worth Int'l,Dallas County,Irving,POINT(-96.9702 32.8583),"POLYGON((-97.0384 32.5487\,-96.5233 32.5453\,-96.5169 32.9824\,-97.0311 32.9897\,-97.0384 32.5487))" +DGO,Durango Int'l,Municipio de Durango,Colonia General Felipe Ángeles,POINT(-104.6 23.9167),"POLYGON((-105.5725 24.0281\,-105.2493 23.8262\,-105.275 23.5594\,-104.8501 23.5473\,-104.8098 23.3975\,-104.4454 23.6093\,-104.5392 23.7687\,-104.102 24.1203\,-104.1577 24.3222\,-104.5644 24.3662\,-104.8493 24.2145\,-105.1435 24.4247\,-105.1256 24.2989\,-105.5725 24.0281))" +DIL,Presidente Nicolau Lobato Int'l,Dili,Dili,POINT(125.5783 -8.5536),"POLYGON((125.4942 -8.5629\,125.5309 -8.5721\,125.5275 -8.6112\,125.5445 -8.623\,125.6168 -8.5916\,125.6307 -8.6125\,125.6322 -8.5825\,125.6459 -8.5738\,125.6776 -8.5931\,125.698 -8.5592\,125.7947 -8.585\,125.793 -8.5478\,125.8378 -8.5407\,125.8614 -8.5093\,125.8352 -8.4854\,125.6988 -8.5367\,125.6082 -8.5191\,125.6061 -8.5461\,125.5751 -8.5537\,125.5342 -8.5378\,125.4942 -8.5629))" +DIR,Aba Tenna D. 
Yilma Int'l,ድሬዳዋ,Dire Dawa,POINT(41.8667 9.6),"POLYGON((41.7495 9.6373\,41.7355 9.4725\,41.7739 9.4921\,41.9143 9.4735\,41.9835 9.5095\,42.02 9.5092\,42.0757 9.4747\,42.1535 9.5574\,42.1258 9.6155\,42.2015 9.6541\,42.2596 9.6526\,42.3407 9.7206\,42.2519 9.7845\,42.2143 9.7847\,42.1867 9.7745\,42.0317 9.6195\,41.8742 9.6849\,41.8096 9.6415\,41.7748 9.6547\,41.7495 9.6373))" +DKR,Léopold Sedar Senghor Int'l,Dakar,Dakar,POINT(-17.4467 14.6928),"POLYGON((-17.549 14.7474\,-17.4699 14.674\,-17.4483 14.672\,-17.4319 14.6469\,-17.417 14.7017\,-17.4304 14.7179\,-17.4128 14.7337\,-17.43 14.764\,-17.4091 14.7753\,-17.4169 14.7966\,-17.549 14.7474))" +DLC,Dalian Zhoushuizi Int'l,西岗区,Dalian,POINT(121.6 38.9),"POLYGON((121.5819 38.9317\,121.6004 38.921\,121.599 38.8725\,121.6208 38.8628\,121.645 38.8814\,121.6212 38.9171\,121.6453 38.9444\,121.6117 38.9277\,121.6114 38.9493\,121.5918 38.9477\,121.5819 38.9317))" +DME,Moscow Domodedovo Int'l,городской округ Балашиха,Balashikha,POINT(37.9667 55.8167),"POLYGON((37.7761 55.8727\,37.7774 55.8564\,37.8438 55.8148\,37.8431 55.7792\,37.8821 55.7724\,37.891 55.7422\,37.8642 55.7347\,37.8765 55.7205\,37.9236 55.7312\,37.9873 55.7029\,38.0137 55.7131\,38.0712 55.6963\,38.1589 55.7059\,38.0888 55.7415\,38.1448 55.7462\,38.0536 55.7752\,38.0729 55.7938\,38.0628 55.8196\,38.0242 55.8469\,37.9834 55.8432\,37.9708 55.8606\,37.9034 55.8797\,37.7761 55.8727)\,(37.8453 55.8136\,37.8722 55.8225\,37.8993 55.8163\,37.8666 55.8042\,37.8453 55.8136)\,(37.8689 55.8229\,37.873 55.8251\,37.876 55.8249\,37.8689 55.8229))" +DMK,Don Muang Int'l,กรุงเทพมหานคร,Bangkok,POINT(100.4942 13.7525),"POLYGON((100.3279 13.8042\,100.3394 13.655\,100.3778 13.6209\,100.3951 13.5541\,100.4201 13.5454\,100.4048 13.5081\,100.4541 13.2191\,100.4537 13.606\,100.4697 13.5927\,100.521 13.6051\,100.5204 13.6756\,100.5467 13.6717\,100.5565 13.7061\,100.5879 13.6914\,100.5793 13.6692\,100.635 13.6475\,100.6554 13.6723\,100.6983 13.6562\,100.7164 13.7184\,100.7884 13.7166\,100.8593 13.6894\,100.9099 13.7905\,100.9386 13.8143\,100.9052 13.8482\,100.9141 13.9462\,100.6868 13.9163\,100.6898 13.931\,100.574 13.9546\,100.5437 13.8497\,100.5094 13.8256\,100.5012 13.8008\,100.4665 13.7891\,100.3279 13.8042))" +//DNK,Voloskoye,Дніпровська міська громада,Dnipro,POINT(35.04 48.4675),"POLYGON((34.7489 48.484\,35.0037 48.3559\,35.2422 48.4336\,34.9294 48.5764\,34.7489 48.484)\,(34.9428 48.4121\,34.9452 48.4263\,34.9561 48.4083\,34.9428 48.4121)\,(34.9675 48.3904\,34.968 48.4005\,34.9742 48.3915\,34.9675 48.3904)\,(34.9868 48.3967\,34.9858 48.4003\,34.9883 48.3967\,34.9868 48.3967)\,(35.1113 48.5152\,35.1018 48.5281\,35.1168 48.5129\,35.1113 48.5152))" +DNMA,Maiduguri Int'l,Maiduguri,Maiduguri,POINT(13.15 11.8333),"POLYGON((13.0762 11.7872\,13.1036 11.7535\,13.1275 11.7929\,13.1841 11.8065\,13.1801 11.8569\,13.1501 11.9027\,13.1131 11.8846\,13.092 11.912\,13.0804 11.9076\,13.0772 11.8738\,13.1118 11.8382\,13.0919 11.835\,13.1019 11.8072\,13.0762 11.7872))" +DOK,Donetsk,Центрально-Міський район,Makiivka,POINT(37.9611 48.0556),"POLYGON((37.9173 48.0247\,37.9874 48.047\,38.0021 48.0603\,37.9898 48.0978\,37.9555 48.1017\,37.9603 48.0529\,37.9173 48.0247))" +DOL,St Gatien,Le Havre,Le Havre,POINT(0.1 49.49),"POLYGON((0.0668 49.5185\,0.0876 49.5147\,0.0903 49.4824\,0.1126 49.4583\,0.1951 49.4592\,0.1954 49.501\,0.1686 49.5355\,0.152 49.524\,0.0776 49.5401\,0.0668 49.5185))" +DPS,Bali Int'l,Denpasar Utara,Denpasar,POINT(115.2167 -8.65),"POLYGON((115.1861 -8.6222\,115.1921 -8.6513\,115.224 -8.6569\,115.2384 -8.6102\,115.2151 
-8.595\,115.1861 -8.6222))" +DRS,Dresden,Dresden,Dresden,POINT(13.74 51.05),"POLYGON((13.5793 51.0506\,13.6038 51.0323\,13.6692 51.0351\,13.6653 51.0166\,13.6869 51.0023\,13.7309 51.0101\,13.8183 50.9749\,13.8586 50.9929\,13.8722 50.9836\,13.8848 50.9822\,13.9121 51.0168\,13.9628 51.0313\,13.9661 51.0548\,13.9535 51.0739\,13.9223 51.061\,13.8834 51.0726\,13.9109 51.0941\,13.8726 51.1274\,13.8892 51.1429\,13.8797 51.1543\,13.8312 51.1453\,13.7989 51.1698\,13.7462 51.1777\,13.7673 51.1456\,13.7129 51.1341\,13.6882 51.0996\,13.7011 51.0913\,13.5987 51.0984\,13.5793 51.0506))" +DSM,Des Moines Int'l,West Des Moines,West Des Moines,POINT(-93.7806 41.5521),"POLYGON((-93.8869 41.5143\,-93.8815 41.4966\,-93.8552 41.511\,-93.8382 41.501\,-93.8145 41.5317\,-93.788 41.5363\,-93.7743 41.5265\,-93.7906 41.494\,-93.7694 41.5143\,-93.7367 41.5087\,-93.7656 41.5049\,-93.7626 41.4905\,-93.7411 41.4906\,-93.732 41.5121\,-93.6983 41.5121\,-93.7035 41.5882\,-93.7243 41.6004\,-93.8434 41.6004\,-93.8377 41.5774\,-93.8666 41.5561\,-93.8474 41.565\,-93.8331 41.5626\,-93.8667 41.549\,-93.8671 41.5202\,-93.8869 41.5143))" +DTW,Detroit Metro,Detroit,Detroit,POINT(-83.1024 42.3834),"POLYGON((-83.288 42.4427\,-83.264 42.3417\,-83.2154 42.3289\,-83.1966 42.3509\,-83.1477 42.3519\,-83.1567 42.3278\,-83.1405 42.2977\,-83.1671 42.2896\,-83.1609 42.2552\,-83.1375 42.2828\,-83.0983 42.2867\,-83.0634 42.3179\,-82.924 42.352\,-82.9469 42.387\,-82.9217 42.3953\,-82.9104 42.419\,-82.9514 42.4358\,-82.9409 42.4504\,-83.288 42.4427)\,(-83.1219 42.4175\,-83.0425 42.4044\,-83.0406 42.3848\,-83.0547 42.3796\,-83.0738 42.3987\,-83.1023 42.3882\,-83.1219 42.4175))" +DUB,Dublin,Fingal,Finglas,POINT(-6.2181 53.4597),"POLYGON((-6.475 53.3635\,-6.0517 53.3612\,-6.2031 53.4648\,-6.0797 53.5531\,-6.3893 53.5911\,-6.475 53.3635))" +DUD,Dunedin Int'l,Dunedin City,Mosgiel,POINT(170.3486 -45.875),"POLYGON((169.7365 -45.7137\,170.0507 -45.7788\,170.2036 -46.0519\,170.7504 -45.8678\,170.5985 -45.7313\,170.7271 -45.549\,170.2094 -45.5067\,170.4209 -45.3079\,170.3138 -45.2358\,169.7365 -45.7137))" +DUR,Louis Botha,eThekwini Metropolitan Municipality,Durban,POINT(31.05 -29.8833),"POLYGON((30.56 -29.7283\,30.8284 -30.0808\,30.7643 -30.2685\,31.1831 -29.528\,30.7257 -29.5663\,30.56 -29.7283))" +DUS,Düsseldorf Int'l,Düsseldorf,Düsseldorf,POINT(6.7833 51.2333),"POLYGON((6.6888 51.2295\,6.7235 51.2259\,6.7372 51.1834\,6.7933 51.1809\,6.799 51.1445\,6.8489 51.1622\,6.8543 51.1276\,6.899 51.1244\,6.9251 51.1365\,6.8908 51.1589\,6.9213 51.1927\,6.9106 51.2114\,6.8787 51.2141\,6.8711 51.2415\,6.9312 51.238\,6.9277 51.2656\,6.9399 51.2728\,6.896 51.2645\,6.8037 51.2791\,6.8175 51.3209\,6.8026 51.3284\,6.8059 51.3504\,6.7473 51.3524\,6.7145 51.3334\,6.7346 51.3126\,6.7026 51.2712\,6.7265 51.2566\,6.6888 51.2295))" +DVO,Francisco Bangoy Int'l,Davao City,Davao,POINT(125.6 7.0667),"POLYGON((125.2176 7.1733\,125.2956 7.1047\,125.2905 7.0612\,125.3142 7.0223\,125.277 7.0207\,125.2624 7.0043\,125.2708 6.9872\,125.3581 6.9869\,125.3822 6.9626\,125.4006 6.9746\,125.5765 6.9562\,125.6622 7.023\,125.6492 7.0646\,125.6755 7.2102\,125.7449 7.2447\,125.6661 7.2588\,125.6586 7.2828\,125.6204 7.256\,125.6028 7.2167\,125.55 7.2167\,125.55 7.5778\,125.3871 7.5863\,125.3433 7.5357\,125.2601 7.5147\,125.2316 7.522\,125.2588 7.498\,125.2257 7.4951\,125.2335 7.4111\,125.2709 7.3633\,125.253 7.2583\,125.2216 7.226\,125.2176 7.1733))" +DXB,Dubai Int'l,دبي,Dubai,POINT(55.2972 25.2631),"POLYGON((54.7154 25.0693\,54.887 24.9119\,55.0243 24.8676\,55.2024 24.6591\,55.3346 24.6231\,55.5233 
24.6505\,55.5958 24.712\,55.7371 24.7573\,55.6617 24.9888\,55.6293 25.2101\,55.3864 25.3026\,55.3638 25.298\,55.3548 25.3276\,55.3322 25.3283\,55.0979 25.5251\,54.9476 25.3518\,54.9044 25.2261\,54.821 25.1819\,54.7154 25.0693))" +DZA,Dzaoudzi Pamanzi Int'l,Mamoudzou,Mamoudzou,POINT(45.2272 -12.7794),"POLYGON((45.1731 -12.7641\,45.1548 -12.8196\,45.2035 -12.82\,45.238 -12.7851\,45.2265 -12.7747\,45.2369 -12.7534\,45.1731 -12.7641))" +DZO,Santa Bernardina Int'l,Durazno,Durazno,POINT(-56.5167 -33.3667),"POLYGON((-56.5467 -33.3598\,-56.5377 -33.402\,-56.5033 -33.4023\,-56.492 -33.3765\,-56.5467 -33.3598))" +EBB,Entebbe Int'l,Kampala,Kampala,POINT(32.5811 0.3136),"POLYGON((32.5099 0.3079\,32.5414 0.2709\,32.5813 0.276\,32.6343 0.2144\,32.6687 0.2747\,32.6655 0.3239\,32.6404 0.3296\,32.6343 0.3783\,32.6193 0.3774\,32.603 0.4053\,32.5649 0.4025\,32.5536 0.3482\,32.5325 0.3437\,32.5099 0.3079))" +EDI,Edinburgh Int'l,Fife,Dunfermline,POINT(-3.4393 56.0719),"POLYGON((-3.7399 56.0766\,-3.6866 56.0471\,-3.5676 56.0448\,-3.3915 56.0058\,-3.3831 56.0259\,-3.277 56.0567\,-3.1725 56.062\,-3.1508 56.1151\,-2.9637 56.2068\,-2.914 56.2095\,-2.8692 56.187\,-2.8077 56.1835\,-2.6855 56.2228\,-2.583 56.2787\,-2.6595 56.3196\,-2.7998 56.3463\,-2.8108 56.3695\,-2.7876 56.4286\,-2.7958 56.4332\,-2.7931 56.4354\,-2.805 56.4418\,-2.809 56.4478\,-2.805 56.4433\,-2.7941 56.438\,-2.6954 56.4458\,-2.9229 56.4519\,-3.277 56.3509\,-3.256 56.3401\,-3.3006 56.314\,-3.2937 56.2887\,-3.3837 56.2688\,-3.3533 56.2553\,-3.3675 56.2398\,-3.28 56.2339\,-3.2902 56.225\,-3.2649 56.2201\,-3.2749 56.2142\,-3.2609 56.1961\,-3.3091 56.1853\,-3.2965 56.1706\,-3.3454 56.1729\,-3.3717 56.1644\,-3.3699 56.1458\,-3.4166 56.1384\,-3.5688 56.1591\,-3.581 56.1393\,-3.6639 56.123\,-3.6288 56.1104\,-3.7146 56.1045\,-3.7399 56.0766))" +EDL,Eldoret Int'l,Kimumu ward,Eldoret,POINT(35.2833 0.5167),"POLYGON((35.2525 0.5495\,35.2793 0.5069\,35.2903 0.5267\,35.3148 0.5239\,35.3175 0.585\,35.2814 0.5653\,35.2641 0.5793\,35.2525 0.5495))" +EKO,Elko Reg.,Elko,Elko,POINT(-115.7678 40.8381),"POLYGON((-115.8338 40.8106\,-115.7122 40.8105\,-115.7177 40.8253\,-115.7001 40.8253\,-115.7001 40.8402\,-115.7419 40.8471\,-115.7076 40.8883\,-115.7572 40.8582\,-115.7746 40.8633\,-115.762 40.8468\,-115.7763 40.8615\,-115.8146 40.8613\,-115.7954 40.8495\,-115.8083 40.8164\,-115.7856 40.824\,-115.7856 40.8226\,-115.8154 40.8125\,-115.8096 40.8376\,-115.8331 40.8386\,-115.8338 40.8106)\,(-115.7962 40.8501\,-115.7954 40.8521\,-115.7954 40.8501\,-115.7962 40.8501)\,(-115.7336 40.8326\,-115.738 40.8345\,-115.7335 40.8338\,-115.7336 40.8326)\,(-115.7189 40.8252\,-115.7236 40.8325\,-115.7189 40.8325\,-115.7189 40.8252))" +ELP,El Paso Int'l,Juárez,Juárez,POINT(-106.487 31.7386),"POLYGON((-106.9516 31.3359\,-106.9225 31.2369\,-106.8822 31.2341\,-106.8633 31.186\,-106.8219 31.1456\,-106.8213 31.12\,-106.7458 31.1379\,-106.7456 31.1603\,-106.659 31.1743\,-106.6147 31.152\,-106.5015 31.1525\,-106.5074 31.1873\,-106.3908 31.1876\,-106.3968 31.159\,-106.374 31.1547\,-106.3669 31.1871\,-106.3103 31.1537\,-106.2643 31.1766\,-106.2646 31.2546\,-106.2804 31.254\,-106.2804 31.272\,-106.2636 31.2731\,-106.2552 31.3231\,-106.2665 31.3227\,-106.2701 31.3706\,-106.281 31.3712\,-106.2595 31.4339\,-106.2349 31.4102\,-106.1811 31.4549\,-106.2091 31.4685\,-106.2465 31.5417\,-106.2809 31.5623\,-106.3496 31.6967\,-106.4154 31.7508\,-106.4534 31.7646\,-106.4887 31.7481\,-106.5282 31.7839\,-106.8676 31.7838\,-106.8308 31.6796\,-106.8775 31.6224\,-106.749 31.6034\,-106.75 31.4959\,-106.6888 
31.432\,-106.7418 31.4003\,-106.8485 31.4012\,-106.8469 31.3718\,-106.9516 31.3359))" +EPLL,Łódź Władysław Reymont,Łódź,Łódź,POINT(19.4547 51.7769),"POLYGON((19.3209 51.8081\,19.3572 51.7929\,19.3373 51.7841\,19.3376 51.7673\,19.3746 51.7559\,19.3717 51.7421\,19.3353 51.7316\,19.3542 51.7022\,19.4205 51.687\,19.5332 51.6907\,19.5864 51.7075\,19.6075 51.6977\,19.6399 51.7547\,19.6206 51.7556\,19.6093 51.7789\,19.6165 51.8144\,19.56 51.809\,19.5472 51.8281\,19.5631 51.8459\,19.5065 51.8598\,19.3813 51.8215\,19.3844 51.8361\,19.3514 51.8385\,19.3421 51.8142\,19.3209 51.8081))" +ESB,Esenboğa Int'l,Ankara,Ankara,POINT(32.85 39.93),"POLYGON((30.8361 40.0979\,31.6691 40.0521\,31.9001 39.8559\,31.7647 39.7226\,31.9434 39.6362\,32.0088 39.2261\,31.8196 39.1595\,32.0139 38.974\,32.4407 38.9546\,32.7902 39.2453\,33.1608 39.3072\,33.1227 39.1751\,33.3897 39.0312\,33.3559 38.7953\,33.5552 38.6757\,33.8852 39.0279\,33.1861 39.6597\,33.6482 40.2947\,33.2636 40.3237\,32.6817 40.7547\,31.8528 40.3048\,31.0948 40.3697\,30.8361 40.0979))" +ESE,Ensenada,Ensenada,Rodolfo Sánchez Taboada,POINT(-116.5911 31.7958),"POLYGON((-116.7568 31.9321\,-116.7461 31.9019\,-116.6992 31.8946\,-116.6262 31.8404\,-116.6335 31.855\,-116.6293 31.861\,-116.6213 31.858\,-116.6091 31.8248\,-116.6341 31.7504\,-116.66 31.7199\,-116.7276 31.7491\,-116.7417 31.7498\,-116.7441 31.7428\,-116.6773 31.6998\,-116.6508 31.6519\,-116.5157 31.6843\,-116.5298 31.7671\,-116.5165 31.7695\,-116.5209 31.7945\,-116.5576 31.7879\,-116.5635 31.8226\,-116.5396 31.827\,-116.5425 31.8434\,-116.4973 31.8516\,-116.5039 31.8914\,-116.5958 31.9352\,-116.6655 31.9224\,-116.6692 31.9462\,-116.7568 31.9321))" +EVN,Zvartnots Int'l,Կենտրոն,Yerevan,POINT(44.5144 40.1814),"POLYGON((44.4792 40.1872\,44.4871 40.1597\,44.5165 40.1596\,44.5479 40.1961\,44.4792 40.1872))" +EWR,Newark Int'l,City of New York,New York,POINT(-73.9249 40.6943),"POLYGON((-74.2588 40.4989\,-74.2253 40.4766\,-73.9779 40.5191\,-73.9021 40.4921\,-73.8126 40.53\,-73.7572 40.5312\,-73.7565 40.5862\,-73.7381 40.6026\,-73.7681 40.6263\,-73.7248 40.6523\,-73.7303 40.7222\,-73.7002 40.7393\,-73.7797 40.8121\,-73.7484 40.8718\,-73.8382 40.8941\,-73.8511 40.9101\,-73.8593 40.9005\,-73.9183 40.9176\,-74.014 40.7576\,-74.0558 40.6515\,-74.1914 40.642\,-74.2146 40.5605\,-74.2475 40.5494\,-74.2588 40.4989))" +EZE,Ministro Pistarini Int'l,Aeropuerto Internacional Ezeiza,José María Ezeiza,POINT(-58.5167 -34.8333),"POLYGON((-58.5977 -34.8051\,-58.5688 -34.8621\,-58.508 -34.8311\,-58.5205 -34.7409\,-58.5671 -34.7576\,-58.5977 -34.8051))" +FAE,Vágar,Tórshavn,Tórshavn,POINT(-6.7833 62.0),"POLYGON((-6.8585 62.0197\,-6.8335 61.9968\,-6.7981 61.9964\,-6.7727 62.0094\,-6.7622 61.9994\,-6.7527 62.0207\,-6.8042 62.0332\,-6.8585 62.0197))" +FAI,Fairbanks Int'l,Fairbanks,Fairbanks,POINT(-147.6533 64.8353),"POLYGON((-147.8138 64.8344\,-147.7785 64.8274\,-147.7785 64.813\,-147.6706 64.8166\,-147.6684 64.7935\,-147.5938 64.78\,-147.5789 64.7824\,-147.5692 64.8204\,-147.5435 64.8202\,-147.5453 64.8345\,-147.5658 64.8334\,-147.5608 64.8489\,-147.5267 64.8531\,-147.5245 64.878\,-147.7735 64.8639\,-147.7795 64.8455\,-147.8138 64.8344))" +FAT,Fresno Yosemite Int'l,Hanford,Hanford,POINT(-119.6549 36.3274),"POLYGON((-119.691 36.3415\,-119.6768 36.3281\,-119.6822 36.3055\,-119.6613 36.3056\,-119.6549 36.2693\,-119.6369 36.2659\,-119.6488 36.3093\,-119.619 36.3057\,-119.6275 36.3232\,-119.609 36.3231\,-119.6009 36.3319\,-119.6228 36.328\,-119.6208 36.3573\,-119.6461 36.3719\,-119.6639 36.3684\,-119.6637 36.3426\,-119.6864 
36.3496\,-119.6688 36.335\,-119.691 36.3415)\,(-119.6549 36.3062\,-119.6504 36.3093\,-119.6504 36.3056\,-119.6549 36.3062)\,(-119.6369 36.3283\,-119.6328 36.334\,-119.628 36.328\,-119.6369 36.3283)\,(-119.637 36.3439\,-119.6351 36.3473\,-119.635 36.3439\,-119.637 36.3439)\,(-119.635 36.3369\,-119.628 36.3426\,-119.628 36.3389\,-119.635 36.3369)\,(-119.63 36.3244\,-119.6362 36.328\,-119.6299 36.3273\,-119.63 36.3244)\,(-119.6315 36.3573\,-119.6283 36.3612\,-119.6283 36.3573\,-119.6315 36.3573))" +FBM,Lubumbashi Luano Int'l,Lubumbashi,Lubumbashi,POINT(27.4794 -11.6647),"POLYGON((27.4114 -11.6447\,27.4549 -11.6666\,27.4297 -11.684\,27.4717 -11.6988\,27.4915 -11.6773\,27.4917 -11.607\,27.4599 -11.6131\,27.4511 -11.6353\,27.4223 -11.6262\,27.4114 -11.6447))" +FCO,Leonardo da Vinci Int'l,Fiumicino,Fiumicino,POINT(12.2333 41.7667),"POLYGON((12.1273 41.9248\,12.2058 41.827\,12.2225 41.744\,12.2864 41.7563\,12.2814 41.7843\,12.3168 41.7979\,12.2369 41.885\,12.3045 41.9639\,12.249 41.9727\,12.2429 42.0172\,12.2007 42.0186\,12.1749 41.9496\,12.1273 41.9248))" +FEZ,Saiss,Fès ⴼⴰⵙ فاس,Fès,POINT(-5.0033 34.0433),"POLYGON((-5.0782 34.0189\,-5.0524 33.9764\,-4.9647 33.9912\,-4.9432 34.0062\,-4.9717 34.0362\,-4.941 34.0441\,-4.9381 34.073\,-5.0365 34.068\,-5.0782 34.0189)\,(-5.0045 34.0583\,-4.9878 34.0604\,-4.9839 34.0564\,-4.9942 34.0482\,-5.0045 34.0583))" +FIH,Kinshasa N Djili Int'l,Kinshasa,Kinshasa,POINT(15.3222 -4.325),"POLYGON((15.1271 -4.4429\,15.3323 -4.6461\,15.649 -4.5855\,15.6498 -4.9682\,15.83 -5.03\,15.8709 -4.8601\,16.2618 -4.9926\,16.5341 -4.358\,15.946 -3.9276\,15.5527 -4.0427\,15.1271 -4.4429))" +FLG,Flagstaff Pulliam,Flagstaff,Flagstaff,POINT(-111.6194 35.1872),"POLYGON((-111.7089 35.1807\,-111.7013 35.1515\,-111.6862 35.1516\,-111.686 35.1224\,-111.5977 35.123\,-111.5982 35.1525\,-111.5594 35.152\,-111.5599 35.1667\,-111.5246 35.1737\,-111.5068 35.2106\,-111.5828 35.2207\,-111.6195 35.1967\,-111.6405 35.1922\,-111.5819 35.2217\,-111.5245 35.215\,-111.5259 35.2387\,-111.6315 35.2326\,-111.6488 35.2178\,-111.6625 35.2217\,-111.6566 35.2396\,-111.7014 35.2395\,-111.7012 35.1931\,-111.6849 35.1967\,-111.6631 35.1953\,-111.6871 35.1949\,-111.7089 35.1807)\,(-111.6782 35.1879\,-111.675 35.1919\,-111.675 35.188\,-111.6782 35.1879))" +FLL,Fort Lauderdale Hollywood Int'l,Hollywood,Hollywood,POINT(-80.1679 26.0293),"POLYGON((-80.2488 26.031\,-80.2326 26.0314\,-80.2397 26.0112\,-80.2135 26.0101\,-80.2203 25.9952\,-80.12 25.9975\,-80.1172 25.987\,-80.1054 26.0934\,-80.1357 26.0928\,-80.1301 26.0339\,-80.1863 26.0492\,-80.1937 26.0677\,-80.1752 26.0718\,-80.1813 26.0834\,-80.1997 26.0629\,-80.1946 26.0471\,-80.2078 26.0659\,-80.2088 26.043\,-80.2045 26.0358\,-80.2309 26.0452\,-80.2488 26.031)\,(-80.2051 26.0469\,-80.2066 26.0487\,-80.2051 26.048\,-80.2051 26.0469)\,(-80.1942 26.0471\,-80.1926 26.049\,-80.1927 26.0471\,-80.1942 26.0471))" +FLN,Hercilio Luz Int'l,Florianópolis,Florianópolis,POINT(-48.4853 -27.6122),"POLYGON((-48.613 -27.614\,-48.594 -27.641\,-48.605 -27.673\,-48.588 -27.751\,-48.601 -27.777\,-48.568 -27.847\,-48.5426 -27.8277\,-48.5269 -27.7867\,-48.4938 -27.7941\,-48.4757 -27.7703\,-48.5063 -27.7469\,-48.5029 -27.7163\,-48.4314 -27.6161\,-48.4119 -27.5747\,-48.4285 -27.5679\,-48.4247 -27.5394\,-48.3586 -27.4402\,-48.3912 -27.4353\,-48.4165 -27.3805\,-48.4835 -27.3783\,-48.5166 -27.3999\,-48.541 -27.436\,-48.5843 -27.4531\,-48.59 -27.485\,-48.568 -27.545\,-48.5996 -27.5733\,-48.613 -27.614))" +FNA,Freetown Lungi,Port Loko District,Port Loko,POINT(-12.7875 8.7667),"POLYGON((-13.2486 
8.806\,-12.9317 8.2598\,-12.2847 8.5724\,-12.5152 9.0263\,-13.2486 8.806))" +FNJ,Sunan,중구역,Pyongyang,POINT(125.7381 39.0194),"POLYGON((125.7219 38.9899\,125.7419 38.987\,125.7512 38.9924\,125.7861 39.0528\,125.7258 39.0233\,125.7432 38.9955\,125.7219 38.9899))" +FOC,Fuzhou Changle Int'l,鼓楼区,Fuzhou,POINT(119.2917 26.0769),"POLYGON((119.2401 26.0975\,119.2673 26.0618\,119.3179 26.0728\,119.3012 26.1222\,119.2623 26.1221\,119.2401 26.0975))" +FOR,Pinto Martins Int'l,Fortaleza,Fortaleza,POINT(-38.5275 -3.7275),"POLYGON((-38.6379 -3.8014\,-38.519 -3.8647\,-38.5311 -3.8794\,-38.514 -3.8945\,-38.4499 -3.8379\,-38.4429 -3.8016\,-38.4014 -3.8266\,-38.4604 -3.7052\,-38.4824 -3.6986\,-38.4761 -3.7191\,-38.493 -3.7247\,-38.5812 -3.6921\,-38.6218 -3.7025\,-38.5989 -3.7423\,-38.6379 -3.8014))" +FPO,Freeport Int'l,City of Freeport,Freeport City,POINT(-78.6967 26.5286),"POLYGON((-78.9267 26.369\,-78.8552 26.324\,-78.734 26.291\,-78.621 26.298\,-78.5515 26.32\,-78.5091 26.345\,-78.6165 26.5255\,-78.4991 26.3485\,-78.3686 26.402\,-78.3055 26.412\,-78.3995 26.7232\,-78.4884 26.7176\,-78.6206 26.6554\,-78.6802 26.6152\,-78.6877 26.585\,-78.7818 26.5428\,-78.7911 26.4998\,-78.9267 26.369)\,(-78.7697 26.5125\,-78.7717 26.5175\,-78.768 26.5136\,-78.7697 26.5125)\,(-78.7693 26.5103\,-78.7416 26.5059\,-78.7235 26.4972\,-78.7286 26.4939\,-78.7693 26.5103)\,(-78.6868 26.4916\,-78.6769 26.4964\,-78.6687 26.4968\,-78.6868 26.4916)\,(-78.5985 26.5308\,-78.5974 26.5347\,-78.5956 26.5327\,-78.5985 26.5308)\,(-78.5903 26.5361\,-78.5914 26.5401\,-78.5876 26.5376\,-78.5903 26.5361))" +FRA,Frankfurt Int'l,Frankfurt am Main,Frankfurt,POINT(8.6822 50.1106),"POLYGON((8.4728 50.0998\,8.5204 50.0618\,8.5257 50.0839\,8.5498 50.0669\,8.5533 50.0489\,8.5198 50.0368\,8.52 50.0208\,8.592 50.0177\,8.5985 50.0423\,8.6246 50.0551\,8.725 50.0617\,8.7502 50.0965\,8.7255 50.1085\,8.7798 50.1111\,8.7692 50.1316\,8.8004 50.1712\,8.7688 50.1798\,8.723 50.1635\,8.702 50.1805\,8.735 50.2156\,8.7093 50.2271\,8.5879 50.1682\,8.6027 50.157\,8.5933 50.1398\,8.4728 50.0998))" +FRS,Mundo Maya Int'l,El Chal,El Chal,POINT(-89.65 16.6333),"POLYGON((-89.9073 16.5645\,-89.92 16.3901\,-89.9022 16.4056\,-89.9121 16.3457\,-89.8868 16.328\,-89.8568 16.3265\,-89.8189 16.3695\,-89.8145 16.4026\,-89.7755 16.4263\,-89.6693 16.438\,-89.6402 16.4165\,-89.6398 16.459\,-89.5914 16.4656\,-89.5781 16.4495\,-89.5744 16.4969\,-89.548 16.4956\,-89.5471 16.5704\,-89.5681 16.5922\,-89.5343 16.6224\,-89.5423 16.6464\,-89.5202 16.664\,-89.6214 16.7039\,-89.6548 16.6446\,-89.6954 16.6239\,-89.7331 16.6595\,-89.7615 16.6422\,-89.8173 16.6521\,-89.831 16.6017\,-89.8227 16.5769\,-89.9073 16.5645))" +FRU,Vasilyevka,Свердлов району,Bishkek,POINT(74.6122 42.8747),"POLYGON((74.5965 42.8837\,74.6118 42.885\,74.6109 42.8635\,74.7177 42.8659\,74.6566 42.881\,74.6507 42.9093\,74.6465 42.8933\,74.6313 42.895\,74.6265 42.9287\,74.6408 42.9426\,74.6171 42.9487\,74.6277 42.9716\,74.6027 42.9534\,74.6107 42.9294\,74.5965 42.8837))" +FSD,Sioux Falls Reg.,Sioux Falls Township,Sioux Falls,POINT(-96.7311 43.5396),"POLYGON((-96.7711 43.5874\,-96.771 43.5003\,-96.6514 43.5003\,-96.6515 43.5874\,-96.7711 43.5874))" +FUK,Fukuoka,中央区,Fukuoka,POINT(130.4 33.5833),"POLYGON((130.3439 33.6201\,130.3767 33.5625\,130.3955 33.5582\,130.4154 33.5768\,130.3786 33.6286\,130.3439 33.6201))" +FUN,Funafuti Int'l,Funafuti,Funafuti,POINT(179.2 -8.5167),"POLYGON((178.5777 -8.5067\,178.9373 -8.7927\,179.2604 -9.1227\,179.6681 -8.7743\,179.3343 -8.3164\,179.3125 -8.291\,179.2986 -8.2786\,179.2805 -8.2655\,178.9018 
-8.0211\,178.5777 -8.5067))" +GAU,Lokpriya G. Bordoloi Int'l,Guwahati,Guwāhāti,POINT(91.7458 26.1722),"POLYGON((91.6269 26.1229\,91.7103 26.1116\,91.7384 26.1595\,91.8045 26.1826\,91.8197 26.2098\,91.726 26.1724\,91.6496 26.1662\,91.6269 26.1229))" +GBE,Sir Seretse Khama Int'l,South-East District,Gaborone,POINT(25.9122 -24.6581),"POLYGON((25.539 -25.4358\,25.6636 -25.4679\,25.7017 -25.2892\,25.7289 -25.257\,25.86 -24.924\,25.8825 -24.9149\,25.8761 -24.8903\,25.889 -24.8822\,25.8732 -24.8278\,25.8427 -24.79\,25.8484 -24.7539\,25.9558 -24.7477\,26.1886 -24.6868\,26.1506 -24.6467\,26.033 -24.5796\,25.9728 -24.5126\,25.8736 -24.5822\,25.9027 -24.5984\,25.7891 -24.7328\,25.7156 -24.7872\,25.709 -24.8535\,25.6338 -24.8857\,25.6609 -24.9117\,25.5998 -25.0218\,25.6683 -25.0279\,25.539 -25.4358))" +GDL,Don Miguel Hidalgo Int'l,San Pedro Tlaquepaque,Tlaquepaque,POINT(-103.3167 20.6167),"POLYGON((-103.4578 20.5495\,-103.4356 20.5385\,-103.4321 20.5588\,-103.3988 20.5513\,-103.3632 20.5726\,-103.3609 20.5556\,-103.3317 20.5538\,-103.3189 20.5852\,-103.2543 20.5649\,-103.2404 20.5832\,-103.2601 20.5876\,-103.2573 20.6118\,-103.2829 20.613\,-103.2822 20.6412\,-103.3207 20.6463\,-103.3403 20.6094\,-103.3873 20.6037\,-103.3966 20.6366\,-103.4018 20.6196\,-103.4231 20.6204\,-103.4181 20.5887\,-103.4578 20.5495))" +GDN,Gdansk Lech Walesa,Gdańsk,Gdańsk,POINT(18.6453 54.3475),"POLYGON((18.4295 54.385\,18.4471 54.3765\,18.4411 54.3574\,18.4684 54.3475\,18.4569 54.3323\,18.488 54.317\,18.5014 54.3341\,18.5342 54.3356\,18.54 54.3141\,18.6058 54.2987\,18.6048 54.2785\,18.6368 54.275\,18.6393 54.291\,18.6909 54.3016\,18.6874 54.3134\,18.7054 54.3128\,18.7279 54.3415\,18.7641 54.3333\,18.7929 54.3465\,18.8731 54.3104\,18.9091 54.3129\,18.9407 54.2802\,18.9337 54.3105\,18.9496 54.363\,18.8885 54.3475\,18.7793 54.3744\,18.7797 54.3997\,18.8981 54.3997\,18.8981 54.4664\,18.6431 54.4664\,18.6511 54.4081\,18.5916 54.4304\,18.5069 54.4229\,18.4538 54.4472\,18.4373 54.4394\,18.4579 54.436\,18.4417 54.4079\,18.4655 54.4077\,18.4654 54.3999\,18.4295 54.385))" +GDT,JAGS McCartney Int'l,Grand Turk,Grand Turk,POINT(-71.136 21.4664),"POLYGON((-71.1536 21.4452\,-71.1431 21.4225\,-71.1314 21.4307\,-71.1268 21.436\,-71.1273 21.441\,-71.1321 21.4435\,-71.1277 21.4438\,-71.1261 21.4492\,-71.1312 21.4723\,-71.13 21.4919\,-71.1312 21.5134\,-71.1352 21.5116\,-71.1377 21.5069\,-71.1437 21.5061\,-71.1491 21.4976\,-71.1503 21.4852\,-71.1466 21.4647\,-71.149 21.456\,-71.1536 21.4488\,-71.1536 21.4452))" +GEG,Spokane Int'l,Spokane,Spokane,POINT(-117.433 47.6671),"POLYGON((-117.604 47.6138\,-117.5541 47.6017\,-117.472 47.6371\,-117.4645 47.6138\,-117.4301 47.6137\,-117.4473 47.5944\,-117.4001 47.5868\,-117.3951 47.6059\,-117.3411 47.6128\,-117.3468 47.6671\,-117.3039 47.6938\,-117.3467 47.6764\,-117.3351 47.7005\,-117.3789 47.7185\,-117.3952 47.7511\,-117.4219 47.726\,-117.4126 47.7151\,-117.4324 47.7154\,-117.4304 47.7441\,-117.47 47.73\,-117.4749 47.7588\,-117.5212 47.7587\,-117.4968 47.7316\,-117.5144 47.7245\,-117.4993 47.7196\,-117.4967 47.6943\,-117.4669 47.6913\,-117.4805 47.6805\,-117.4645 47.6538\,-117.5503 47.6466\,-117.5608 47.6283\,-117.6039 47.6283\,-117.604 47.6138))" +GEO,Cheddi Jagan Int'l,Rest of Region 7,Bartica,POINT(-58.6167 6.4),"POLYGON((-61.3785 5.9527\,-60.7182 5.9358\,-60.573 6.4217\,-60.4675 5.9405\,-60.273 5.9119\,-60.3775 5.6871\,-60.113 5.4926\,-59.6111 5.3902\,-59.5834 5.5817\,-59.2317 5.7256\,-58.8752 5.3958\,-58.6281 5.5166\,-58.5697 6.2585\,-58.6178 6.5083\,-58.8426 6.5583\,-58.9872 6.8698\,-59.9008 
7.202\,-60.1019 7.0654\,-60.2938 7.1338\,-60.7184 6.7553\,-61.1395 6.7211\,-61.2321 6.5702\,-61.0022 6.4438\,-61.0478 6.2292\,-61.168 6.3797\,-61.1255 6.1911\,-61.3785 5.9527)\,(-58.6413 6.3595\,-58.6308 6.4112\,-58.6171 6.4069\,-58.6413 6.3595))" +GGT,Exuma Int'l,New Providence,Nassau,POINT(-77.3386 25.0781),"POLYGON((-77.7742 25.1466\,-77.6445 24.8079\,-77.3345 24.7901\,-76.8322 25.0965\,-76.9078 25.2421\,-77.2001 25.559\,-77.7742 25.1466))" +GIG,Rio de Janeiro-Antonio Carlos Jobim Int'l,Rio de Janeiro,Rio de Janeiro,POINT(-43.2056 -22.9111),"POLYGON((-43.7963 -22.9192\,-43.6898 -22.992\,-43.6743 -22.9805\,-43.5991 -23.0229\,-43.6678 -23.0532\,-43.589 -23.0534\,-43.5532 -23.0764\,-43.5348 -23.0505\,-43.4386 -23.021\,-43.2865 -23.0165\,-43.2232 -22.9878\,-43.1897 -22.9907\,-43.1502 -22.9508\,-43.1521 -22.9386\,-43.1805 -22.948\,-43.1619 -22.9047\,-43.2108 -22.8976\,-43.2013 -22.8715\,-43.2267 -22.8741\,-43.2728 -22.8098\,-43.3082 -22.7999\,-43.37 -22.8053\,-43.4581 -22.849\,-43.4901 -22.8155\,-43.5594 -22.8289\,-43.5885 -22.8387\,-43.5929 -22.8605\,-43.6441 -22.8557\,-43.7482 -22.8779\,-43.7948 -22.9035\,-43.7963 -22.9192))" +GLA,Glasgow Int'l,Renfrewshire,Paisley,POINT(-4.4239 55.8456),"POLYGON((-4.784 55.8415\,-4.7224 55.8208\,-4.7205 55.8051\,-4.6857 55.8037\,-4.6611 55.7597\,-4.6505 55.7609\,-4.6387 55.7748\,-4.6338 55.776\,-4.6166 55.7615\,-4.5747 55.7835\,-4.5517 55.7663\,-4.4952 55.8014\,-4.4282 55.8016\,-4.3855 55.8183\,-4.368 55.8454\,-4.3809 55.8563\,-4.3534 55.8737\,-4.4841 55.9283\,-4.6099 55.9363\,-4.635 55.9137\,-4.6132 55.906\,-4.6192 55.889\,-4.5973 55.8624\,-4.6228 55.8616\,-4.6326 55.8411\,-4.7294 55.8533\,-4.784 55.8415))" +GLS,Scholes Int'l,Galveston,Galveston,POINT(-94.8913 29.2484),"POLYGON((-95.1299 29.0856\,-94.9702 28.9963\,-94.67 29.1795\,-94.6197 29.2283\,-94.5921 29.2948\,-94.5257 29.3303\,-94.6951 29.3994\,-94.7635 29.342\,-94.8335 29.3647\,-94.885 29.299\,-94.8986 29.3036\,-95.0896 29.1477\,-95.1299 29.0856)\,(-94.9779 29.1778\,-94.9837 29.2008\,-94.9694 29.1828\,-94.9779 29.1778))" +GMP,Gimpo Int'l,서울특별시,Seoul,POINT(126.99 37.56),"POLYGON((126.7644 37.5553\,126.7945 37.5357\,126.8221 37.5407\,126.8147 37.4746\,126.8452 37.4733\,126.8698 37.4957\,126.9029 37.4341\,126.9291 37.4502\,126.9389 37.4354\,126.9637 37.441\,126.9978 37.4677\,127.0146 37.4548\,127.0347 37.4641\,127.036 37.4391\,127.0525 37.4285\,127.1435 37.4739\,127.1615 37.5\,127.1406 37.5156\,127.1632 37.545\,127.1838 37.5455\,127.1771 37.5812\,127.1045 37.5556\,127.1184 37.6076\,127.1038 37.6219\,127.1111 37.6426\,127.092 37.6465\,127.0964 37.6889\,127.0155 37.7015\,126.9912 37.6793\,126.9816 37.6347\,126.9585 37.6294\,126.9476 37.6592\,126.9055 37.6489\,126.8999 37.5899\,126.8536 37.5718\,126.8026 37.605\,126.7644 37.5553))" +GND,Point Salines Int'l,Saint David,Saint David’s,POINT(-61.6806 12.0444),"POLYGON((-61.7261 12.0315\,-61.7108 12.0126\,-61.6991 12.0067\,-61.7073 12.0171\,-61.6754 12.0133\,-61.6322 12.0493\,-61.6766 12.0791\,-61.7015 12.0748\,-61.7261 12.0315))" +GOJ,Nizhny Novgorod Int'l,Нижегородский район,Nizhniy Novgorod,POINT(44.0075 56.3269),"POLYGON((43.9622 56.3118\,43.9684 56.299\,43.9701 56.3119\,44.0318 56.3199\,44.0557 56.3014\,44.0536 56.2766\,44.0769 56.2659\,44.0798 56.2818\,44.0973 56.2782\,44.0915 56.2908\,44.1192 56.2981\,44.0682 56.338\,43.9913 56.3364\,43.9622 56.3118))" +GOM,Goma Int'l,Goma,Goma,POINT(29.2336 -1.6794),"POLYGON((29.1153 -1.6214\,29.1373 -1.6422\,29.1585 -1.6339\,29.1692 -1.6633\,29.2448 -1.6981\,29.2434 -1.6683\,29.2601 -1.6523\,29.2007 -1.6497\,29.1504 
-1.5951\,29.1153 -1.6214))" +GOT,Gothenburg,Centrum,Gothenburg,POINT(11.9675 57.7075),"POLYGON((11.8941 57.6889\,11.9492 57.6635\,11.9696 57.6775\,12.0885 57.6823\,12.0541 57.7338\,11.8941 57.6889))" +GOU,Garoua Int'l,Garoua I,Garoua,POINT(13.4 9.3),"POLYGON((13.3281 9.3288\,13.3393 9.2578\,13.3803 9.2557\,13.4054 9.2959\,13.3756 9.4179\,13.3281 9.3288))" +GRR,Gerald R. Ford Int'l,Kentwood,Kentwood,POINT(-85.5926 42.8852),"POLYGON((-85.6649 42.8876\,-85.6637 42.8549\,-85.5458 42.8545\,-85.5507 42.941\,-85.5803 42.9412\,-85.5686 42.9125\,-85.6126 42.9088\,-85.6065 42.8836\,-85.6649 42.8876))" +GRU,São Paulo-Guarulhos Int'l,Região Imediata de São Paulo,São Paulo,POINT(-46.6333 -23.55),"POLYGON((-47.2087 -23.9648\,-46.6502 -24.0015\,-45.6949 -23.6304\,-46.236 -23.186\,-46.4167 -23.3138\,-46.8029 -23.2574\,-47.0419 -23.3515\,-46.9842 -23.8027\,-47.2087 -23.9648))" +GUA,La Aurora,Ciudad de Guatemala,Guatemala City,POINT(-90.5252 14.6099),"POLYGON((-90.5667 14.6004\,-90.5797 14.5708\,-90.565 14.5878\,-90.5593 14.5868\,-90.5575 14.5425\,-90.5357 14.5364\,-90.5151 14.5744\,-90.4432 14.5944\,-90.4134 14.5883\,-90.3872 14.6373\,-90.3929 14.7052\,-90.44 14.7138\,-90.4799 14.6822\,-90.5143 14.6827\,-90.5384 14.6481\,-90.5717 14.635\,-90.5667 14.6004))" +GUM,Antonio B. Won Pat Int'l,Guam,Hagåtña,POINT(144.7504 13.4745),"POLYGON((144.5634 13.4481\,144.593 13.3924\,144.5776 13.344\,144.6084 13.2706\,144.5909 13.2393\,144.5962 13.2077\,144.6358 13.1825\,144.7406 13.1952\,144.818 13.2804\,144.8266 13.3814\,144.972 13.5\,145.0082 13.5862\,145.0031 13.6181\,144.8892 13.6982\,144.8385 13.7027\,144.7896 13.6533\,144.75 13.5519\,144.7255 13.5317\,144.6112 13.5102\,144.5634 13.4481))" +GVA,Geneva,Genève,Le Grand-Saconnex,POINT(6.1167 46.2333),"POLYGON((5.9559 46.1324\,5.9941 46.1446\,6.0354 46.1343\,6.0523 46.1514\,6.1365 46.1416\,6.2219 46.2007\,6.2947 46.2252\,6.3098 46.2562\,6.2602 46.2517\,6.2376 46.2776\,6.2529 46.2905\,6.2446 46.304\,6.2196 46.3119\,6.1969 46.2866\,6.1243 46.3173\,6.1024 46.2849\,6.1245 46.2513\,5.9746 46.215\,5.9637 46.197\,5.9949 46.1819\,5.9559 46.1324))" +GYD,Heydar Aliyev Int'l,Səbail rayonu,Baku,POINT(49.8352 40.3667),"POLYGON((49.7425 40.2903\,49.8861 40.0877\,49.9879 40.0574\,49.8495 40.3756\,49.7963 40.3572\,49.7813 40.318\,49.7425 40.2903))" +GYE,Simon Bolivar Int'l,Guayaquil,Guayaquil,POINT(-79.8875 -2.19),"POLYGON((-79.9591 -2.1872\,-79.9568 -2.2244\,-79.9307 -2.2454\,-79.9291 -2.2719\,-79.8722 -2.2854\,-79.8564 -2.2584\,-79.8824 -2.2222\,-79.8624 -2.1671\,-79.8925 -2.0836\,-79.887 -2.0529\,-79.9438 -2.0631\,-79.9451 -2.1753\,-79.9591 -2.1872))" +GYM,Gen. José M. 
Yáñez Int'l,Guaymas,Heroica Guaymas,POINT(-110.8989 27.9183),"POLYGON((-111.3889 28.2505\,-110.89 27.8397\,-110.6296 28.1418\,-110.5153 28.0281\,-110.5878 27.5165\,-110.153 27.5605\,-110.3153 28.1901\,-110.1327 28.131\,-110.0879 28.5709\,-110.5741 28.4542\,-110.8578 28.6312\,-111.3889 28.2505))" +GYN,Santa Genoveva,Goiânia,Goiânia,POINT(-49.25 -16.6667),"POLYGON((-49.447 -16.7164\,-49.4124 -16.7404\,-49.3868 -16.8313\,-49.3654 -16.8177\,-49.3348 -16.7505\,-49.2882 -16.7482\,-49.2616 -16.7256\,-49.2084 -16.7453\,-49.159 -16.7907\,-49.1424 -16.739\,-49.1569 -16.7153\,-49.1767 -16.7251\,-49.1969 -16.6919\,-49.1663 -16.6502\,-49.078 -16.593\,-49.096 -16.591\,-49.105 -16.561\,-49.167 -16.578\,-49.195 -16.517\,-49.176 -16.465\,-49.215 -16.4544\,-49.2795 -16.529\,-49.3115 -16.5306\,-49.3299 -16.5704\,-49.3758 -16.5785\,-49.3921 -16.5975\,-49.379 -16.629\,-49.4456 -16.6731\,-49.447 -16.7164))" +GYY,Gary/Chicago Int'l,West Chicago Township,Chicago,POINT(-87.6866 41.8375),"POLYGON((-87.7756 41.9093\,-87.7741 41.8655\,-87.74 41.866\,-87.7385 41.8221\,-87.6324 41.86\,-87.6443 41.897\,-87.6881 41.9395\,-87.7076 41.9394\,-87.7069 41.9101\,-87.7756 41.9093))" +GZT,Gaziantep Oğuzeli Int'l,Gaziantep,Gaziantep,POINT(37.3792 37.0628),"POLYGON((36.4621 36.8882\,36.7114 36.8257\,36.9937 37.0346\,37.3691 36.7238\,38.0185 36.826\,37.8409 37.2678\,38.0797 37.4611\,37.6388 37.5136\,37.0819 37.1995\,36.8906 37.3231\,36.4621 36.8882))" +HAH,Prince Said Ibrahim Int'l,Ngazidja القمر الكبرى,Moroni,POINT(43.256 -11.699),"POLYGON((43.2287 -11.7363\,43.2393 -11.7806\,43.2882 -11.8469\,43.3838 -11.8631\,43.4569 -11.9383\,43.4882 -11.9316\,43.5179 -11.894\,43.4672 -11.8236\,43.4511 -11.7369\,43.3959 -11.6454\,43.3987 -11.4882\,43.4166 -11.4357\,43.393 -11.375\,43.3421 -11.3656\,43.2779 -11.3986\,43.2632 -11.4495\,43.2782 -11.5026\,43.2602 -11.6006\,43.2663 -11.672\,43.2287 -11.7363))" +HAM,Hamburg,Norderstedt,Norderstedt,POINT(10.0103 53.7064),"POLYGON((9.9348 53.6785\,9.947 53.6518\,9.9858 53.6482\,9.9996 53.6815\,10.0693 53.6795\,10.0603 53.697\,10.0729 53.7097\,10.0332 53.7188\,10.0226 53.7497\,9.9833 53.7595\,9.9657 53.7287\,9.9551 53.7324\,9.9348 53.6785))" +HAN,Noi Bai,Thành phố Hà Nội,Hanoi,POINT(105.8542 21.0283),"POLYGON((105.289 21.157\,105.5815 20.9282\,105.6662 20.617\,105.9968 20.6781\,105.8571 21.376\,105.6263 21.1734\,105.3958 21.3045\,105.289 21.157))" +HAV,José Martí Int'l,La Habana,Havana,POINT(-82.3589 23.1367),"POLYGON((-82.6073 23.308\,-82.5468 23.0546\,-82.4918 23.075\,-82.4816 23.0322\,-82.5048 23.0197\,-82.4994 22.9999\,-82.4707 22.9702\,-82.4437 22.9875\,-82.4364 22.9405\,-82.4196 22.9511\,-82.3625 22.9369\,-82.3562 22.9657\,-82.2462 22.9536\,-82.2613 22.9781\,-82.1987 22.9968\,-82.2272 23.0146\,-82.2258 23.0638\,-82.1759 23.0584\,-82.147 23.0809\,-82.1023 23.0516\,-82.1143 23.0885\,-82.0933 23.0972\,-82.0959 23.1191\,-82.0814 23.1273\,-82.1068 23.1614\,-82.0885 23.3858\,-82.316 23.3728\,-82.6073 23.308))" +HBA,Hobart Int'l,Kingston,Kingston,POINT(147.3083 -42.9769),"POLYGON((147.2445 -42.9704\,147.2761 -43.002\,147.2896 -42.9874\,147.3098 -42.9977\,147.3352 -42.959\,147.3338 -42.9308\,147.3018 -42.926\,147.2691 -42.9534\,147.2612 -42.9417\,147.2445 -42.9704))" +HBE,Borg El Arab Int'l,الإسكندرية,Al ‘Ajamī,POINT(29.7604 31.0959),"POLYGON((29.4716 31.235\,29.5922 30.9907\,29.6174 30.3597\,29.7254 30.4649\,29.9907 30.5794\,29.9353 30.6985\,29.9619 30.7275\,29.8688 30.8693\,29.9143 30.8959\,30.0156 31.1085\,30.0931 31.1967\,30.0843 31.5903\,29.6603 31.3172\,29.4716 31.235))" +HBX,Hubli,Savadatti 
taluku,Saundatti,POINT(75.1167 15.7833),"POLYGON((74.8014 15.8802\,74.8224 15.837\,74.8652 15.8403\,74.9027 15.7913\,74.9533 15.7764\,74.9636 15.7258\,75.0041 15.7275\,75.0135 15.6881\,74.9957 15.6848\,74.9921 15.65\,75.0194 15.6439\,75.0283 15.6698\,75.0598 15.6821\,75.0563 15.6325\,75.0918 15.6079\,75.1082 15.672\,75.2027 15.6491\,75.2026 15.6815\,75.2363 15.679\,75.2491 15.647\,75.2737 15.6487\,75.2746 15.7404\,75.2996 15.7276\,75.3183 15.7449\,75.2713 15.8978\,75.2476 15.8994\,75.2292 15.8731\,75.1927 15.8618\,75.1801 15.8904\,75.1973 15.8893\,75.1948 15.9209\,75.1459 15.9219\,75.088 15.905\,75.0976 15.8863\,75.0558 15.8577\,74.9654 15.8732\,74.9931 15.9103\,74.9293 15.9446\,74.929 15.9736\,74.905 15.9789\,74.9045 16.0044\,74.8448 15.9856\,74.8315 15.9481\,74.8416 15.9007\,74.8014 15.8802))" +HDY,Hat Yai Int'l,จังหวัดสงขลา,Hat Yai,POINT(100.4667 7.0167),"POLYGON((100.0305 7.0916\,100.3659 6.5401\,100.7385 6.5104\,100.9197 6.3122\,101.1091 6.5967\,101.3258 8.1639\,100.1464 7.8692\,100.428 7.2839\,100.0305 7.0916))" +HEL,Helsinki Vantaa,Helsinki,Helsinki,POINT(24.9375 60.1708),"POLYGON((24.7828 60.1\,24.9423 59.9225\,25.1694 59.9434\,25.1768 60.0935\,25.2204 60.1997\,25.2014 60.2182\,25.2409 60.2463\,25.2273 60.2611\,25.2509 60.2735\,25.2545 60.2952\,25.1425 60.2698\,25.1599 60.2488\,25.1358 60.2372\,25.0824 60.2497\,25.088 60.2713\,25.021 60.2893\,24.9732 60.2644\,24.9475 60.2785\,24.8679 60.2524\,24.8347 60.2585\,24.8419 60.2212\,24.8514 60.225\,24.8357 60.1304\,24.7828 60.1))" +HEM,Helsinki-Malmi,Helsinki,Helsinki,POINT(24.9375 60.1708),"POLYGON((24.7828 60.1\,24.9423 59.9225\,25.1694 59.9434\,25.1768 60.0935\,25.2204 60.1997\,25.2014 60.2182\,25.2409 60.2463\,25.2273 60.2611\,25.2509 60.2735\,25.2545 60.2952\,25.1425 60.2698\,25.1599 60.2488\,25.1358 60.2372\,25.0824 60.2497\,25.088 60.2713\,25.021 60.2893\,24.9732 60.2644\,24.9475 60.2785\,24.8679 60.2524\,24.8347 60.2585\,24.8419 60.2212\,24.8514 60.225\,24.8357 60.1304\,24.7828 60.1))" +HER,Heraklion Int'l,Δημοτική Ενότητα Νέας Αλικαρνασσού,Néa Alikarnassós,POINT(25.1833 35.3167),"POLYGON((25.1533 35.3429\,25.1662 35.3067\,25.2034 35.2997\,25.1927 35.3414\,25.1533 35.3429))" +HET,Hohhot Baita Int'l,回民区 ᠬᠣᠲᠣᠩ ᠠᠷᠠᠳ ᠤᠨ ᠲᠣᠭᠣᠷᠢᠭ,Hohhot,POINT(111.6629 40.8151),"POLYGON((111.4426 40.9293\,111.4465 40.8658\,111.5081 40.849\,111.5411 40.8004\,111.5632 40.8091\,111.5762 40.8018\,111.5653 40.7907\,111.5811 40.7887\,111.6685 40.8052\,111.6478 40.8242\,111.6375 40.8689\,111.571 40.9436\,111.5363 40.9375\,111.5269 40.9135\,111.4426 40.9293))" +HFE,Hefei-Luogang,庐阳区,Hefei,POINT(117.2808 31.8639),"POLYGON((117.0619 31.9522\,117.1358 31.8926\,117.1485 31.9001\,117.1534 31.8791\,117.1846 31.8863\,117.1847 31.8619\,117.1956 31.8824\,117.2193 31.887\,117.2642 31.8758\,117.2686 31.8554\,117.2946 31.8648\,117.2826 31.8947\,117.292 31.9384\,117.2735 31.959\,117.2447 31.9551\,117.231 31.9336\,117.1977 31.944\,117.1785 31.9274\,117.1624 31.962\,117.1452 31.948\,117.0936 31.9661\,117.0619 31.9522)\,(117.1408 31.9113\,117.1588 31.9123\,117.1738 31.8984\,117.1646 31.8976\,117.1408 31.9113))" +HGH,Hangzhou Xiaoshan Int'l,上城区,Hangzhou,POINT(120.1675 30.25),"POLYGON((120.1324 30.1996\,120.1997 30.224\,120.2404 30.2733\,120.2888 30.2905\,120.2893 30.3175\,120.2682 30.3228\,120.2747 30.3396\,120.2599 30.3288\,120.2575 30.3413\,120.2275 30.3442\,120.243 30.3626\,120.2288 30.3943\,120.2046 30.3813\,120.2098 30.357\,120.1953 30.3534\,120.2074 30.3368\,120.1837 30.3112\,120.1836 30.2601\,120.1535 30.2604\,120.155 30.2261\,120.1324 30.1996))" +HIR,Honiara 
Int'l,Honiara,Honiara,POINT(159.9556 -9.4319),"POLYGON((159.9173 -9.4127\,159.9232 -9.4406\,159.9583 -9.4439\,159.9573 -9.4568\,160.0032 -9.4471\,160.0227 -9.4233\,159.9173 -9.4127))" +HIW,Hiroshima-Nishi,中区,Hiroshima,POINT(132.4519 34.3914),"POLYGON((132.4197 34.3578\,132.4592 34.3685\,132.4651 34.4135\,132.446 34.403\,132.4197 34.3578))" +HKG,Hong Kong Int'l,福田区,Shenzhen,POINT(114.054 22.535),"POLYGON((113.986 22.5241\,114.0088 22.5202\,113.9978 22.5124\,114.0119 22.5006\,114.0532 22.5028\,114.1003 22.5346\,114.0991 22.5701\,114.0544 22.5898\,114.0044 22.5824\,113.986 22.5241))" +HKT,Phuket Int'l,ตำบลรัษฎา,Phuket,POINT(98.3975 7.8881),"POLYGON((98.3484 7.9376\,98.3927 7.8672\,98.4144 7.8714\,98.4259 7.8967\,98.411 7.9303\,98.3484 7.9376))" +HLN,Helena Reg.,Lewis and Clark County,Helena Valley Southeast,POINT(-111.8973 46.6219),"POLYGON((-113.166 47.7207\,-113.0591 47.1793\,-112.7958 47.1769\,-112.7954 46.8317\,-112.3209 46.656\,-112.3168 46.4225\,-111.6318 46.569\,-111.6393 46.7381\,-111.4968 46.7647\,-111.7902 46.9138\,-111.7894 47.129\,-112.0451 47.1928\,-112.048 47.516\,-112.817 47.6074\,-112.9848 47.954\,-113.166 47.7207))" +HLZ,Hamilton Int'l,Waipa District,Te Awamutu,POINT(175.3167 -38.0167),"POLYGON((175.0559 -37.9048\,175.1202 -37.9944\,175.1043 -38.004\,175.1312 -38.0136\,175.1312 -38.0272\,175.1513 -38.0272\,175.1629 -38.0016\,175.214 -38.0061\,175.1918 -38.0528\,175.2005 -38.0671\,175.2838 -38.0918\,175.4079 -38.0898\,175.4446 -38.105\,175.4514 -38.1261\,175.5058 -38.1358\,175.5524 -38.1987\,175.5702 -38.1717\,175.6424 -38.1492\,175.6221 -38.1134\,175.6381 -38.0633\,175.6602 -38.0567\,175.6483 -37.9416\,175.6578 -37.9109\,175.6265 -37.8813\,175.6408 -37.8508\,175.5954 -37.8245\,175.6034 -37.7964\,175.5734 -37.796\,175.5805 -37.7768\,175.544 -37.769\,175.5412 -37.7871\,175.4824 -37.8093\,175.474 -37.8313\,175.4538 -37.8212\,175.4207 -37.838\,175.4244 -37.8748\,175.403 -37.8846\,175.3569 -37.8707\,175.3268 -37.8274\,175.3068 -37.846\,175.2659 -37.809\,175.2438 -37.8044\,175.2256 -37.8322\,175.1776 -37.8166\,175.1431 -37.8403\,175.135 -37.8178\,175.0879 -37.8472\,175.0957 -37.8618\,175.0647 -37.862\,175.0559 -37.9048))" +HMO,Gen. Ignacio P. 
Garcia Int'l,Hermosillo,Hermosillo,POINT(-110.9542 29.0989),"POLYGON((-112.2414 29.3167\,-112.1722 28.9684\,-111.2963 28.2254\,-110.819 28.6772\,-110.8809 28.9143\,-110.3821 28.9905\,-110.5793 29.0553\,-110.6317 29.3796\,-111.024 29.3083\,-111.0377 29.4824\,-111.5127 29.7128\,-111.7511 29.55\,-111.8796 29.6889\,-112.0369 29.3184\,-112.2414 29.3167))" +HNL,Honolulu Int'l,Honolulu County,Honolulu,POINT(-157.846 21.3294),"POLYGON((-158.3446 21.5777\,-158.2987 21.5247\,-158.3014 21.4968\,-158.2289 21.3756\,-158.1833 21.3459\,-158.1644 21.2723\,-158.1348 21.2424\,-157.8987 21.2499\,-157.8591 21.2131\,-157.8102 21.1992\,-157.7453 21.2217\,-157.6758 21.2138\,-157.6009 21.2829\,-157.6023 21.3415\,-157.6522 21.4391\,-157.6407 21.4627\,-157.6505 21.4851\,-157.7026 21.5187\,-157.7619 21.5264\,-157.802 21.5901\,-157.8441 21.6218\,-157.8814 21.7052\,-157.9418 21.7546\,-157.9917 21.763\,-158.0501 21.7399\,-158.0955 21.703\,-158.1202 21.6573\,-158.175 21.632\,-158.3119 21.6218\,-158.3446 21.5777))" +HOD,Hodeidah Int'l,مديرية الميناء,Al Ḩudaydah,POINT(42.9511 14.8022),"POLYGON((42.8688 14.8955\,42.8861 14.8525\,42.9469 14.7877\,42.956 14.81\,42.9249 14.815\,42.9329 14.8251\,42.9095 14.8644\,42.9246 14.8743\,42.9076 14.8692\,42.8933 14.8483\,42.884 14.8942\,42.8866 14.8991\,42.9003 14.9005\,42.9018 14.9017\,42.8782 14.9124\,42.8959 14.9146\,42.8746 14.9231\,42.8688 14.8955))" +HRB,Harbin Taiping,南岗区,Harbin,POINT(126.6333 45.75),"POLYGON((126.4326 45.5738\,126.4973 45.5486\,126.5789 45.5495\,126.5593 45.616\,126.5819 45.651\,126.6437 45.6674\,126.6251 45.7352\,126.7016 45.7395\,126.7129 45.7574\,126.6811 45.7592\,126.6647 45.7816\,126.6172 45.7609\,126.5406 45.6706\,126.5191 45.6832\,126.4949 45.6634\,126.5141 45.6181\,126.4326 45.5738))" +HRE,Harare Int'l,Harare,Harare,POINT(31.0522 -17.8292),"POLYGON((30.8774 -17.8778\,30.9359 -17.9133\,30.9253 -17.9189\,30.9483 -17.9209\,30.8956 -17.9502\,30.9057 -17.9653\,30.9884 -17.9856\,31.06 -18.051\,31.114 -18.0339\,31.1275 -18.0207\,31.1146 -17.9942\,31.1402 -17.9974\,31.1612 -17.9616\,31.1424 -17.945\,31.1487 -17.9319\,31.1739 -17.9082\,31.1906 -17.9147\,31.2152 -17.88\,31.1956 -17.8634\,31.2215 -17.8526\,31.2144 -17.8209\,31.1925 -17.8059\,31.2116 -17.7944\,31.1959 -17.7506\,31.2063 -17.7138\,31.182 -17.6977\,31.1603 -17.7055\,31.1471 -17.6877\,31.1153 -17.6954\,31.1106 -17.6572\,31.0739 -17.6887\,31.0817 -17.7308\,30.9941 -17.738\,30.9499 -17.7103\,30.9477 -17.7583\,30.9078 -17.7658\,30.9189 -17.7812\,30.8884 -17.7826\,30.8987 -17.8672\,30.8774 -17.8778))" +HRK,Kharkov Int'l,Харківська міська громада,Kharkiv,POINT(36.2311 49.9925),"POLYGON((36.1056 49.983\,36.1346 49.9583\,36.1302 49.9229\,36.1988 49.9202\,36.2179 49.9072\,36.2122 49.8911\,36.2424 49.8792\,36.2995 49.8821\,36.2779 49.9115\,36.3576 49.9327\,36.3921 49.8988\,36.4555 49.9082\,36.41 50.0123\,36.3659 50.0554\,36.3338 50.0753\,36.3477 50.0646\,36.3118 50.0528\,36.2775 50.0705\,36.2606 50.0855\,36.2703 50.1048\,36.2183 50.0862\,36.1727 50.0425\,36.1542 50.0461\,36.1657 50.0093\,36.1215 50.0174\,36.1277 49.9916\,36.1056 49.983))" +HTN,Hotan,洛浦县 لوپ ناھىيىسى,Hotan,POINT(80.0167 37.1),"POLYGON((79.8891 36.9054\,80.2411 36.5172\,80.4494 36.5378\,81.5203 39.4842\,80.8923 39.4807\,80.9195 38.5079\,80.6047 38.2181\,80.433 37.6074\,79.9875 37.3946\,80.1272 37.2821\,79.8891 36.9054))" +HYD,Rajiv Gandhi Int'l,Greater Hyderabad Municipal Corporation Central Zone,Hyderābād,POINT(78.4867 17.385),"POLYGON((78.3753 17.3764\,78.4339 17.3748\,78.4491 17.3599\,78.5031 17.386\,78.5252 17.3763\,78.5294 17.3953\,78.4313 
17.4622\,78.3961 17.4395\,78.393 17.3901\,78.3753 17.3764))" +IAD,Dulles Int'l,Fairfax County,Centreville,POINT(-77.4389 38.839),"POLYGON((-77.5371 38.8425\,-77.5086 38.8409\,-77.4937 38.7976\,-77.4431 38.8036\,-77.4126 38.7717\,-77.4118 38.745\,-77.3881 38.7488\,-77.3739 38.7119\,-77.3559 38.7254\,-77.2576 38.6837\,-77.2209 38.6478\,-77.2273 38.604\,-77.1304 38.6349\,-77.1226 38.6853\,-77.0415 38.7258\,-77.0405 38.7852\,-77.0757 38.8006\,-77.1376 38.7982\,-77.1419 38.8257\,-77.1102 38.8448\,-77.1497 38.8756\,-77.1943 38.8861\,-77.1944 38.8993\,-77.1723 38.8932\,-77.1198 38.9344\,-77.1357 38.9542\,-77.2349 38.9761\,-77.2559 39.0021\,-77.2471 39.0257\,-77.3283 39.0577\,-77.5371 38.8425)\,(-77.335 38.8527\,-77.2925 38.8717\,-77.2689 38.8627\,-77.2705 38.841\,-77.3032 38.8328\,-77.335 38.8527))" +IBA,Ibadan,Ibadan North,Ibadan,POINT(3.9167 7.3964),"POLYGON((3.8762 7.4465\,3.8801 7.4221\,3.8993 7.3798\,3.9328 7.4137\,3.9328 7.4365\,3.9011 7.4619\,3.8762 7.4465))" +ICN,Incheon Int'l,인천광역시,Incheon,POINT(126.6333 37.4833),"POLYGON((125.3949 36.8544\,125.8182 36.8586\,126.0505 36.9312\,126.2779 37.0562\,126.369 37.1524\,126.4228 37.1521\,126.5534 37.2391\,126.4945 37.3073\,126.4989 37.3206\,126.6603 37.3329\,126.7224 37.38\,126.7793 37.453\,126.7781 37.4718\,126.7423 37.4869\,126.7417 37.5089\,126.7603 37.516\,126.7644 37.5553\,126.7936 37.5816\,126.7517 37.5796\,126.6525 37.639\,126.6253 37.6029\,126.545 37.5839\,126.5491 37.618\,126.5223 37.675\,126.52 37.792\,126.4136 37.8447\,126.2061 37.8232\,126.1837 37.7784\,126.1872 37.749\,126.1607 37.7175\,126.1111 37.7125\,126.0167 37.6583\,125.75 37.7147\,125.695 37.6917\,125.5167 37.6819\,125.4263 37.6492\,125.471 37.527\,125.5524 37.4453\,125.7179 37.3925\,125.8678 37.4094\,125.7525 37.3021\,125.713 37.1811\,125.6349 37.1702\,125.5611 37.1155\,125.3949 36.8544))" +IDR,Devi Ahilyabai Holkar Int'l,Indore City,Indore,POINT(75.8472 22.7167),"POLYGON((75.7951 22.7254\,75.8181 22.7043\,75.8181 22.6691\,75.8561 22.6569\,75.8927 22.6874\,75.9184 22.6816\,75.9287 22.7485\,75.9047 22.7505\,75.8983 22.7823\,75.8602 22.7719\,75.8427 22.7834\,75.8397 22.7435\,75.7951 22.7254))" +IEV,Kiev Zhuliany Int'l,Вишнева міська громада,Vyshneve,POINT(30.3581 50.3869),"POLYGON((30.3311 50.3859\,30.3642 50.3528\,30.426 50.3741\,30.3901 50.3861\,30.4162 50.3981\,30.3311 50.3859))" +IFN,Esfahan Int'l,اصفهان,Eşfahān,POINT(51.6675 32.6447),"POLYGON((51.5256 32.7745\,51.5274 32.7564\,51.5902 32.7033\,51.5883 32.6673\,51.5608 32.6375\,51.5719 32.5969\,51.6286 32.5677\,51.6428 32.5147\,51.6795 32.5052\,51.6664 32.5346\,51.7384 32.5605\,51.7642 32.5985\,51.8044 32.5655\,51.8459 32.5788\,51.847 32.6348\,51.8321 32.6538\,51.8581 32.6591\,51.8644 32.6907\,51.7703 32.706\,51.7384 32.7328\,51.7087 32.736\,51.7082 32.7497\,51.6338 32.7645\,51.6065 32.806\,51.5746 32.8204\,51.5387 32.8093\,51.5466 32.7785\,51.5256 32.7745))" +IGU,Foz do Iguaçu Int'l,Foz do Iguaçu,Foz do Iguaçu,POINT(-54.5875 -25.54),"POLYGON((-54.6199 -25.4555\,-54.6018 -25.4855\,-54.5935 -25.5928\,-54.5526 -25.5881\,-54.5264 -25.6287\,-54.4927 -25.6181\,-54.451 -25.6575\,-54.434 -25.6974\,-54.3839 -25.5975\,-54.3483 -25.607\,-54.3307 -25.5734\,-54.294 -25.558\,-54.322 -25.533\,-54.4864 -25.5524\,-54.4859 -25.4039\,-54.5075 -25.3667\,-54.4669 -25.3247\,-54.3991 -25.3771\,-54.385 -25.259\,-54.3951 -25.2323\,-54.4089 -25.248\,-54.4736 -25.2115\,-54.5016 -25.2972\,-54.6199 -25.4555))" +ILM,Wilmington Int'l,New Hanover County,Murraysville,POINT(-77.8429 34.2919),"POLYGON((-78.0296 34.3318\,-78.0262 34.3181\,-78.0066 
34.3202\,-78.0157 34.2907\,-77.9911 34.283\,-78.0015 34.2721\,-77.9566 34.2345\,-77.9311 34.0434\,-77.9485 33.9713\,-77.897 33.7868\,-77.8942 33.8787\,-77.7856 34.1252\,-77.6753 34.2531\,-77.7105 34.298\,-77.7392 34.2928\,-77.8127 34.359\,-77.8047 34.3767\,-77.8231 34.3893\,-77.8856 34.3645\,-77.9167 34.3758\,-77.9347 34.358\,-77.9622 34.3829\,-77.9878 34.3711\,-77.978 34.357\,-77.9909 34.3373\,-78.0296 34.3318))" +ILR,Ilorin Int'l,Ilorin East,Ilorin,POINT(4.55 8.5),"POLYGON((4.5315 8.5306\,4.5574 8.4529\,4.5767 8.4366\,4.6341 8.4422\,4.629 8.4783\,4.5823 8.4996\,4.6768 8.5068\,4.7233 8.5494\,4.8552 8.5801\,4.9812 8.6563\,4.9837 8.6762\,4.8892 8.7276\,4.8808 8.7464\,4.6636 8.5616\,4.5437 8.5606\,4.5315 8.5306))" +IND,Indianapolis Int'l,Indianapolis,Indianapolis,POINT(-86.1458 39.7771),"POLYGON((-86.3281 39.8661\,-86.3264 39.7795\,-86.3263 39.6322\,-85.9521 39.6385\,-85.9515 39.7164\,-85.954 39.8696\,-85.9381 39.8698\,-85.938 39.9275\,-86.1459 39.9272\,-86.3261 39.9242\,-86.3254 39.8662\,-86.3281 39.8661))" +ING,Com. Armando Tola Int'l,Lago Argentino,El Calafate,POINT(-72.2833 -50.3333),"POLYGON((-73.5605 -49.9425\,-73.4445 -49.9656\,-73.5218 -50.1537\,-73.3575 -50.5351\,-73.1451 -50.6525\,-73.2 -50.8206\,-72.4944 -50.6017\,-70.644 -50.7373\,-70.5797 -49.2044\,-71.3988 -49.1939\,-72.5851 -48.4871\,-72.5317 -48.7972\,-72.9427 -48.943\,-73.1464 -49.1891\,-73.04 -49.2769\,-73.4731 -49.2047\,-73.5605 -49.9425))" +INL,Falls Int'l,Hibbing,Hibbing,POINT(-92.9486 47.398),"POLYGON((-93.0696 47.3935\,-93.0667 47.3716\,-93.0647 47.3716\,-93.0658 47.2828\,-92.8095 47.2826\,-92.8096 47.3716\,-92.8113 47.3716\,-92.8063 47.4286\,-92.8069 47.4677\,-92.8945 47.4684\,-92.9415 47.4716\,-92.9347 47.5434\,-93.064 47.5444\,-93.0651 47.4999\,-93.0689 47.4564\,-93.0696 47.3935))" +IPL,Imperial Cty.,Municipio de Mexicali,Mexicali,POINT(-115.4678 32.6633),"POLYGON((-115.8793 32.6361\,-115.8477 31.9901\,-115.6047 31.6242\,-115.3855 31.7732\,-115.3116 31.8051\,-115.2927 31.7911\,-115.1872 31.8303\,-115.1818 31.8572\,-115.1069 31.8049\,-115.0563 31.7122\,-115.0713 31.6775\,-115.0752 31.5617\,-115.0539 31.4406\,-115.0311 31.448\,-114.9885 31.3629\,-114.8787 31.385\,-114.8328 31.5785\,-114.7837 31.6355\,-114.817 31.7287\,-114.8063 31.8164\,-114.8432 31.8641\,-114.9106 31.8676\,-114.942 31.8907\,-114.947 31.9181\,-114.9656 31.9215\,-114.957 32.0386\,-114.9897 32.1464\,-114.9753 32.178\,-115.053 32.2452\,-114.9666 32.3388\,-114.9685 32.3907\,-114.9319 32.4397\,-114.935 32.4819\,-114.8159 32.483\,-114.7915 32.5564\,-114.813 32.5574\,-114.8004 32.5868\,-114.8105 32.6223\,-114.7652 32.6431\,-114.7199 32.7187\,-115.8793 32.6361))" +ISK,Gandhinagar,Nashik,Nāsik,POINT(73.78 20.0),"POLYGON((73.5566 20.0768\,73.5764 20.0602\,73.5847 20.0035\,73.6086 20.0013\,73.6071 19.9806\,73.643 19.9688\,73.6198 19.8835\,73.6976 19.8906\,73.7245 19.8373\,73.7535 19.8502\,73.7534 19.8163\,73.7714 19.8042\,73.7964 19.8255\,73.8265 19.822\,73.835 19.8536\,73.8704 19.8596\,73.869 19.8884\,73.9216 19.8982\,73.9109 19.9266\,73.937 19.9325\,73.9461 19.9891\,73.9347 20.0125\,73.9533 20.0658\,73.8987 20.0589\,73.8527 20.0751\,73.7917 20.0727\,73.7852 20.0855\,73.763 20.0778\,73.7354 20.0995\,73.6935 20.093\,73.6791 20.1293\,73.6628 20.1173\,73.6084 20.1402\,73.5566 20.0768))" +IST,Atatürk Hava Limani,Fatih,Istanbul,POINT(28.955 41.0136),"POLYGON((28.9199 40.989\,28.9787 41.0012\,28.9871 41.0163\,28.9417 41.0397\,28.9199 40.989))" +ITM,Osaka Int'l,大阪市,Ōsaka,POINT(135.5022 34.6939),"POLYGON((135.31 34.6266\,135.3144 34.5939\,135.3811 34.6111\,135.5711 
34.5869\,135.587 34.6088\,135.5619 34.617\,135.5766 34.6265\,135.5575 34.6683\,135.5992 34.7144\,135.5824 34.7234\,135.5699 34.7089\,135.545 34.7688\,135.5276 34.7485\,135.4477 34.7336\,135.3961 34.6763\,135.3297 34.6633\,135.31 34.6266))" +ITO,Hilo Int'l,Hawaii,Hilo,POINT(-155.0863 19.6883),"POLYGON((-156.12 19.7272\,-156.0786 19.625\,-156.0303 19.5875\,-156.0129 19.5\,-155.9432 19.3442\,-155.9747 19.1302\,-155.9541 19.0536\,-155.9163 19\,-155.74 18.9169\,-155.7229 18.8825\,-155.6943 18.8668\,-155.6391 18.875\,-155.5656 18.9345\,-155.5 19.0643\,-155.4661 19.0954\,-155.3254 19.1713\,-155.2684 19.2195\,-155.1756 19.2114\,-154.9397 19.3128\,-154.7861 19.4404\,-154.7566 19.5069\,-154.7783 19.5599\,-154.8814 19.62\,-154.9289 19.6736\,-154.9319 19.7155\,-154.9638 19.7692\,-155.0333 19.7934\,-155.0316 19.875\,-155.1403 19.9895\,-155.2524 20.0634\,-155.3975 20.1328\,-155.5821 20.1813\,-155.7404 20.2889\,-155.8368 20.3212\,-155.9159 20.301\,-155.959 20.2269\,-155.948 20.1238\,-155.8843 20.0183\,-155.9389 19.9615\,-155.9624 19.9092\,-156.0124 19.8898\,-156.0957 19.815\,-156.12 19.7272))" +IUE,Niue Int'l,Alofi,Alofi,POINT(-169.921 -19.056),"POLYGON((-169.9497 -19.0749\,-169.9238 -19.08\,-169.92 -19.0766\,-169.9145 -19.08\,-169.9118 -19.0784\,-169.8851 -19.0855\,-169.8788 -19.0901\,-169.846 -19.0571\,-169.8419 -19.0249\,-169.8548 -19.0243\,-169.877 -19.0379\,-169.8847 -19.0382\,-169.9235 -19.0161\,-169.917 -19.0407\,-169.9199 -19.0527\,-169.9242 -19.0579\,-169.9461 -19.0701\,-169.9497 -19.0749))" +IXD,Allahabad,Prayagraj,Prayagraj,POINT(81.8464 25.4358),"POLYGON((81.6371 25.4729\,81.6481 25.4825\,81.6594 25.4701\,81.6456 25.4406\,81.6459 25.4184\,81.6639 25.4156\,81.6588 25.3808\,81.6746 25.3728\,81.6942 25.3941\,81.7134 25.3897\,81.6979 25.3574\,81.7894 25.3235\,81.8142 25.3412\,81.7942 25.3772\,81.8011 25.3949\,81.8375 25.4213\,81.8866 25.4247\,81.8904 25.4482\,81.8876 25.5\,81.8711 25.505\,81.8049 25.5062\,81.7517 25.4839\,81.6995 25.5127\,81.6705 25.5061\,81.6371 25.4729))" +IXJ,Jammu,Jammu,Jammu,POINT(74.87 32.73),"POLYGON((74.6545 32.7291\,74.6662 32.6756\,74.7319 32.6674\,74.7749 32.6382\,74.8271 32.6428\,74.835 32.6249\,74.8654 32.6454\,74.8975 32.6399\,74.9621 32.6823\,74.96 32.7093\,75.0038 32.7426\,75.0154 32.7286\,75.0439 32.7252\,75.056 32.7395\,75.0913 32.7194\,75.1648 32.7688\,74.9749 32.9351\,74.9607 32.8944\,74.9358 32.8865\,74.8525 32.9872\,74.8116 32.9929\,74.7854 32.9695\,74.782 32.9085\,74.739 32.8992\,74.7053 32.842\,74.6545 32.7291))" +IXM,Madurai,Madurai,Madurai,POINT(78.1198 9.9252),"POLYGON((78.0156 9.8486\,78.093 9.8247\,78.203 9.9619\,78.1462 9.9934\,78.0896 9.9825\,78.0849 9.9562\,78.0298 9.984\,78.0595 9.9405\,78.0156 9.8486))" +IXR,Birsa Munda,Kanke,Rānchi,POINT(85.33 23.36),"POLYGON((85.2036 23.4902\,85.2496 23.4526\,85.2212 23.3741\,85.3151 23.3583\,85.3216 23.3372\,85.3471 23.3601\,85.4185 23.3751\,85.4539 23.413\,85.4504 23.4366\,85.4217 23.4606\,85.4297 23.4746\,85.3677 23.4959\,85.3367 23.526\,85.3405 23.5609\,85.3186 23.5703\,85.2218 23.5801\,85.2194 23.5087\,85.2036 23.4902))" +IXU,Aurangabad,Jalna,Jālna,POINT(75.8864 19.841),"POLYGON((75.8011 19.8252\,75.8145 19.817\,75.8115 19.7863\,75.8351 19.775\,75.8265 19.7336\,75.8415 19.7203\,75.9072 19.6987\,75.9752 19.6989\,75.9878 19.6808\,76.0616 19.6617\,76.1148 19.6887\,76.1391 19.6761\,76.133 19.6958\,76.1633 19.6747\,76.1853 19.6718\,76.1909 19.6905\,76.2161 19.6805\,76.2195 19.7015\,76.198 19.7109\,76.2224 19.7583\,76.3071 19.7661\,76.3074 19.8173\,76.3262 19.8488\,76.3051 19.8785\,76.1969 19.8655\,76.1818 
19.8481\,76.1485 19.8561\,76.1478 19.8437\,76.1295 19.8651\,76.0977 19.857\,76.0812 19.9126\,76.059 19.9196\,76.0482 19.946\,76.0114 19.9537\,76.0114 19.9737\,75.9218 20.0014\,75.8541 19.9046\,75.8138 19.8811\,75.8011 19.8252))" +IXW,Sonari,Golmuri-Cum-Jugsalai,Jamshedpur,POINT(86.1842 22.7925),"POLYGON((86.1458 22.8077\,86.1747 22.7886\,86.1579 22.7515\,86.1693 22.6743\,86.2561 22.7032\,86.2685 22.7329\,86.2844 22.7218\,86.3026 22.7368\,86.3495 22.738\,86.3718 22.7164\,86.3835 22.7382\,86.4188 22.7275\,86.4524 22.7562\,86.3961 22.8034\,86.2044 22.8619\,86.1973 22.8183\,86.1599 22.8369\,86.1458 22.8077))" +JAI,Jaipur Int'l,Niwai Tehsil,Nawai,POINT(75.924 26.3824),"POLYGON((75.7331 26.408\,75.7558 26.4042\,75.7422 26.3774\,75.753 26.356\,75.8275 26.341\,75.8198 26.3118\,75.8508 26.304\,75.8791 26.3201\,75.8821 26.2937\,75.9192 26.2855\,75.922 26.246\,75.9518 26.2972\,75.9924 26.2745\,75.9892 26.2263\,75.9304 26.2409\,75.9506 26.1853\,75.9809 26.1954\,75.9902 26.2214\,76.028 26.2233\,76.0258 26.2529\,76.0966 26.2396\,76.1194 26.2529\,76.1104 26.2765\,76.1453 26.3111\,76.124 26.3455\,76.1413 26.3602\,76.1296 26.3754\,76.1533 26.4402\,76.1319 26.4602\,76.1757 26.4864\,76.1433 26.5263\,76.1004 26.5388\,76.0633 26.5092\,76.0253 26.5209\,76.0088 26.4999\,75.9734 26.5071\,75.9613 26.4859\,75.9283 26.5088\,75.8037 26.5245\,75.7967 26.5507\,75.7725 26.5428\,75.7793 26.5243\,75.76 26.5179\,75.7709 26.4716\,75.7471 26.4697\,75.741 26.4471\,75.7596 26.4285\,75.7331 26.408))" +JAN,Jackson Int'l,Pearl,Pearl,POINT(-90.0918 32.273),"POLYGON((-90.1673 32.2797\,-90.1388 32.2437\,-90.0434 32.2399\,-90.038 32.3258\,-90.0628 32.3201\,-90.0522 32.2982\,-90.0659 32.2841\,-90.0912 32.3024\,-90.1302 32.3013\,-90.1378 32.2802\,-90.1673 32.2797))" +JAX,Jacksonville Int'l,Saint Johns County,Fruit Cove,POINT(-81.6175 30.0972),"POLYGON((-81.6802 30.1212\,-81.6809 30.0147\,-81.601 29.956\,-81.599 29.8754\,-81.5639 29.7981\,-81.5252 29.7595\,-81.5237 29.6224\,-81.3241 29.6256\,-81.3243 29.6535\,-81.3087 29.6421\,-81.261 29.6691\,-81.2473 29.6591\,-81.1508 29.6711\,-81.1928 29.7783\,-81.3202 30.2529\,-81.4369 30.2523\,-81.4335 30.1053\,-81.5387 30.1037\,-81.539 30.1199\,-81.6123 30.1329\,-81.6802 30.1212))" +JED,King Abdul Aziz Int'l,محافظة جدة,Jeddah,POINT(39.1728 21.5433),"POLYGON((38.93 22.0121\,39.0827 21.7096\,39.132 21.7653\,39.1574 21.7802\,39.1612 21.7782\,39.1042 21.3001\,39.3726 20.8916\,39.3108 21.3337\,39.4542 21.5154\,39.3786 21.7159\,39.2426 21.6627\,39.2321 22.3207\,38.93 22.0121))" +JFK,John F Kennedy Int'l,City of New York,New York,POINT(-73.9249 40.6943),"POLYGON((-74.2588 40.4989\,-74.2253 40.4766\,-73.9779 40.5191\,-73.9021 40.4921\,-73.8126 40.53\,-73.7572 40.5312\,-73.7565 40.5862\,-73.7381 40.6026\,-73.7681 40.6263\,-73.7248 40.6523\,-73.7303 40.7222\,-73.7002 40.7393\,-73.7797 40.8121\,-73.7484 40.8718\,-73.8382 40.8941\,-73.8511 40.9101\,-73.8593 40.9005\,-73.9183 40.9176\,-74.014 40.7576\,-74.0558 40.6515\,-74.1914 40.642\,-74.2146 40.5605\,-74.2475 40.5494\,-74.2588 40.4989))" +JIB,Djibouti-Ambouli Int'l,Djibouti جيبوتي,Djibouti,POINT(43.145 11.5883),"POLYGON((42.9242 11.5323\,43.1861 11.5291\,43.1448 11.6236\,43.1356 11.5921\,43.102 11.5803\,43.1102 11.5956\,43.1024 11.6013\,43.0947 11.5798\,43.0807 11.6013\,43.0533 11.5987\,43.0593 11.5872\,42.9248 11.5926\,42.9242 11.5323))" +JLR,Jabalpur,Jabalpur,Jabalpur,POINT(79.9333 23.1667),"POLYGON((79.8831 23.1597\,79.9305 23.1181\,79.9703 23.1476\,79.972 23.2056\,79.9342 23.2111\,79.9285 23.1946\,79.8995 23.2061\,79.9078 23.1671\,79.8927 
23.1732\,79.8831 23.1597))" +JMU,Jiamusi Dongjiao,前进区,Jiamusi,POINT(130.3653 46.8081),"POLYGON((130.3556 46.8179\,130.3851 46.7409\,130.4567 46.7319\,130.388 46.8212\,130.3556 46.8179))" +JNB,OR Tambo Int'l,City of Johannesburg Metropolitan Municipality,Johannesburg,POINT(28.0456 -26.2044),"POLYGON((27.7143 -26.2139\,27.8874 -26.5263\,28.181 -25.9195\,27.9084 -25.9269\,27.7143 -26.2139))" +JNU,Juneau Int'l,Juneau,Juneau,POINT(-134.1739 58.4546),"POLYGON((-135.2191 58.975\,-135.0741 58.5021\,-134.7034 58.2768\,-134.7095 58.2257\,-134.7725 58.1664\,-134.777 58.1032\,-134.6957 58.0304\,-134.6654 58.03\,-134.6214 58.0592\,-134.5539 58.0536\,-134.5488 58.0665\,-134.5702 58.0779\,-134.5318 58.0967\,-134.4668 58.0942\,-134.1775 58.1596\,-133.8131 57.8377\,-133.6966 57.795\,-133.1723 58.1537\,-133.3454 58.2763\,-133.4615 58.3877\,-133.3773 58.4305\,-133.7071 58.6123\,-133.8405 58.7294\,-134.2581 58.861\,-134.3365 58.9234\,-134.3135 58.9622\,-135.2191 58.975))" +JOI,Joinville-Lauro C. de Loyola,Joinville,Joinvile,POINT(-48.8437 -26.3204),"POLYGON((-49.2008 -26.2206\,-49.1693 -26.2182\,-49.1757 -26.263\,-49.1447 -26.3162\,-49.102 -26.2933\,-49.0942 -26.3288\,-49.082 -26.295\,-49.0629 -26.2966\,-49.0349 -26.2613\,-49.0669 -26.3135\,-48.9968 -26.3113\,-49.0209 -26.3504\,-49.0037 -26.3874\,-48.9305 -26.4212\,-48.9233 -26.3799\,-48.8771 -26.3805\,-48.8278 -26.4521\,-48.804 -26.42\,-48.824 -26.398\,-48.806 -26.373\,-48.7652 -26.3519\,-48.7367 -26.3581\,-48.7233 -26.3421\,-48.758 -26.295\,-48.74 -26.286\,-48.7632 -26.2162\,-48.7554 -26.1964\,-48.7937 -26.1326\,-48.8227 -26.1202\,-49.0417 -26.1142\,-49.0528 -26.0776\,-49.0741 -26.0742\,-49.1312 -26.0999\,-49.1382 -26.1173\,-49.1623 -26.1131\,-49.16 -26.1329\,-49.1906 -26.1444\,-49.1564 -26.1988\,-49.192 -26.2\,-49.2008 -26.2206))" +JPA,Presidente Castro Pinto Int'l,João Pessoa,João Pessoa,POINT(-34.88 -7.12),"POLYGON((-34.974 -7.1755\,-34.9677 -7.2161\,-34.9294 -7.2254\,-34.8896 -7.2145\,-34.8054 -7.2473\,-34.793 -7.1547\,-34.8193 -7.1348\,-34.8423 -7.056\,-34.8445 -7.1004\,-34.8627 -7.0879\,-34.8634 -7.0608\,-34.8957 -7.1021\,-34.8942 -7.1215\,-34.9194 -7.1348\,-34.9162 -7.1664\,-34.974 -7.1755))" +JRO,Kilimanjaro Int'l,Arusha Municipal,Arusha,POINT(36.6833 -3.3667),"POLYGON((36.584 -3.3907\,36.5928 -3.4358\,36.6253 -3.4488\,36.6297 -3.4915\,36.6518 -3.4898\,36.6702 -3.5571\,36.7156 -3.5537\,36.7303 -3.4941\,36.722 -3.4434\,36.7671 -3.3952\,36.7568 -3.3774\,36.6928 -3.3602\,36.633 -3.3662\,36.6081 -3.3428\,36.584 -3.3907))" +JUJ,Gob. 
Horacio Guzman Int'l,Municipio de San Salvador de Jujuy,San Salvador de Jujuy,POINT(-65.3 -24.1833),"POLYGON((-65.725 -24.186\,-65.6788 -24.2468\,-65.5749 -24.2949\,-65.4907 -24.2962\,-65.4464 -24.2689\,-65.3783 -24.2772\,-65.3456 -24.2546\,-65.2347 -24.2764\,-65.2282 -24.2545\,-65.2571 -24.2298\,-65.2355 -24.2167\,-65.2025 -24.2293\,-65.2183 -24.2079\,-65.2151 -24.1379\,-65.2407 -24.1293\,-65.2102 -24.1101\,-65.201 -24.0824\,-65.1865 -24.0941\,-65.1305 -24.0862\,-65.1122 -24.0201\,-65.1833 -23.881\,-65.2115 -23.9328\,-65.2355 -23.9427\,-65.2543 -23.9904\,-65.237 -24.0282\,-65.2594 -24.0385\,-65.2519 -24.0526\,-65.2994 -24.0343\,-65.3579 -24.0631\,-65.3484 -24.1216\,-65.3732 -24.1457\,-65.3654 -24.1618\,-65.4334 -24.1847\,-65.5399 -24.1605\,-65.725 -24.186))" +KAD,Kaduna,Kaduna North,Kaduna,POINT(7.4333 10.5167),"POLYGON((7.4115 10.5764\,7.4272 10.5504\,7.4185 10.5022\,7.4397 10.4868\,7.4872 10.5624\,7.4513 10.6186\,7.4246 10.6061\,7.4115 10.5764))" +KAN,Kano Mallam Aminu Int'l,Kano,Kano,POINT(8.5167 12.0),"POLYGON((7.6752 11.4833\,7.7859 11.3614\,8.1501 11.5192\,8.1229 11.3867\,8.6073 11.0712\,8.4812 10.7159\,8.7111 10.5434\,8.8544 10.6969\,8.8059 11.0846\,9.3603 11.5608\,9.1389 11.7192\,9.1987 12.0828\,8.915 12.0762\,8.9099 12.301\,8.7235 12.2948\,8.73 12.5591\,8.4818 12.597\,8.3743 12.4298\,8.2788 12.6046\,7.8645 12.2807\,7.9023 11.6532\,7.6752 11.4833))" +KBL,Kabul Int'l,کابل ښاروالۍ,Kabul,POINT(69.1783 34.5253),"POLYGON((68.9965 34.4485\,69.011 34.435\,69.0573 34.4413\,69.1103 34.4071\,69.1244 34.4294\,69.1474 34.425\,69.1832 34.4489\,69.1786 34.4832\,69.2223 34.4728\,69.2589 34.4927\,69.2485 34.5028\,69.2666 34.5362\,69.3554 34.5455\,69.3602 34.5583\,69.2748 34.5958\,69.226 34.5896\,69.117 34.6105\,69.0993 34.5743\,69.129 34.5578\,69.1225 34.5398\,69.0553 34.5621\,69.0209 34.4701\,68.9965 34.4485))" +KBP,Boryspil Int'l,Бориспіль,Boryspil,POINT(30.95 50.35),"POLYGON((30.8989 50.373\,30.9062 50.3415\,30.9259 50.3457\,30.9341 50.3298\,31.0076 50.3384\,30.9609 50.411\,30.9481 50.4136\,30.9496 50.3803\,30.9168 50.3864\,30.8989 50.373))" +KCH,Kuching Int'l,Kuching,Kuching,POINT(110.3439 1.5575),"POLYGON((110.1144 1.5321\,110.281 1.4602\,110.1487 1.1981\,110.2863 1.0445\,110.4823 1.3552\,110.302 1.4486\,110.5164 1.5106\,110.5258 1.7247\,110.3356 1.8062\,110.1144 1.5321))" +KGA,Kananga,Kasaï-Central,Kananga,POINT(22.4488 -5.897),"POLYGON((21.4069 -6.8245\,21.7952 -7.2589\,21.8347 -7.6763\,22.173 -7.8885\,22.6118 -7.744\,22.678 -7.8975\,23.084 -7.8749\,23.2062 -7.7399\,23.2819 -6.9238\,23.0157 -6.7916\,23.1534 -6.3858\,22.9444 -5.8413\,23.7645 -5.6283\,23.714 -5.3647\,23.2162 -5.31\,22.9051 -5.0297\,23.1887 -4.7682\,22.6292 -4.2318\,22.5243 -4.2013\,22.5367 -4.4554\,22.0601 -5.1467\,21.6889 -5.0649\,21.6244 -5.3207\,21.9782 -5.6433\,21.8824 -5.8661\,21.749 -5.7644\,21.5919 -5.97\,21.5286 -6.4954\,21.694 -6.6381\,21.4069 -6.8245))" +KGL,Kigali Int'l,Umujyi wa Kigali,Kigali,POINT(30.0606 -1.9536),"POLYGON((29.9795 -1.8866\,30.0034 -1.9552\,29.9929 -1.9932\,30.0155 -2.0124\,29.9833 -2.0437\,29.9942 -2.0686\,30.0166 -2.0759\,30.0476 -2.0546\,30.0656 -2.0713\,30.119 -2.0598\,30.1724 -2.0282\,30.2078 -2.0797\,30.2291 -2.0683\,30.2341 -1.9682\,30.2799 -1.9305\,30.2474 -1.8456\,30.2196 -1.8423\,30.1382 -1.7796\,30.1188 -1.8191\,30.0632 -1.8278\,30.0308 -1.8629\,30.0169 -1.8349\,29.9978 -1.8423\,29.9795 -1.8866))" +KHG,Kashi,喀什市 قەشقەر شەھىرى,Kashgar,POINT(75.9938 39.4681),"POLYGON((75.8193 39.4545\,75.9782 39.4085\,75.9958 39.4365\,76.0679 39.4229\,76.1865 39.4325\,76.2194 39.4101\,76.2762 
39.4323\,76.2804 39.4197\,76.3263 39.4218\,76.4252 39.5008\,76.4901 39.483\,76.5014 39.5152\,76.5561 39.5446\,76.5876 39.6011\,76.4484 39.5762\,76.2969 39.5912\,76.2115 39.6261\,75.9706 39.6115\,75.9782 39.5884\,75.903 39.5986\,75.8871 39.5928\,75.8889 39.5336\,75.9204 39.5038\,75.8833 39.5004\,75.89 39.487\,75.8593 39.473\,75.8648 39.4587\,75.8193 39.4545))" +KHH,Kaohsiung Int'l,高雄市,Kaohsiung,POINT(120.2975 22.615),"POLYGON((120.1747 22.9103\,120.4237 22.4769\,120.4741 22.8368\,120.8647 22.8431\,121.0149 23.4543\,120.6437 23.296\,120.4162 22.9161\,120.1747 22.9103))" +KHI,Karachi Civil,صدر ٹاؤن,Karachi,POINT(67.01 24.86),"POLYGON((66.9874 24.7876\,67.0313 24.7767\,67.0704 24.7235\,67.1127 24.7582\,67.0777 24.8029\,67.0739 24.8313\,67.0945 24.8512\,67.0838 24.8663\,67.0602 24.8508\,67.0307 24.8728\,67.0083 24.8594\,67.0305 24.8497\,67.0165 24.8224\,66.993 24.8186\,66.9874 24.7876))" +KHN,Nanchang Changbei Int'l,东湖区,Nanchang,POINT(115.8872 28.6842),"POLYGON((115.8641 28.6725\,115.9226 28.6782\,115.9281 28.7356\,115.9777 28.7906\,115.9534 28.7956\,115.8915 28.7541\,115.8641 28.6725)\,(115.9206 28.6816\,115.9209 28.6824\,115.9213 28.6819\,115.9206 28.6816))" +KIN,Norman Manley Int'l,Saint Catherine,Portmore,POINT(-76.8799 17.95),"POLYGON((-77.2193 18.1816\,-77.1883 18.0496\,-77.1708 18.0448\,-77.1531 17.9021\,-77.1341 17.8827\,-77.0831 17.8989\,-77.0793 17.9127\,-77.0578 17.9081\,-77.0276 17.8758\,-77.0396 17.8456\,-76.9723 17.8523\,-76.9557 17.8379\,-76.9256 17.8501\,-76.8809 17.915\,-76.8956 17.9105\,-76.8927 17.9218\,-76.8765 17.9171\,-76.8724 17.953\,-76.839 17.9747\,-76.8522 18.0175\,-76.8896 18.0373\,-76.8778 18.0727\,-76.8883 18.0788\,-76.8628 18.1109\,-76.8766 18.1195\,-76.8705 18.1594\,-76.8938 18.1579\,-76.9456 18.2534\,-76.9928 18.2508\,-77.083 18.2011\,-77.2193 18.1816))" +KIV,Kishinev S.E.,Sectorul Centru,Chisinau,POINT(28.8353 47.0228),"POLYGON((28.77 46.9791\,28.8005 46.9951\,28.8386 46.9817\,28.8417 47.0056\,28.8623 47.0113\,28.8799 46.9938\,28.8873 46.9944\,28.8606 47.023\,28.8172 47.0235\,28.7761 47.0042\,28.77 46.9791))" +KIX,Kansai Int'l,大阪市,Ōsaka,POINT(135.5022 34.6939),"POLYGON((135.31 34.6266\,135.3144 34.5939\,135.3811 34.6111\,135.5711 34.5869\,135.587 34.6088\,135.5619 34.617\,135.5766 34.6265\,135.5575 34.6683\,135.5992 34.7144\,135.5824 34.7234\,135.5699 34.7089\,135.545 34.7688\,135.5276 34.7485\,135.4477 34.7336\,135.3961 34.6763\,135.3297 34.6633\,135.31 34.6266))" +KMG,Kunming Wujiaba Int'l,五华区,Kunming,POINT(102.7061 25.0433),"POLYGON((102.5601 25.1967\,102.5983 25.1282\,102.5992 25.1022\,102.6172 25.0941\,102.6163 25.0491\,102.7158 25.0364\,102.7124 25.0743\,102.7468 25.1214\,102.7297 25.1346\,102.719 25.1226\,102.7065 25.1433\,102.7197 25.2008\,102.7346 25.2079\,102.7342 25.2251\,102.713 25.2288\,102.7257 25.2405\,102.7261 25.2946\,102.6942 25.2885\,102.6705 25.3023\,102.6575 25.2756\,102.6414 25.2776\,102.639 25.2126\,102.6098 25.2145\,102.602 25.1979\,102.5601 25.1967))" +KMS,Kumasi,Kumasi Metropolitan District,Kumasi,POINT(-1.6167 6.6667),"POLYGON((-1.7128 6.7514\,-1.6637 6.7411\,-1.6372 6.7084\,-1.6467 6.6342\,-1.633 6.617\,-1.6025 6.6243\,-1.6143 6.6806\,-1.5948 6.6819\,-1.618 6.7\,-1.5711 6.7321\,-1.5785 6.7474\,-1.5954 6.7481\,-1.6048 6.7185\,-1.6278 6.7112\,-1.6454 6.7584\,-1.6765 6.7669\,-1.7128 6.7514))" +KNA,Viña del Mar,Viña del Mar,Viña del Mar,POINT(-71.5517 -33.0244),"POLYGON((-71.5871 -33.0285\,-71.5575 -33.051\,-71.5397 -33.1041\,-71.4938 -33.1027\,-71.4687 -33.066\,-71.4863 -33.0596\,-71.4861 -33.0366\,-71.4426 -33.0018\,-71.5508 
-32.9454\,-71.5524 -33.0084\,-71.5871 -33.0285))" +KNU,Kanpur,Kanpur,Cawnpore,POINT(80.3319 26.4499),"POLYGON((80.1196 26.4652\,80.1363 26.4019\,80.151 26.4053\,80.1349 26.3943\,80.1537 26.3814\,80.1365 26.3657\,80.1509 26.3663\,80.1504 26.348\,80.1687 26.3538\,80.1896 26.3444\,80.1744 26.3462\,80.185 26.3349\,80.1555 26.3044\,80.1761 26.2735\,80.2067 26.2818\,80.2348 26.2518\,80.2585 26.2646\,80.2801 26.2539\,80.3363 26.2867\,80.3919 26.2794\,80.4212 26.2094\,80.4653 26.1997\,80.4755 26.1738\,80.494 26.1958\,80.536 26.1972\,80.544 26.2232\,80.5914 26.2312\,80.5169 26.3584\,80.3895 26.4503\,80.3695 26.4735\,80.3749 26.5076\,80.3055 26.5458\,80.2732 26.6401\,80.2516 26.633\,80.258 26.6017\,80.2369 26.5851\,80.2417 26.565\,80.2158 26.5763\,80.2119 26.5328\,80.1625 26.5217\,80.1749 26.5038\,80.1388 26.4968\,80.155 26.4717\,80.1306 26.4792\,80.1196 26.4652))" +KOA,Kona Int'l at Keahole,Hawaiʻi County,Waimea,POINT(-155.6381 20.0124),"POLYGON((-156.12 19.7272\,-156.0786 19.625\,-156.0303 19.5875\,-156.0129 19.5\,-155.9432 19.3442\,-155.9747 19.1302\,-155.9541 19.0536\,-155.9163 19\,-155.74 18.9169\,-155.7229 18.8825\,-155.6943 18.8668\,-155.6391 18.875\,-155.5656 18.9345\,-155.5 19.0643\,-155.4661 19.0954\,-155.3254 19.1713\,-155.2684 19.2195\,-155.1756 19.2114\,-154.9397 19.3128\,-154.7861 19.4404\,-154.7566 19.5069\,-154.7783 19.5599\,-154.8814 19.62\,-154.9289 19.6736\,-154.9319 19.7155\,-154.9638 19.7692\,-155.0333 19.7934\,-155.0316 19.875\,-155.1403 19.9895\,-155.2524 20.0634\,-155.3975 20.1328\,-155.5821 20.1813\,-155.7404 20.2889\,-155.8368 20.3212\,-155.9159 20.301\,-155.959 20.2269\,-155.948 20.1238\,-155.8843 20.0183\,-155.9389 19.9615\,-155.9624 19.9092\,-156.0124 19.8898\,-156.0957 19.815\,-156.12 19.7272))" +KOI,Kirkwall,Orkney Islands,Kirkwall,POINT(-2.96 58.981),"POLYGON((-3.3682 58.9987\,-2.9698 58.9593\,-2.961 58.7294\,-2.7031 58.9632\,-3.1232 59.0103\,-3.0025 59.0719\,-3.1959 59.1542\,-3.3682 58.9987))" +KRK,Kraków-Balice,Kraków,Kraków,POINT(19.9372 50.0614),"POLYGON((19.7922 50.0118\,19.7965 49.9945\,19.849 49.9949\,19.8707 49.9717\,20.0081 49.975\,20.0187 49.9863\,20.0041 50.0012\,20.0657 50.0078\,20.088 50.0449\,20.2092 50.0448\,20.2173 50.0534\,20.1849 50.0861\,20.2091 50.1156\,20.1591 50.1216\,20.1379 50.1066\,20.0962 50.1193\,20.0242 50.1039\,19.9413 50.119\,19.9189 50.1002\,19.8925 50.1261\,19.8616 50.0927\,19.8083 50.0784\,19.8045 50.0621\,19.8219 50.0582\,19.8106 50.0407\,19.8291 50.0258\,19.7922 50.0118))" +KRT,Khartoum,الخرطوم,Khartoum,POINT(32.56 15.5006),"POLYGON((32.4591 15.4619\,32.4969 15.3877\,32.7073 15.4392\,32.7123 15.4688\,32.6785 15.4832\,32.6609 15.5195\,32.6155 15.5262\,32.581 15.6149\,32.4936 15.615\,32.5035 15.5846\,32.4591 15.4619))" +KSA,Kosrae Island,Lelu,Tofol,POINT(163.0086 5.3258),"POLYGON((162.9848 5.3235\,163.0043 5.303\,163.2365 5.295\,163.2074 5.4458\,163.1298 5.5412\,163.091 5.5613\,163.0174 5.3697\,162.9848 5.3235))" +KSH,Shahid Ashrafi Esfahani,کرمانشاه,Kermānshāh,POINT(47.065 34.3142),"POLYGON((46.9954 34.3151\,47.0465 34.2625\,47.0771 34.2972\,47.1551 34.3026\,47.1264 34.3404\,47.1892 34.3675\,47.1251 34.4014\,47.0704 34.4032\,47.0796 34.3871\,47.0609 34.3463\,47.0196 34.3387\,47.0459 34.3215\,47.0341 34.3079\,46.9954 34.3151))" +KTM,Tribhuvan Int'l,काठमाडौँ महानगरपालिका,Kathmandu,POINT(85.324 27.7172),"POLYGON((85.2905 27.7395\,85.2807 27.6839\,85.2993 27.6792\,85.3084 27.6931\,85.3518 27.6679\,85.3707 27.6901\,85.373 27.732\,85.3457 27.722\,85.3454 27.75\,85.2905 27.7395))" +KTU,Kota,Bundi Tehsil,Būndi,POINT(75.6372 
25.4383),"POLYGON((75.3823 25.3058\,75.5644 25.1323\,75.6224 25.3134\,75.6636 25.2672\,75.6783 25.3584\,75.8018 25.3661\,75.934 25.4325\,75.9532 25.4841\,75.7685 25.5437\,75.3823 25.3058))" +KUF,Kurumoch,Ленинский район,Samara,POINT(50.1408 53.2028),"POLYGON((50.0582 53.2133\,50.1141 53.1873\,50.1598 53.2036\,50.1308 53.1995\,50.0759 53.2382\,50.0582 53.2133))" +KUL,Kuala Lumpur Int'l,Kuala Lumpur,Kuala Lumpur,POINT(101.6953 3.1478),"POLYGON((101.6151 3.153\,101.6591 3.1129\,101.6513 3.0461\,101.7134 3.054\,101.7235 3.0334\,101.7305 3.055\,101.7496 3.055\,101.7535 3.1044\,101.735 3.1356\,101.7509 3.1604\,101.7311 3.1736\,101.7585 3.1874\,101.74 3.2333\,101.7148 3.2129\,101.6642 3.2446\,101.6404 3.2318\,101.6151 3.153))" +KWI,Kuwait Int'l,العاصمة,Kuwait City,POINT(47.9783 29.3697),"POLYGON((47.7562 29.3215\,48.0217 29.3072\,48.0006 29.3401\,48.0637 29.3764\,48.1371 29.3819\,48.197 29.367\,48.2346 29.293\,48.2498 29.2087\,48.3294 29.0423\,48.4537 28.9492\,48.5672 28.6245\,48.6639 28.6656\,48.6883 28.6969\,49.0035 28.7988\,48.9945 28.877\,48.9322 28.9651\,48.8328 29.0128\,48.7126 29.0107\,48.7235 29.0753\,48.7075 29.1462\,48.6619 29.2098\,48.6122 29.244\,48.6637 29.3259\,48.6637 29.4313\,48.6231 29.5025\,48.5395 29.5716\,48.2269 29.5409\,48.1625 29.4662\,48.1067 29.4502\,48.0802 29.4753\,48.036 29.4856\,47.9024 29.4804\,47.7821 29.4018\,47.7562 29.3215))" +KWL,Guilin Liangjiang Int'l,秀峰区,Guilin,POINT(110.2864 25.2819),"POLYGON((110.2179 25.2542\,110.2331 25.2282\,110.2702 25.2518\,110.2549 25.2637\,110.2944 25.2738\,110.3004 25.2853\,110.2714 25.2979\,110.2963 25.3343\,110.2619 25.3419\,110.2482 25.2907\,110.2179 25.2542))" +KZN,Kazan Int'l,Вахитовский район,Kazan,POINT(49.1089 55.7964),"POLYGON((49.0125 55.7485\,49.1106 55.7612\,49.116 55.7748\,49.1618 55.7641\,49.1677 55.8074\,49.1047 55.8092\,49.0125 55.7485))" +LAD,Luanda 4 de Fevereiro,Município de Luanda,Luanda,POINT(13.2344 -8.8383),"POLYGON((13.1735 -8.8862\,13.1892 -8.9045\,13.2542 -8.8814\,13.261 -8.9019\,13.3111 -8.9067\,13.2951 -8.8905\,13.3107 -8.8649\,13.2849 -8.8491\,13.2777 -8.8156\,13.3088 -8.7841\,13.303 -8.7564\,13.2301 -8.8102\,13.2245 -8.7975\,13.2634 -8.7606\,13.2292 -8.7868\,13.2031 -8.8307\,13.2006 -8.8623\,13.1735 -8.8862))" +LAO,Laoag Int'l,Ilocos Norte,Laoag,POINT(120.5936 18.1978),"POLYGON((120.2718 17.9498\,120.2723 17.8865\,120.37 17.8611\,120.4457 17.9049\,120.5303 17.8791\,120.5069 17.7465\,120.5074 17.7206\,120.5331 17.6967\,120.6268 17.8226\,120.693 17.8356\,120.7309 17.8837\,120.8284 17.9412\,120.9197 17.9388\,120.9457 18.0037\,120.9347 18.1045\,120.9794 18.165\,120.9644 18.1986\,120.9274 18.2096\,120.9023 18.2517\,120.936 18.2613\,120.955 18.3293\,120.942 18.3607\,120.9712 18.5638\,120.9336 18.5966\,120.9601 18.7389\,120.9392 18.7569\,120.8644 18.789\,120.7898 18.7799\,120.6292 18.6847\,120.5402 18.6585\,120.4508 18.5852\,120.4175 18.4991\,120.4561 18.3675\,120.3916 18.2597\,120.3756 18.1922\,120.3384 18.1436\,120.3292 18.0361\,120.2718 17.9498))" +LAP,Gen. 
Márquez de León Int'l,Los Mochis,Los Mochis,POINT(-108.9937 25.7835),"POLYGON((-109.0463 25.7662\,-109.0267 25.7761\,-109.0061 25.7646\,-109.0186 25.7289\,-109.0022 25.7523\,-108.985 25.7439\,-108.9729 25.7684\,-108.9686 25.7572\,-108.9634 25.809\,-108.9499 25.8024\,-108.9536 25.7836\,-108.939 25.8072\,-108.9674 25.8339\,-108.9807 25.8206\,-108.9874 25.8395\,-109.0229 25.8083\,-109.0297 25.8173\,-109.0444 25.7863\,-109.0298 25.781\,-109.0463 25.7662))" +LAS,Mccarran Int'l,Clark County,Sunrise Manor,POINT(-115.0487 36.1783),"POLYGON((-115.8946 36.8421\,-115.8945 36.1717\,-115.8412 36.1716\,-115.846 35.9636\,-114.633 35.0019\,-114.6351 35.0333\,-114.6028 35.0689\,-114.646 35.1034\,-114.5793 35.1286\,-114.5697 35.1855\,-114.6 35.3419\,-114.6788 35.4979\,-114.6635 35.5346\,-114.6751 35.5791\,-114.6531 35.6065\,-114.6892 35.6511\,-114.6812 35.6841\,-114.7028 35.7074\,-114.6951 35.7576\,-114.7114 35.8068\,-114.6956 35.8305\,-114.7052 35.8509\,-114.6622 35.8723\,-114.7305 35.9432\,-114.7432 36.0083\,-114.7232 36.0269\,-114.7445 36.0413\,-114.735 36.0547\,-114.7556 36.0872\,-114.6317 36.1423\,-114.6167 36.1301\,-114.572 36.1516\,-114.5117 36.151\,-114.5022 36.1288\,-114.4636 36.1397\,-114.4466 36.126\,-114.4055 36.1474\,-114.3721 36.1431\,-114.3084 36.0824\,-114.3156 36.0595\,-114.2527 36.0202\,-114.1482 36.028\,-114.1145 36.0952\,-114.1231 36.1116\,-114.0999 36.1217\,-114.0466 36.1957\,-114.0504 36.8431\,-115.737 36.8539\,-115.7372 36.843\,-115.8946 36.8421))" +LBA,Leeds Bradford,Bradford,Bradford,POINT(-1.75 53.8),"POLYGON((-2.0612 53.8256\,-1.9808 53.7863\,-1.8734 53.7787\,-1.8727 53.7549\,-1.8555 53.7483\,-1.8094 53.7644\,-1.7737 53.7243\,-1.7144 53.7624\,-1.6816 53.7565\,-1.6404 53.7763\,-1.712 53.7831\,-1.6951 53.8575\,-1.7605 53.8636\,-1.8004 53.8859\,-1.7875 53.8969\,-1.7254 53.8857\,-1.7272 53.9102\,-1.8051 53.939\,-1.8605 53.9328\,-1.8813 53.9631\,-1.9662 53.9515\,-1.9769 53.9264\,-1.9526 53.9035\,-1.9794 53.9011\,-1.9824 53.8688\,-2.0217 53.8715\,-2.0612 53.8256))" +LBV,Libreville Leon M'ba Int'l,Libreville,Libreville,POINT(9.4542 0.3903),"POLYGON((9.1031 0.367\,9.2877 0.3769\,9.3722 0.3981\,9.4658 0.2524\,9.7916 0.1547\,9.8993 0.3502\,9.9082 0.4212\,9.9731 0.5179\,9.8874 0.5265\,9.8512 0.5933\,9.7969 0.6304\,9.7626 0.6333\,9.6714 0.6022\,9.5949 0.6097\,9.5412 0.7094\,9.3358 0.7209\,9.1493 0.7534\,9.1092 0.6112\,9.1031 0.367))" +LCE,Goloson Int'l,La Ceiba,La Ceiba,POINT(-86.7931 15.7792),"POLYGON((-86.8922 15.7612\,-86.8704 15.7461\,-86.8465 15.6848\,-86.8492 15.6581\,-86.8823 15.6396\,-86.8639 15.6165\,-86.8835 15.6077\,-86.8495 15.5463\,-86.7398 15.5554\,-86.6416 15.6045\,-86.6152 15.642\,-86.6222 15.6617\,-86.6047 15.6959\,-86.6252 15.7258\,-86.6077 15.7406\,-86.5988 15.7977\,-86.7029 15.7873\,-86.7762 15.8014\,-86.8312 15.7674\,-86.8922 15.7612))" +LED,Pulkovo 2,Петроградский район,Saint Petersburg,POINT(30.3167 59.95),"POLYGON((30.2101 59.9723\,30.2939 59.9467\,30.3454 59.9522\,30.2995 59.9826\,30.2101 59.9723))" +LEI,Almeria,Almería,Almería,POINT(-2.4681 36.8403),"POLYGON((-2.5444 36.8633\,-2.5252 36.8246\,-2.482 36.8355\,-2.485 36.832\,-2.4641 36.8259\,-2.4739 36.8365\,-2.4286 36.8108\,-2.3704 36.8408\,-2.3182 36.8333\,-2.2072 36.7372\,-2.2327 36.7974\,-2.2022 36.8633\,-2.2881 36.9486\,-2.3032 36.9975\,-2.3575 36.989\,-2.3801 36.927\,-2.3706 36.8924\,-2.3888 36.8799\,-2.4394 36.8625\,-2.5188 36.897\,-2.5444 36.8633))" +LFW,Lomé Tokoin,Lomé,Lomé,POINT(1.2228 6.1319),"POLYGON((1.1754 6.1881\,1.1773 6.1692\,1.1994 6.1691\,1.1996 6.1124\,1.2878 6.1316\,1.3057 6.152\,1.2985 
6.1785\,1.2626 6.2005\,1.1943 6.2016\,1.1754 6.1881))" +LGA,LaGuardia,City of New York,New York,POINT(-73.9249 40.6943),"POLYGON((-74.2588 40.4989\,-74.2253 40.4766\,-73.9779 40.5191\,-73.9021 40.4921\,-73.8126 40.53\,-73.7572 40.5312\,-73.7565 40.5862\,-73.7381 40.6026\,-73.7681 40.6263\,-73.7248 40.6523\,-73.7303 40.7222\,-73.7002 40.7393\,-73.7797 40.8121\,-73.7484 40.8718\,-73.8382 40.8941\,-73.8511 40.9101\,-73.8593 40.9005\,-73.9183 40.9176\,-74.014 40.7576\,-74.0558 40.6515\,-74.1914 40.642\,-74.2146 40.5605\,-74.2475 40.5494\,-74.2588 40.4989))" +LGW,London Gatwick,Crawley,Crawley,POINT(-0.1872 51.1092),"POLYGON((-0.2556 51.1418\,-0.2003 51.1391\,-0.2369 51.1094\,-0.1964 51.0848\,-0.1395 51.1081\,-0.133 51.1589\,-0.1785 51.1672\,-0.2556 51.1418))" +LHE,Allama Iqbal Int'l,تحصیل لاہور کنٹونمنٹ,Lahore,POINT(74.3436 31.5497),"POLYGON((74.3356 31.5054\,74.3745 31.4657\,74.4301 31.4688\,74.4339 31.4164\,74.401 31.4012\,74.4136 31.3301\,74.4361 31.3418\,74.4514 31.3283\,74.4715 31.3639\,74.4973 31.3452\,74.5519 31.3539\,74.5845 31.3801\,74.6013 31.4199\,74.6547 31.4259\,74.6386 31.4442\,74.6522 31.4572\,74.6278 31.4769\,74.6367 31.4847\,74.5782 31.4983\,74.5842 31.5208\,74.5652 31.5225\,74.5591 31.5065\,74.5047 31.5095\,74.5023 31.5788\,74.4354 31.5687\,74.426 31.5422\,74.3431 31.5578\,74.3522 31.533\,74.3356 31.5054))" +LHR,London Heathrow,London Borough of Hounslow,Hounslow,POINT(-0.375 51.4668),"POLYGON((-0.4615 51.449\,-0.3855 51.4206\,-0.3668 51.4416\,-0.3878 51.4494\,-0.327 51.457\,-0.2921 51.4873\,-0.2565 51.4715\,-0.2444 51.4979\,-0.4092 51.5003\,-0.4112 51.4699\,-0.4615 51.449))" +LIH,Lihue,Hawaii,Kapaa,POINT(-159.3521 22.091),"POLYGON((-159.851 22.0271\,-159.7936 21.9398\,-159.7117 21.9076\,-159.6313 21.8467\,-159.444 21.8178\,-159.3578 21.8657\,-159.2844 21.9406\,-159.2837 22.0185\,-159.245 22.1017\,-159.2458 22.1668\,-159.3088 22.25\,-159.3897 22.2875\,-159.5964 22.278\,-159.7728 22.1811\,-159.8481 22.0761\,-159.851 22.0271))" +LIL,Lille-Lesquin,Roubaix,Roubaix,POINT(3.1817 50.6901),"POLYGON((3.1511 50.6878\,3.1657 50.6816\,3.1534 50.67\,3.1953 50.6688\,3.2174 50.6882\,3.1902 50.7087\,3.1641 50.7064\,3.1511 50.6878))" +LIM,Jorge Chávez,Callao,Callao,POINT(-77.1333 -12.0333),"POLYGON((-77.1584 -12.0666\,-77.1326 -12.0707\,-77.1342 -12.0597\,-77.0767 -12.0465\,-77.0984 -12.0484\,-77.0873 -12.0121\,-77.1127 -11.9968\,-77.1072 -11.9777\,-77.1298 -11.9368\,-77.1555 -12.0463\,-77.1388 -12.0391\,-77.1584 -12.0666))" +LIN,Linate,Milano,Milan,POINT(9.19 45.4669),"POLYGON((9.0409 45.4474\,9.0644 45.4332\,9.0779 45.4524\,9.1096 45.451\,9.133 45.4139\,9.1545 45.4161\,9.1561 45.4017\,9.1955 45.3872\,9.2664 45.435\,9.2726 45.4726\,9.2602 45.4714\,9.2582 45.4898\,9.2781 45.5052\,9.1801 45.5352\,9.1515 45.5181\,9.099 45.532\,9.0901 45.4989\,9.0538 45.5049\,9.0746 45.4573\,9.0446 45.4661\,9.0409 45.4474))" +LIS,Lisbon Portela,Loures,Loures,POINT(-9.1667 38.8333),"POLYGON((-9.246 38.8427\,-9.1962 38.8122\,-9.1453 38.8275\,-9.1819 38.855\,-9.1848 38.8761\,-9.2394 38.8638\,-9.2314 38.8461\,-9.246 38.8427))" +LIT,Clinton National,Little Rock,Little Rock,POINT(-92.3577 34.7256),"POLYGON((-92.5216 34.8081\,-92.4801 34.7981\,-92.4998 34.7967\,-92.4988 34.7678\,-92.4549 34.7602\,-92.4618 34.7504\,-92.4577 34.7413\,-92.445 34.755\,-92.4303 34.7479\,-92.4471 34.7302\,-92.4126 34.7182\,-92.4136 34.6929\,-92.4266 34.6969\,-92.4141 34.6785\,-92.4431 34.6721\,-92.4428 34.627\,-92.3549 34.6398\,-92.3391 34.626\,-92.3502 34.6424\,-92.3063 34.6528\,-92.3009 34.6811\,-92.2913 34.6737\,-92.277 34.695\,-92.2559 
34.6868\,-92.2463 34.7054\,-92.2356 34.6952\,-92.2365 34.7181\,-92.1812 34.6998\,-92.1688 34.6805\,-92.1508 34.6951\,-92.2032 34.7402\,-92.2915 34.7522\,-92.3126 34.7854\,-92.3813 34.799\,-92.3889 34.8154\,-92.3939 34.8016\,-92.4795 34.8216\,-92.5216 34.8081)\,(-92.3403 34.7785\,-92.3489 34.7851\,-92.34 34.7848\,-92.3403 34.7785))" +LJU,Ljubljana,Ljubljana,Ljubljana,POINT(14.5061 46.0514),"POLYGON((14.4086 46.0814\,14.441 46.053\,14.4379 46.0133\,14.465 46.0061\,14.431 45.987\,14.4391 45.9741\,14.4764 45.9773\,14.472 45.9949\,14.5094 46.0031\,14.5337 45.9905\,14.5302 46.0158\,14.5836 46.0192\,14.6325 45.9868\,14.6692 45.9884\,14.7045 46.0153\,14.7317 45.9979\,14.7518 46.0074\,14.7401 46.0206\,14.7553 46.0297\,14.7251 46.0521\,14.7457 46.0682\,14.6758 46.0865\,14.6206 46.0762\,14.5071 46.1458\,14.468 46.1415\,14.444 46.121\,14.4375 46.1284\,14.4259 46.1287\,14.42 46.114\,14.4426 46.0956\,14.4086 46.0814))" +LKO,Amausi Int'l,Lucknow,Lucknow,POINT(80.95 26.85),"POLYGON((80.8264 26.7588\,80.9019 26.753\,80.9043 26.7646\,80.95 26.7508\,80.9985 26.8259\,81.0465 26.8171\,81.0898 26.836\,81.0954 26.9142\,80.9841 26.9251\,80.9656 26.9478\,80.9346 26.9278\,80.9164 26.9431\,80.8781 26.9322\,80.836 26.8973\,80.8662 26.7947\,80.8264 26.7588))" +LLA,Lulea,Luleå kommun,Luleå,POINT(22.1539 65.5844),"POLYGON((21.3357 65.6716\,22.4511 65.0696\,23.1608 65.2898\,22.1466 66.2831\,21.8513 65.9785\,22.1148 65.8483\,21.3357 65.6716))" +LLW,Kamuzu Int'l,Lilongwe,Lilongwe,POINT(33.7833 -13.9833),"POLYGON((33.2462 -13.8325\,33.6501 -14.5958\,34.2835 -13.8584\,33.5117 -13.724\,33.4117 -13.4868\,33.2462 -13.8325))" +LMM,Los Mochis,Los Mochis,Los Mochis,POINT(-108.9937 25.7835),"POLYGON((-109.0463 25.7662\,-109.0267 25.7761\,-109.0061 25.7646\,-109.0186 25.7289\,-109.0022 25.7523\,-108.985 25.7439\,-108.9729 25.7684\,-108.9686 25.7572\,-108.9634 25.809\,-108.9499 25.8024\,-108.9536 25.7836\,-108.939 25.8072\,-108.9674 25.8339\,-108.9807 25.8206\,-108.9874 25.8395\,-109.0229 25.8083\,-109.0297 25.8173\,-109.0444 25.7863\,-109.0298 25.781\,-109.0463 25.7662))" +LOS,Lagos Murtala Muhammed,Ikeja,Ikeja,POINT(3.3426 6.6186),"POLYGON((3.3101 6.5509\,3.3376 6.5899\,3.3522 6.5557\,3.367 6.5583\,3.3832 6.5932\,3.3537 6.6587\,3.3331 6.6018\,3.314 6.6053\,3.3101 6.5509))" +LOV,Venustiano Carranza Int'l,Monclova,Monclova,POINT(-101.4222 26.9103),"POLYGON((-101.5293 26.8453\,-101.4882 26.8171\,-101.4406 26.8368\,-101.367 26.8193\,-101.256 26.8452\,-101.205 26.8073\,-101.1958 26.748\,-101.1126 26.6977\,-101.0073 26.7948\,-100.9663 26.7895\,-100.981 26.81\,-100.9383 26.8393\,-100.9167 26.8826\,-100.9817 26.9672\,-101.0379 26.9839\,-101.0764 27.0425\,-101.1797 27.0118\,-101.2052 27.0406\,-101.2641 27.007\,-101.3488 27.0641\,-101.4356 26.9493\,-101.4523 26.8973\,-101.475 26.8912\,-101.4672 26.8823\,-101.4866 26.8637\,-101.5281 26.8603\,-101.5293 26.8453))" +LPA,Gran Canaria,Las Palmas de Gran Canaria,Las Palmas,POINT(-15.4314 28.1272),"POLYGON((-15.5272 28.0525\,-15.4779 28.0582\,-15.4553 28.0245\,-15.421 28.0388\,-15.3949 28.0313\,-15.416 28.0476\,-15.411 28.1009\,-15.4289 28.1304\,-15.4176 28.1466\,-15.412 28.1281\,-15.4069 28.1582\,-15.4079 28.1371\,-15.4076 28.1329\,-15.4047 28.1307\,-15.4045 28.1213\,-15.3982 28.1617\,-15.4069 28.1782\,-15.4394 28.1693\,-15.4312 28.1466\,-15.4459 28.1315\,-15.4872 28.133\,-15.5272 28.0525))" +LPB,El Alto Int'l,El Alto,El Alto,POINT(-68.1633 -16.5047),"POLYGON((-68.3201 -16.5018\,-68.2877 -16.5104\,-68.2805 -16.5397\,-68.2519 -16.5284\,-68.2367 -16.5437\,-68.2595 -16.5608\,-68.206 -16.5735\,-68.2399 
-16.6101\,-68.1647 -16.667\,-68.1083 -16.6351\,-68.1309 -16.6344\,-68.1283 -16.6212\,-68.185 -16.5732\,-68.1449 -16.5286\,-68.1711 -16.4906\,-68.1385 -16.3502\,-68.1537 -16.2627\,-68.2055 -16.3365\,-68.2163 -16.3835\,-68.2434 -16.3927\,-68.3201 -16.5018))" +LPG,La Plata,Partido de Berisso,Berisso,POINT(-57.8858 -34.8728),"POLYGON((-57.932 -34.9035\,-57.8958 -34.9305\,-57.7546 -34.9717\,-57.7092 -34.9303\,-57.7732 -34.9017\,-57.874 -34.8258\,-57.932 -34.9035))" +LPL,Liverpool John Lennon,Liverpool,Liverpool,POINT(-2.9919 53.4075),"POLYGON((-3.0192 53.4362\,-2.9971 53.384\,-2.9032 53.3381\,-2.9101 53.3238\,-2.8181 53.3187\,-2.8324 53.3373\,-2.8188 53.348\,-2.8404 53.3473\,-2.8562 53.3787\,-2.822 53.3807\,-2.8371 53.4\,-2.8905 53.4069\,-2.866 53.4183\,-2.8676 53.4486\,-2.8951 53.4671\,-2.9563 53.473\,-2.9749 53.4433\,-3.0192 53.4362))" +LRD,Laredo Int'l,Nuevo Laredo,Nuevo Laredo,POINT(-99.5069 27.4861),"POLYGON((-99.9264 27.5492\,-99.9192 27.4915\,-99.7604 27.437\,-99.7488 27.289\,-99.6382 27.2918\,-99.4964 27.2713\,-99.4944 27.3036\,-99.5381 27.3172\,-99.5042 27.3392\,-99.4876 27.4102\,-99.4954 27.4457\,-99.4787 27.4794\,-99.4937 27.4977\,-99.5281 27.4982\,-99.5112 27.5652\,-99.544 27.6075\,-99.5835 27.6031\,-99.5773 27.6185\,-99.6003 27.6415\,-99.6561 27.6295\,-99.6639 27.6575\,-99.6903 27.669\,-99.7054 27.6554\,-99.7281 27.6791\,-99.9264 27.5492))" +LSI,Sumburgh,Shetland Islands,Lerwick,POINT(-1.145 60.155),"POLYGON((-1.7034 60.288\,-1.4631 60.1463\,-1.2686 60.2365\,-1.3923 59.9131\,-1.3188 59.8966\,-1.3135 59.8552\,-1.2732 59.8529\,-1.0482 60.4502\,-1.1639 60.3774\,-1.2575 60.4031\,-1.0274 60.4977\,-0.9863 60.7002\,-1.6333 60.4872\,-1.2628 60.3518\,-1.3697 60.2858\,-1.7034 60.288))" +LTK,Bassel Al-Assad Int'l,ناحية اللاذقية,Latakia,POINT(35.7833 35.5167),"POLYGON((35.7578 35.5923\,35.721 35.5786\,35.7448 35.5784\,35.7329 35.561\,35.7714 35.5373\,35.7617 35.5081\,35.763 35.5182\,35.7549 35.5322\,35.7598 35.509\,35.7787 35.4962\,35.7877 35.5068\,35.8168 35.4964\,35.867 35.5424\,35.8559 35.5721\,35.8653 35.6132\,35.8134 35.6541\,35.7578 35.5923))" +LTN,London Luton,Luton,Luton,POINT(-0.4147 51.8783),"POLYGON((-0.5059 51.9006\,-0.4225 51.8545\,-0.3499 51.8787\,-0.3856 51.9157\,-0.4191 51.9123\,-0.4263 51.9267\,-0.4857 51.9227\,-0.4823 51.9078\,-0.5059 51.9006))" +LUH,Sahnewal,Ludhiana (West) Tahsil,Ludhiāna,POINT(75.85 30.91),"POLYGON((75.5921 30.828\,75.6097 30.8232\,75.6074 30.8076\,75.6296 30.8169\,75.6505 30.8003\,75.6682 30.8226\,75.6639 30.7756\,75.6948 30.762\,75.6917 30.7369\,75.7594 30.7654\,75.7668 30.7437\,75.7927 30.7359\,75.7823 30.757\,75.8195 30.7781\,75.8059 30.7911\,75.8126 30.8112\,75.8328 30.8165\,75.8466 30.7777\,75.8845 30.7908\,75.8726 30.8152\,75.9107 30.7886\,75.9397 30.8224\,75.9176 30.8467\,75.9562 30.8472\,75.9426 30.8579\,75.9577 30.8832\,75.9157 30.9128\,75.919 30.9348\,75.9022 30.9304\,75.8306 30.9716\,75.8222 30.9987\,75.7444 31.0124\,75.6513 30.9804\,75.6715 30.962\,75.6351 30.9052\,75.6369 30.8612\,75.5921 30.828))" +LUN,Lusaka Int'l,Lusaka District,Lusaka,POINT(28.2833 -15.4167),"POLYGON((28.2021 -15.4098\,28.2366 -15.5434\,28.3093 -15.5401\,28.3101 -15.4773\,28.331 -15.4762\,28.3316 -15.4906\,28.362 -15.4756\,28.478 -15.4901\,28.489 -15.4662\,28.4256 -15.4469\,28.4036 -15.3656\,28.3441 -15.3493\,28.3092 -15.3039\,28.2751 -15.3191\,28.2765 -15.3389\,28.2247 -15.3327\,28.2021 -15.4098))" +LUX,Luxembourg-Findel,Luxembourg,Luxembourg,POINT(6.1319 49.6117),"POLYGON((6.0692 49.632\,6.1038 49.625\,6.0693 49.5898\,6.1137 49.561\,6.1255 49.5813\,6.2036 49.613\,6.1783 
49.6243\,6.1826 49.6365\,6.1427 49.6331\,6.1643 49.6407\,6.1487 49.6418\,6.1589 49.6474\,6.1597 49.6549\,6.0784 49.6468\,6.0692 49.632))" +LXA,Lhasa Gonggar,纳金街道,Lhasa,POINT(91.1719 29.6534),"POLYGON((91.1398 29.6662\,91.168 29.6601\,91.1487 29.6473\,91.172 29.6354\,91.2784 29.6815\,91.2457 29.6934\,91.2376 29.718\,91.1398 29.6662))" +LXR,Luxor,الأقصر,Luxor,POINT(32.65 25.6833),"POLYGON((32.4321 25.5879\,32.475 25.5014\,32.4852 25.4084\,32.5601 25.4304\,32.8167 25.5904\,32.9679 25.8066\,32.9592 25.9846\,32.7086 25.9843\,32.677 25.843\,32.5341 25.7245\,32.4362 25.6661\,32.4321 25.5879))" +LYS,Lyon-Saint Exupery,Lyon,Lyon,POINT(4.84 45.76),"POLYGON((4.7718 45.751\,4.8183 45.7491\,4.8145 45.7323\,4.8377 45.7074\,4.8537 45.7297\,4.8876 45.7204\,4.8984 45.7529\,4.8755 45.7549\,4.8598 45.787\,4.8237 45.7843\,4.8189 45.7897\,4.8393 45.8005\,4.837 45.8083\,4.784 45.7878\,4.7952 45.7758\,4.7718 45.751))" +MAA,Chennai Int'l,Chennai District,Chennai,POINT(80.275 13.0825),"POLYGON((80.1183 13.0189\,80.1481 12.9668\,80.2028 12.9602\,80.1716 12.9368\,80.187 12.9288\,80.1725 12.9153\,80.1822 12.8678\,80.2015 12.8514\,80.213 12.8755\,80.2422 12.8528\,80.3069 13.1017\,80.3003 13.0911\,80.2935 13.0864\,80.3001 13.1433\,80.2514 13.1498\,80.2413 13.128\,80.2019 13.1261\,80.1871 13.0235\,80.1449 13.0237\,80.1304 13.0449\,80.1183 13.0189))" +MAD,Madrid Barajas,San Fernando de Henares,Torrejón de Ardoz,POINT(-3.4978 40.4614),"POLYGON((-3.5382 40.4298\,-3.509 40.4065\,-3.4525 40.4216\,-3.4348 40.4128\,-3.438 40.4323\,-3.4106 40.4267\,-3.4341 40.4554\,-3.4824 40.4307\,-3.4974 40.473\,-3.5308 40.4713\,-3.5267 40.4333\,-3.5382 40.4298))" +MAJ,Marshall Islands Int'l,Mājro,Majuro,POINT(171.3833 7.0833),"POLYGON((170.8173 7.6729\,170.8251 7.1353\,170.829 7.1064\,170.8358 7.0819\,170.8454 7.0584\,170.8598 7.033\,171.233 6.5246\,171.559 6.5742\,171.3729 7.7443\,170.8173 7.6729))" +MAM,Gen. 
Sevando Canales,Matamoros,Heroica Matamoros,POINT(-97.5042 25.8797),"POLYGON((-97.9426 25.3853\,-97.5162 25.0425\,-97.1474 25.9563\,-97.4056 25.8376\,-97.9022 26.061\,-97.7366 25.504\,-97.9426 25.3853))" +MAN,Manchester Int'l,Manchester,Wythenshawe,POINT(-2.264 53.392),"POLYGON((-2.3199 53.4116\,-2.2857 53.3762\,-2.3143 53.3569\,-2.3028 53.341\,-2.2408 53.3596\,-2.2468 53.396\,-2.1492 53.4594\,-2.1681 53.4801\,-2.1619 53.4983\,-2.1787 53.5058\,-2.1548 53.518\,-2.2187 53.5439\,-2.2675 53.538\,-2.2641 53.5247\,-2.2466 53.5292\,-2.257 53.5179\,-2.2451 53.4861\,-2.2682 53.4697\,-2.2532 53.4607\,-2.3001 53.4366\,-2.2778 53.4155\,-2.3199 53.4116))" +MAO,Eduardo Gomes Int'l,Manaus,Manaus,POINT(-60.0167 -3.1),"POLYGON((-60.801 -2.545\,-60.505 -3.092\,-59.994 -3.222\,-59.717 -3.061\,-59.16 -3.168\,-59.706 -2.939\,-60.033 -2.549\,-60.0016 -2.1165\,-60.281 -1.924\,-60.668 -2.074\,-60.801 -2.545))" +MAR,La Chinita Int'l,Municipio Maracaibo,Maracaibo,POINT(-71.6333 10.6333),"POLYGON((-71.7833 10.7151\,-71.7704 10.697\,-71.7812 10.6778\,-71.777 10.6421\,-71.7638 10.6412\,-71.769 10.6019\,-71.6738 10.5843\,-71.569 10.5932\,-71.5625 10.7196\,-71.6357 10.7949\,-71.7105 10.7389\,-71.751 10.7383\,-71.7833 10.7151))" +MBA,Moi Int'l,Mombasa,Mombasa,POINT(39.6667 -4.05),"POLYGON((39.5631 -4.022\,39.5811 -4.0369\,39.5761 -4.1223\,39.6085 -4.0902\,39.6385 -4.1212\,39.6275 -4.1447\,39.6436 -4.1548\,39.6715 -4.0793\,39.7038 -4.0572\,39.7643 -3.9571\,39.7149 -3.9526\,39.6993 -3.9232\,39.6475 -3.9262\,39.6498 -3.9607\,39.5977 -3.9545\,39.5703 -3.9777\,39.5631 -4.022))" +MBJ,Sangster Int'l,Saint James,Montego Bay,POINT(-77.9167 18.4667),"POLYGON((-77.9967 18.4298\,-77.9714 18.4127\,-77.9511 18.3573\,-77.9 18.2956\,-77.9069 18.2865\,-77.8777 18.2636\,-77.8934 18.2062\,-77.7757 18.2532\,-77.7361 18.5116\,-77.8902 18.5193\,-77.9247 18.5009\,-77.9281 18.4643\,-77.9513 18.4596\,-77.935 18.4435\,-77.9835 18.4524\,-77.9967 18.4298))" +MCI,Kansas City Int'l,Kansas City,Kansas City,POINT(-94.7443 39.1235),"POLYGON((-94.9084 39.1161\,-94.7977 39.1057\,-94.7918 39.0436\,-94.6074 39.0441\,-94.6081 39.1175\,-94.5882 39.1498\,-94.6534 39.1548\,-94.674 39.1838\,-94.7358 39.1694\,-94.7772 39.201\,-94.9002 39.2029\,-94.9084 39.1161))" +MCO,Orlando Int'l,Orange County,Orlando,POINT(-81.337 28.4773),"POLYGON((-81.6586 28.7422\,-81.6571 28.3467\,-80.8632 28.3478\,-80.9012 28.433\,-80.8946 28.4669\,-80.8707 28.4713\,-80.8753 28.4894\,-80.8858 28.5104\,-80.9394 28.5351\,-80.9301 28.5632\,-80.9522 28.6042\,-81.3279 28.6105\,-81.3285 28.6396\,-81.4597 28.6402\,-81.4595 28.713\,-81.4226 28.7374\,-81.4155 28.7856\,-81.6466 28.7859\,-81.6586 28.7422))" +MCT,Seeb Int'l,مسقط,Muscat,POINT(58.5922 23.6139),"POLYGON((58.2266 23.5789\,58.2455 23.5427\,58.329 23.5592\,58.3335 23.5257\,58.3634 23.5401\,58.3866 23.5224\,58.4485 23.5782\,58.5309 23.5832\,58.5921 23.557\,58.6485 23.5989\,58.6047 23.6334\,58.4945 23.6564\,58.4528 23.6259\,58.4026 23.6139\,58.2687 23.642\,58.246 23.6125\,58.2513 23.5874\,58.2266 23.5789))" +MCZ,Maceio/Zumbi dos Palmares Int'l,Maceió,Maceió,POINT(-35.735 -9.6658),"POLYGON((-35.8144 -9.58\,-35.769 -9.635\,-35.7945 -9.7131\,-35.748 -9.6746\,-35.7284 -9.6717\,-35.7236 -9.6846\,-35.6958 -9.6653\,-35.6971 -9.6345\,-35.6758 -9.5989\,-35.559 -9.4851\,-35.617 -9.469\,-35.625 -9.448\,-35.665 -9.43\,-35.6906 -9.371\,-35.7696 -9.3896\,-35.8144 -9.58))" +MDE,José María Córdova,Perímetro Urbano Medellín,Medellín,POINT(-75.5906 6.2308),"POLYGON((-75.6388 6.2442\,-75.6139 6.2404\,-75.6235 6.2354\,-75.6094 6.2052\,-75.5574 6.1755\,-75.5553 
6.2248\,-75.522 6.2295\,-75.54 6.2618\,-75.5272 6.2957\,-75.5874 6.3101\,-75.6012 6.2845\,-75.625 6.283\,-75.6181 6.2634\,-75.6388 6.2442))" +MDG,Mudanjiang Hailang,西安区,Mudanjiang,POINT(129.5997 44.5861),"POLYGON((129.3872 44.4725\,129.3956 44.458\,129.4643 44.4456\,129.4231 44.4243\,129.513 44.4122\,129.4998 44.3947\,129.5305 44.3743\,129.7429 44.3802\,129.6563 44.4488\,129.5972 44.4515\,129.5695 44.5122\,129.594 44.5379\,129.5833 44.5456\,129.6192 44.5602\,129.6052 44.5891\,129.5173 44.5524\,129.4904 44.5859\,129.5043 44.6185\,129.4637 44.6599\,129.4411 44.6506\,129.4501 44.6168\,129.4382 44.5744\,129.3955 44.5457\,129.4068 44.4981\,129.3872 44.4725))" +MDL,Mandalay Int'l,မန္တလေးခရိုင်,Mandalay,POINT(96.0844 21.9831),"POLYGON((95.984 21.8499\,96.0202 21.8113\,96.0443 21.8204\,96.0481 21.8494\,96.0681 21.8405\,96.0696 21.8215\,96.0824 21.835\,96.1028 21.8195\,96.0829 21.7896\,96.134 21.8167\,96.1442 21.7723\,96.168 21.7774\,96.1716 21.8\,96.1928 21.7881\,96.21 21.837\,96.248 21.8438\,96.2574 21.8104\,96.258 21.8509\,96.2421 21.874\,96.2674 21.876\,96.2675 21.9063\,96.2913 21.9406\,96.2873 21.9802\,96.3336 22.0033\,96.3655 22.0799\,96.3061 22.0663\,96.296 22.0885\,96.2401 22.1006\,96.252 22.1672\,96.2332 22.1531\,96.1782 22.1674\,96.1451 22.1388\,96.1648 22.1284\,96.1261 22.0645\,96.0491 22.0509\,96.0613 22.0249\,96.0255 22.0076\,96.0188 22.0191\,96.0188 21.9378\,95.984 21.8499))" +MDQ,Astor Piazzolla Int'l,Mar del Plata,Mar del Plata,POINT(-57.55 -38.0),"POLYGON((-57.6591 -37.9588\,-57.6249 -38.0031\,-57.6437 -38.0081\,-57.634 -38.0288\,-57.6531 -38.0447\,-57.6238 -38.067\,-57.6508 -38.0895\,-57.6071 -38.1337\,-57.5443 -38.1\,-57.5179 -38.039\,-57.5338 -38.05\,-57.5403 -38.0485\,-57.5332 -38.0319\,-57.5223 -38.0371\,-57.5441 -37.99\,-57.5263 -37.9201\,-57.5473 -37.904\,-57.6591 -37.9588))" +MDW,Chicago Midway Int'l,West Chicago Township,Chicago,POINT(-87.6866 41.8375),"POLYGON((-87.7756 41.9093\,-87.7741 41.8655\,-87.74 41.866\,-87.7385 41.8221\,-87.6324 41.86\,-87.6443 41.897\,-87.6881 41.9395\,-87.7076 41.9394\,-87.7069 41.9101\,-87.7756 41.9093))" +MDZ,El Plumerillo,Departamento Guaymallén,Godoy Cruz,POINT(-68.8333 -32.9167),"POLYGON((-68.8391 -32.9295\,-68.7192 -32.9443\,-68.7038 -32.9704\,-68.6623 -32.9184\,-68.637 -32.8323\,-68.6893 -32.8299\,-68.7978 -32.8619\,-68.8263 -32.8767\,-68.8391 -32.9295))" +MED,Madinah Int'l,محافظة بدر,Badr Ḩunayn,POINT(38.7906 23.78),"POLYGON((38.3769 23.9961\,38.7151 23.8232\,38.5026 23.6475\,38.6659 23.4122\,38.894 23.497\,39.0274 23.3272\,39.1033 23.5318\,39.4688 23.5489\,39.345 23.8994\,39.1799 23.8342\,39.2236 24.1603\,38.9386 24.345\,38.9801 24.4812\,38.3769 23.9961))" +MEL,Melbourne Int'l,Melton,Melton,POINT(144.5833 -37.6833),"POLYGON((144.567 -37.6902\,144.621 -37.7024\,144.6478 -37.6592\,144.6085 -37.6485\,144.6051 -37.6764\,144.5703 -37.6729\,144.567 -37.6902))" +MEM,Memphis Int'l,Southaven,Southaven,POINT(-89.9786 34.9514),"POLYGON((-90.0795 34.9908\,-90.0007 34.9674\,-89.9974 34.9481\,-90.0168 34.9441\,-90.0064 34.9189\,-90.0212 34.919\,-90.017 34.9074\,-89.9187 34.9039\,-89.919 34.9649\,-89.9366 34.9703\,-89.9367 34.9915\,-89.919 34.9948\,-90.0795 34.9908))" +MES,Polonia Int'l,Kota Medan,Medan,POINT(98.6739 3.5894),"POLYGON((98.5925 3.4979\,98.6172 3.4983\,98.6257 3.5188\,98.6451 3.4882\,98.659 3.5046\,98.6543 3.5173\,98.6567 3.519\,98.6556 3.5196\,98.6576 3.5225\,98.7448 3.5281\,98.7201 3.5516\,98.7415 3.599\,98.7066 3.6024\,98.7099 3.6266\,98.6934 3.6268\,98.703 3.6626\,98.6685 3.6718\,98.7033 3.667\,98.7047 3.7151\,98.731 3.7204\,98.702 
3.7509\,98.7242 3.8018\,98.7041 3.7827\,98.6814 3.7885\,98.6802 3.7674\,98.6528 3.7823\,98.6277 3.7676\,98.6534 3.7378\,98.6248 3.6997\,98.6578 3.6752\,98.6653 3.6256\,98.6219 3.6117\,98.5993 3.6203\,98.6119 3.5558\,98.5931 3.5403\,98.5925 3.4979))" +MEX,Lic Benito Juarez Int'l,Cuauhtémoc,Mexico City,POINT(-99.1333 19.4333),"POLYGON((-99.1843 19.4078\,-99.1712 19.4035\,-99.1704 19.3998\,-99.1567 19.404\,-99.1311 19.4031\,-99.1257 19.4043\,-99.1282 19.4138\,-99.123 19.4425\,-99.1259 19.4478\,-99.1222 19.4598\,-99.1339 19.465\,-99.1416 19.4656\,-99.1504 19.463\,-99.1593 19.4636\,-99.1631 19.4589\,-99.1656 19.4437\,-99.1776 19.4236\,-99.1751 19.423\,-99.1843 19.4078))" +MFM,Macau Int'l,香洲区,Zhuhai,POINT(113.5678 22.2769),"POLYGON((113.4086 22.2068\,113.4614 22.0785\,113.5281 21.9962\,113.5801 21.7847\,114.0435 21.7834\,114.1667 21.8471\,114.3889 22.0642\,114.236 22.1485\,113.8966 22.1425\,113.8324 22.1838\,113.8172 22.2173\,113.8526 22.2876\,113.791 22.3202\,113.7492 22.4562\,113.6808 22.4858\,113.6248 22.4806\,113.5685 22.415\,113.4893 22.4111\,113.4848 22.3444\,113.4983 22.3286\,113.4644 22.3125\,113.5079 22.2543\,113.4766 22.2453\,113.4812 22.2298\,113.4086 22.2068)\,(113.5282 22.1822\,113.5444 22.217\,113.6052 22.2041\,113.6301 22.1656\,113.6301 22.1089\,113.6102 22.0767\,113.571 22.0767\,113.571 22.0988\,113.5496 22.1089\,113.5496 22.1454\,113.5282 22.1822)\,(113.5408 22.1241\,113.5474 22.1361\,113.5466 22.1206\,113.5408 22.1241))" +MGA,Augusto Cesar Sandino Int'l,Managua (Municipio),Managua,POINT(-86.2738 12.1544),"POLYGON((-86.3919 12.1025\,-86.3573 12.0608\,-86.3144 12.0756\,-86.2974 12.0458\,-86.2719 12.04\,-86.2722 12.0212\,-86.2517 12.0237\,-86.1451 12.0987\,-86.1257 12.1412\,-86.1968 12.2243\,-86.3229 12.1669\,-86.3157 12.1317\,-86.3276 12.1173\,-86.3919 12.1025))" +MGQ,Aden Adde Int'l,Banaadir بنادر,Mogadishu,POINT(45.3419 2.0392),"POLYGON((45.23 2.0186\,45.2301 1.9757\,45.3178 1.7869\,45.551 1.9196\,45.4329 2.0997\,45.3699 2.0998\,45.3017 2.0547\,45.2865 2.0993\,45.2569 2.0913\,45.2507 2.0689\,45.2829 2.0443\,45.23 2.0186))" +MHD,Mashhad,منطقه ۱,Mashhad,POINT(59.6 36.3),"POLYGON((59.5406 36.3151\,59.5632 36.3001\,59.5571 36.2872\,59.5597 36.2835\,59.5636 36.2833\,59.6059 36.298\,59.5779 36.3236\,59.5435 36.3262\,59.5406 36.3151))" +MHT,Manchester-Boston Reg.,Nashua,Nashua,POINT(-71.491 42.7491),"POLYGON((-71.5614 42.789\,-71.5249 42.7717\,-71.5424 42.7028\,-71.4323 42.6999\,-71.4399 42.7565\,-71.4683 42.8005\,-71.4775 42.7908\,-71.4953 42.8054\,-71.5437 42.8058\,-71.5614 42.789))" +MIA,Miami Int'l,Hialeah,Hialeah,POINT(-80.3045 25.8696),"POLYGON((-80.3737 25.9278\,-80.3723 25.8986\,-80.3564 25.8987\,-80.3558 25.8843\,-80.3398 25.8843\,-80.3226 25.8537\,-80.2586 25.8061\,-80.2594 25.8821\,-80.2873 25.8814\,-80.2921 25.8986\,-80.3446 25.8983\,-80.3505 25.9279\,-80.3737 25.9278)\,(-80.3021 25.8741\,-80.2912 25.8805\,-80.291 25.8743\,-80.3021 25.8741))" +MID,Lic M Crecencio Rejon Int'l,Kanasín,Kanasín,POINT(-89.5578 20.9344),"POLYGON((-89.5954 20.9299\,-89.5779 20.9047\,-89.5722 20.9226\,-89.5355 20.9141\,-89.549 20.9362\,-89.5442 20.9808\,-89.559 20.9813\,-89.5608 20.9577\,-89.5954 20.9299))" +MIR,Habib Bourguiba Int'l,ولاية سوسة,Sousse,POINT(10.6333 35.8333),"POLYGON((10.1823 36.1565\,10.3012 35.5264\,10.5663 35.3981\,10.5 35.5568\,10.6874 35.7891\,10.4802 36.0635\,10.5065 36.3918\,10.1823 36.1565))" +MJM,Mbuji Mayi,Kanshi,Mbuji-Mayi,POINT(23.6 -6.15),"POLYGON((23.567 -6.1459\,23.6343 -6.1785\,23.6738 -6.1569\,23.6083 -6.1458\,23.5807 -6.1142\,23.567 -6.1459))" +MKE,General 
Mitchell Int'l,Milwaukee,Milwaukee,POINT(-87.9675 43.0642),"POLYGON((-88.0634 43.1921\,-88.066 43.1042\,-88.0275 43.1043\,-88.0277 43.09\,-88.0477 43.0902\,-88.0388 43.0677\,-87.9892 43.0676\,-87.984 43.0389\,-88.0453 43.0269\,-87.9617 43.0214\,-87.9611 43.003\,-88.0377 42.9881\,-88.0375 42.9736\,-87.9845 42.9706\,-87.986 42.979\,-87.9822 42.981\,-87.9832 42.9704\,-87.9883 42.9666\,-87.9776 42.9667\,-87.9811 42.9807\,-87.9704 42.9879\,-87.9684 42.9727\,-87.9484 42.974\,-87.9491 42.9436\,-87.9635 42.9376\,-87.9404 42.9209\,-87.8889 42.9228\,-87.894 42.9733\,-87.8634 42.989\,-87.8814 43.0014\,-87.8809 43.0519\,-87.8627 43.0748\,-87.8918 43.082\,-87.9071 43.104\,-87.9079 43.09\,-87.925 43.0945\,-87.9208 43.1187\,-87.9458 43.1123\,-87.9451 43.1587\,-87.9753 43.1486\,-87.9749 43.1631\,-87.9949 43.1632\,-87.9943 43.1926\,-88.0634 43.1921))" +MLA,Luqa,Xlokk,Valletta,POINT(14.5125 35.8983),"POLYGON((14.4806 35.8758\,14.5424 35.8297\,14.544 35.839\,14.5456 35.8411\,14.548 35.8401\,14.5591 35.819\,14.5623 35.8198\,14.5632 35.8535\,14.5765 35.8613\,14.551 35.8882\,14.5243 35.899\,14.5201 35.8814\,14.5141 35.8911\,14.5168 35.8809\,14.4966 35.879\,14.5184 35.8971\,14.5175 35.9035\,14.4806 35.8758))" +MNL,Ninoy Aquino Int'l,Manila,Manila,POINT(120.9772 14.5958),"POLYGON((120.7917 14.6011\,120.7934 14.5867\,120.9103 14.5508\,120.9987 14.5617\,121.0262 14.5944\,120.9807 14.639\,120.8395 14.6395\,120.7917 14.6011))" +MOT,Minot Int'l,Minot,Minot,POINT(-101.278 48.2375),"POLYGON((-101.2972 48.2743\,-101.3283 48.2658\,-101.3398 48.2292\,-101.3295 48.2204\,-101.343 48.2193\,-101.3068 48.1922\,-101.2598 48.2123\,-101.2732 48.2255\,-101.2471 48.2193\,-101.1877 48.2326\,-101.261 48.2299\,-101.2852 48.2526\,-101.2592 48.2594\,-101.2847 48.2689\,-101.2806 48.2796\,-101.3062 48.2828\,-101.2972 48.2743)\,(-101.3322 48.2401\,-101.3341 48.2439\,-101.3322 48.2439\,-101.3322 48.2401)\,(-101.3274 48.2522\,-101.327 48.2545\,-101.3255 48.2538\,-101.3274 48.2522)\,(-101.325 48.2566\,-101.3232 48.2582\,-101.3232 48.2566\,-101.325 48.2566)\,(-101.3225 48.2494\,-101.3183 48.2504\,-101.3184 48.2494\,-101.3225 48.2494)\,(-101.2986 48.269\,-101.2985 48.2711\,-101.297 48.269\,-101.2986 48.269)\,(-101.2959 48.2734\,-101.2956 48.2743\,-101.2956 48.2732\,-101.2959 48.2734)\,(-101.2484 48.2231\,-101.2476 48.2244\,-101.2471 48.222\,-101.2484 48.2231))" +MPM,Maputo Int'l,Cidade de Maputo,Maputo,POINT(32.5833 -25.9667),"POLYGON((32.44 -26.0661\,32.4512 -26.09\,32.6147 -26.0524\,32.6015 -26.0264\,32.5696 -26.0133\,32.555 -25.9741\,32.5939 -25.9834\,32.6705 -25.8896\,32.6993 -25.8762\,32.6975 -25.8575\,32.5673 -25.8117\,32.5566 -25.8964\,32.4801 -25.9831\,32.4928 -26.0297\,32.44 -26.0661))" +MRS,Marseille Provence Airport,Marseille,Marseille,POINT(5.37 43.2964),"POLYGON((5.2781 43.3635\,5.3173 43.3565\,5.3612 43.3088\,5.3456 43.2819\,5.3711 43.2671\,5.3735 43.246\,5.337 43.2136\,5.5098 43.1978\,5.5068 43.233\,5.5261 43.2376\,5.5047 43.2817\,5.5321 43.2983\,5.5311 43.313\,5.4719 43.3171\,5.4503 43.3391\,5.4647 43.3601\,5.4475 43.3871\,5.3798 43.3697\,5.3685 43.3896\,5.3218 43.3682\,5.297 43.389\,5.2781 43.3635))" +MRU,Sir Seewoosagur Ramgoolam Int'l,Curepipe,Curepipe,POINT(57.5263 -20.3188),"POLYGON((57.4875 -20.3268\,57.5108 -20.3467\,57.5471 -20.3277\,57.5471 -20.306\,57.5145 -20.2957\,57.4936 -20.3132\,57.5034 -20.3272\,57.4875 -20.3268))" +MSN,Dane Cty. Reg. 
(Truax Field),Sun Prairie,Sun Prairie,POINT(-89.2362 43.1825),"POLYGON((-89.2882 43.1794\,-89.2748 43.1759\,-89.2855 43.1681\,-89.2757 43.1507\,-89.2369 43.1529\,-89.2293 43.1718\,-89.1862 43.1807\,-89.1812 43.1918\,-89.2023 43.1952\,-89.1813 43.2106\,-89.2501 43.2095\,-89.2882 43.1794)\,(-89.2628 43.1651\,-89.265 43.1688\,-89.2579 43.1689\,-89.2628 43.1651)\,(-89.2491 43.1728\,-89.2513 43.1744\,-89.2495 43.1753\,-89.2491 43.1728)\,(-89.2421 43.1634\,-89.2426 43.1638\,-89.2421 43.1638\,-89.2421 43.1634))" +MSP,Minneapolis St. Paul Int'l,Minneapolis,Minneapolis,POINT(-93.2678 44.9635),"POLYGON((-93.3291 44.9203\,-93.3188 44.8908\,-93.1939 44.9054\,-93.2077 45.0062\,-93.2268 45.0133\,-93.227 45.0357\,-93.3195 45.0513\,-93.3291 44.9203))" +MSQ,Minsk Int'l,Ленінскі раён,Minsk,POINT(27.5667 53.9),"POLYGON((27.5503 53.8962\,27.5634 53.8882\,27.5645 53.8327\,27.6005 53.833\,27.6272 53.8537\,27.6004 53.8931\,27.5503 53.8962))" +MSU,Moshoeshoe I Int'l,Maseru District,Maseru,POINT(27.48 -29.31),"POLYGON((27.2999 -29.5127\,27.6645 -29.6948\,27.6239 -29.8614\,28.0924 -29.9252\,28.2504 -29.8583\,28.1663 -29.2938\,28.0554 -29.3964\,28.0001 -29.2755\,27.4954 -29.2837\,27.2999 -29.5127))" +MSY,New Orleans Int'l,New Orleans,New Orleans,POINT(-89.9288 30.0687),"POLYGON((-90.1059 30.1919\,-90.1327 29.9135\,-90.1012 29.9103\,-90.0633 29.9248\,-90.0584 29.9455\,-90.0025 29.8926\,-89.9586 29.9025\,-89.9107 29.8679\,-89.9069 29.896\,-89.9241 29.9189\,-90.0119 29.9457\,-89.9893 29.9895\,-89.9407 29.9805\,-89.8945 30.0028\,-89.8632 29.9876\,-89.8398 30.0244\,-89.8046 30.044\,-89.7169 30.0235\,-89.7296 30.057\,-89.6843 30.0739\,-89.6757 30.1052\,-89.6252 30.1537\,-89.684 30.175\,-89.7216 30.1619\,-89.744 30.1774\,-89.8021 30.1515\,-89.823 30.1832\,-89.8741 30.1995\,-89.999 30.1491\,-90.1059 30.1919))" +MUC,Franz-Josef-Strauss,München,Munich,POINT(11.575 48.1375),"POLYGON((11.3608 48.1581\,11.3891 48.1479\,11.3944 48.1255\,11.4637 48.1299\,11.4709 48.0832\,11.5088 48.0616\,11.5869 48.0939\,11.6857 48.0775\,11.6828 48.0918\,11.7146 48.1108\,11.6942 48.1231\,11.7229 48.1371\,11.6782 48.1443\,11.692 48.1825\,11.6277 48.1773\,11.6505 48.2134\,11.639 48.226\,11.5875 48.2135\,11.5823 48.2286\,11.5012 48.2481\,11.4911 48.2231\,11.3908 48.2005\,11.3933 48.1845\,11.3608 48.1581))" +MUCf,Munich Freight Terminal,München,Munich,POINT(11.575 48.1375),"POLYGON((11.3608 48.1581\,11.3891 48.1479\,11.3944 48.1255\,11.4637 48.1299\,11.4709 48.0832\,11.5088 48.0616\,11.5869 48.0939\,11.6857 48.0775\,11.6828 48.0918\,11.7146 48.1108\,11.6942 48.1231\,11.7229 48.1371\,11.6782 48.1443\,11.692 48.1825\,11.6277 48.1773\,11.6505 48.2134\,11.639 48.226\,11.5875 48.2135\,11.5823 48.2286\,11.5012 48.2481\,11.4911 48.2231\,11.3908 48.2005\,11.3933 48.1845\,11.3608 48.1581))" +MVD,Carrasco Int'l,Montevideo,Montevideo,POINT(-56.1819 -34.8836),"POLYGON((-56.4314 -34.8281\,-56.3401 -34.8707\,-56.3157 -34.9048\,-56.2967 -34.8931\,-56.2564 -34.9061\,-56.2468 -34.8773\,-56.2125 -34.8751\,-56.1983 -34.8993\,-56.223 -34.9135\,-56.1715 -34.9148\,-56.1605 -34.9381\,-56.1192 -34.8987\,-56.0646 -34.8997\,-56.0225 -34.8778\,-56.0608 -34.8658\,-56.0349 -34.769\,-56.0544 -34.7606\,-56.0916 -34.7796\,-56.0942 -34.7582\,-56.119 -34.7487\,-56.1245 -34.7188\,-56.139 -34.7141\,-56.1656 -34.7281\,-56.1917 -34.7215\,-56.231 -34.7701\,-56.3244 -34.7022\,-56.3544 -34.7266\,-56.3415 -34.7515\,-56.3566 -34.7943\,-56.4314 -34.8281))" +MXL,Gen R.S. 
Taboada Int'l,Municipio de Mexicali,Mexicali,POINT(-115.4678 32.6633),"POLYGON((-115.8793 32.6361\,-115.8477 31.9901\,-115.6047 31.6242\,-115.3855 31.7732\,-115.3116 31.8051\,-115.2927 31.7911\,-115.1872 31.8303\,-115.1818 31.8572\,-115.1069 31.8049\,-115.0563 31.7122\,-115.0713 31.6775\,-115.0752 31.5617\,-115.0539 31.4406\,-115.0311 31.448\,-114.9885 31.3629\,-114.8787 31.385\,-114.8328 31.5785\,-114.7837 31.6355\,-114.817 31.7287\,-114.8063 31.8164\,-114.8432 31.8641\,-114.9106 31.8676\,-114.942 31.8907\,-114.947 31.9181\,-114.9656 31.9215\,-114.957 32.0386\,-114.9897 32.1464\,-114.9753 32.178\,-115.053 32.2452\,-114.9666 32.3388\,-114.9685 32.3907\,-114.9319 32.4397\,-114.935 32.4819\,-114.8159 32.483\,-114.7915 32.5564\,-114.813 32.5574\,-114.8004 32.5868\,-114.8105 32.6223\,-114.7652 32.6431\,-114.7199 32.7187\,-115.8793 32.6361))" +MXP,Malpensa,Gallarate,Gallarate,POINT(8.7914 45.6649),"POLYGON((8.7537 45.6759\,8.7839 45.6406\,8.8323 45.6409\,8.7973 45.6897\,8.7537 45.6759))" +MZT,General Rafael Buelna Int'l,Mazatlán,Mazatlán,POINT(-106.4167 23.2167),"POLYGON((-106.6185 23.474\,-106.4293 23.1763\,-106.1403 23.0854\,-106.237 23.4463\,-105.9322 23.7947\,-106.0683 23.8912\,-106.2807 23.6003\,-106.5212 23.691\,-106.6185 23.474))" +NAG,Dr. Babasaheb Ambedkar Int'l,Nagpur City,Nāgpur,POINT(79.0806 21.1497),"POLYGON((78.9952 21.1712\,79.0179 21.1245\,79.0134 21.077\,79.0463 21.0775\,79.0465 21.0585\,79.0655 21.053\,79.084 21.0963\,79.1172 21.089\,79.1423 21.1245\,79.1622 21.1252\,79.1788 21.165\,79.1648 21.1918\,79.1448 21.1869\,79.0876 21.231\,79.0738 21.2091\,79.0405 21.2166\,79.0319 21.1849\,78.9952 21.1712))" +NAN,Nadi Int'l,Ba,Nadi,POINT(177.4167 -17.8),"POLYGON((176.6701 -17.2113\,176.7201 -17.4162\,177.1216 -17.4162\,177.4856 -17.9796\,177.5852 -17.8098\,178.0073 -17.7689\,178.0979 -16.2277\,177.1671 -16.6168\,176.6701 -17.2113))" +NAP,Naples Int'l,Casoria,Casoria,POINT(14.3 40.9),"POLYGON((14.2798 40.907\,14.3065 40.8752\,14.3305 40.8842\,14.3283 40.9026\,14.2924 40.9194\,14.2984 40.9285\,14.2798 40.907))" +NAS,Nassau Int'l,New Providence,Nassau,POINT(-77.3386 25.0781),"POLYGON((-77.7742 25.1466\,-77.6445 24.8079\,-77.3345 24.7901\,-76.8322 25.0965\,-76.9078 25.2421\,-77.2001 25.559\,-77.7742 25.1466))" +NAT,Augusto Severo Int'l,Natal,Natal,POINT(-35.2 -5.7833),"POLYGON((-35.2912 -5.7322\,-35.2871 -5.7631\,-35.2475 -5.782\,-35.2815 -5.822\,-35.2824 -5.8498\,-35.1544 -5.8987\,-35.1789 -5.8675\,-35.1944 -5.7514\,-35.2649 -5.7027\,-35.2912 -5.7322))" +NBO,Jomo Kenyatta Int'l,Nairobi,Nairobi,POINT(36.8172 -1.2864),"POLYGON((36.6647 -1.3201\,36.7422 -1.3823\,36.8442 -1.3868\,36.9616 -1.4448\,36.9071 -1.3636\,36.9836 -1.3164\,36.9745 -1.2896\,36.9955 -1.2814\,37.0027 -1.3007\,37.0765 -1.3034\,37.1049 -1.2654\,37.0634 -1.2058\,37.0118 -1.2369\,36.9317 -1.2229\,36.9132 -1.2081\,36.9433 -1.1755\,36.8978 -1.1607\,36.8877 -1.1731\,36.8994 -1.1932\,36.8638 -1.1903\,36.8351 -1.2154\,36.7914 -1.1924\,36.7791 -1.2273\,36.7575 -1.2191\,36.7473 -1.2374\,36.7296 -1.2345\,36.7213 -1.265\,36.6944 -1.2606\,36.6647 -1.3201))" +NCL,Newcastle Int'l,Gateshead,Gateshead,POINT(-1.6 54.95),"POLYGON((-1.8269 54.9302\,-1.849 54.9146\,-1.8267 54.9101\,-1.821 54.9057\,-1.7954 54.9035\,-1.7376 54.9187\,-1.6748 54.9096\,-1.6453 54.8788\,-1.5942 54.902\,-1.5799 54.8778\,-1.5561 54.8848\,-1.5689 54.9246\,-1.5103 54.9321\,-1.5153 54.9573\,-1.5336 54.965\,-1.6401 54.9593\,-1.7861 54.9844\,-1.8128 54.9763\,-1.8337 54.9533\,-1.8269 54.9302))" +NDB,Nouadhibou Int'l,ولاية داخلة نواذيبو,Nouadhibou,POINT(-17.0333 
20.9333),"POLYGON((-17.0681 20.8853\,-17.0449 20.7706\,-17.0273 20.8539\,-17.0555 20.8921\,-17.0159 20.9144\,-16.9944 21.0301\,-16.9156 21.1553\,-16.8875 21.1121\,-16.8664 21.0238\,-16.8263 20.9855\,-16.8063 20.9221\,-16.7802 20.9166\,-16.763 20.8328\,-16.7018 20.7462\,-16.6722 20.732\,-16.6792 20.6774\,-16.529 20.5502\,-16.4088 20.5638\,-16.3628 20.4239\,-16.2673 20.2744\,-16.2409 20.2782\,-16.2071 20.2173\,-16.2126 20.1802\,-16.2679 20.1186\,-16.228 20.0481\,-16.2308 19.989\,-16.3091 19.9149\,-16.3215 19.8843\,-16.4233 19.832\,-16.4436 19.7515\,-16.489 19.753\,-16.5142 19.6987\,-16.4993 19.6638\,-16.4688 19.6494\,-16.4424 19.5918\,-16.3937 19.5804\,-16.4809 19.4233\,-16.446 19.3969\,-16.3721 19.4795\,-16.3192 19.4528\,-16.2829 19.4951\,-15.5785 20.7376\,-15.8398 21.1508\,-14.1732 21.1445\,-14.3012 21.3417\,-16.9485 21.3342\,-17.0681 20.8853))" +NDJ,Ndjamena,N'Djaména انجمينا,N’Djamena,POINT(15.05 12.11),"POLYGON((14.8853 12.1676\,14.982 12.0881\,14.9937 12.0883\,14.9993 12.1207\,15.0262 12.1143\,15.0602 12.0543\,15.1545 12.0665\,15.1826 12.0953\,15.1252 12.1674\,15.0275 12.217\,15.0262 12.2535\,14.9029 12.2249\,14.8928 12.2029\,14.9106 12.1852\,14.8853 12.1676))" +NGB,Ningbo Lishe Int'l,海曙区,Ningbo,POINT(121.5492 29.875),"POLYGON((121.1349 29.7775\,121.1679 29.7292\,121.18 29.743\,121.201 29.7268\,121.4112 29.742\,121.4449 29.7738\,121.44 29.7889\,121.4522 29.7796\,121.4873 29.8029\,121.503 29.7946\,121.4994 29.8082\,121.5188 29.8152\,121.5092 29.828\,121.5565 29.875\,121.5245 29.8928\,121.5291 29.91\,121.5078 29.9058\,121.4923 29.9254\,121.4723 29.9083\,121.4482 29.9384\,121.4188 29.9315\,121.4058 29.9488\,121.38 29.9479\,121.36 29.8975\,121.34 29.9021\,121.3456 29.9114\,121.3199 29.9073\,121.3159 29.9265\,121.2829 29.8938\,121.249 29.8933\,121.2221 29.846\,121.1957 29.841\,121.1686 29.812\,121.1705 29.7856\,121.1473 29.7928\,121.1349 29.7775))" +NGO,Chubu Centrair Int'l,名古屋市,Nagoya,POINT(136.9 35.1833),"POLYGON((136.792 35.1123\,136.8405 35.064\,136.8472 35.026\,136.884 35.0673\,136.9707 35.0422\,136.9745 35.0632\,137.0087 35.0746\,137.0235 35.1019\,137.0001 35.1301\,137.0233 35.1399\,137.0179 35.1884\,137.0323 35.1916\,137.0015 35.2094\,137.0603 35.2496\,137.0299 35.2593\,136.9663 35.2168\,136.9366 35.2161\,136.9299 35.2437\,136.8557 35.2306\,136.8602 35.208\,136.8793 35.2069\,136.836 35.1578\,136.8042 35.1651\,136.792 35.1123))" +NIM,Niamey,Niamey,Niamey,POINT(2.1175 13.515),"POLYGON((1.9609 13.4934\,1.9758 13.4898\,1.9673 13.4364\,2.0019 13.4223\,1.9981 13.3979\,2.0247 13.383\,2.0416 13.4163\,2.1049 13.3993\,2.1495 13.4282\,2.1376 13.4439\,2.1667 13.4542\,2.1962 13.4376\,2.2292 13.4769\,2.2137 13.5541\,2.2472 13.5678\,2.2413 13.585\,2.2642 13.6301\,2.2197 13.6473\,2.1849 13.6205\,2.1428 13.6334\,2.1333 13.6089\,2.114 13.6086\,2.1028 13.6308\,2.0841 13.628\,2.0895 13.6024\,2.0724 13.5751\,2.0391 13.5902\,2.0466 13.6017\,2.0316 13.6112\,2.016 13.5876\,1.9685 13.6019\,2.0179 13.5376\,1.9631 13.5229\,1.9609 13.4934))" +NKC,Nouakchott Int'l,Tevragh Zein,Nouakchott,POINT(-15.9785 18.0858),"POLYGON((-16.0448 18.1764\,-16.0443 18.0996\,-16.0188 18.0972\,-15.9992 18.0845\,-15.9939 18.0843\,-15.9863 18.0867\,-15.9833 18.0788\,-15.977 18.0777\,-15.9749 18.0874\,-15.97 18.0866\,-15.9682 18.0988\,-15.9756 18.1002\,-15.9748 18.1043\,-15.9714 18.1086\,-15.9719 18.1111\,-15.9965 18.1767\,-16.0448 18.1764))" +NKG,Nanjing Lukou Int'l,玄武区,Nanjing,POINT(118.7789 32.0608),"POLYGON((118.7787 32.0937\,118.7789 32.0438\,118.8343 32.0421\,118.846 32.0202\,118.8919 32.0264\,118.8732 32.0452\,118.8832 
32.0607\,118.8711 32.077\,118.9033 32.0887\,118.8956 32.0975\,118.8295 32.0959\,118.8372 32.1074\,118.8292 32.1083\,118.7787 32.0937))" +NKM,Nagoya,名古屋市,Nagoya,POINT(136.9 35.1833),"POLYGON((136.792 35.1123\,136.8405 35.064\,136.8472 35.026\,136.884 35.0673\,136.9707 35.0422\,136.9745 35.0632\,137.0087 35.0746\,137.0235 35.1019\,137.0001 35.1301\,137.0233 35.1399\,137.0179 35.1884\,137.0323 35.1916\,137.0015 35.2094\,137.0603 35.2496\,137.0299 35.2593\,136.9663 35.2168\,136.9366 35.2161\,136.9299 35.2437\,136.8557 35.2306\,136.8602 35.208\,136.8793 35.2069\,136.836 35.1578\,136.8042 35.1651\,136.792 35.1123))" +NLD,Quetzalcoatl Int'l,Nuevo Laredo,Nuevo Laredo,POINT(-99.5069 27.4861),"POLYGON((-99.9264 27.5492\,-99.9192 27.4915\,-99.7604 27.437\,-99.7488 27.289\,-99.6382 27.2918\,-99.4964 27.2713\,-99.4944 27.3036\,-99.5381 27.3172\,-99.5042 27.3392\,-99.4876 27.4102\,-99.4954 27.4457\,-99.4787 27.4794\,-99.4937 27.4977\,-99.5281 27.4982\,-99.5112 27.5652\,-99.544 27.6075\,-99.5835 27.6031\,-99.5773 27.6185\,-99.6003 27.6415\,-99.6561 27.6295\,-99.6639 27.6575\,-99.6903 27.669\,-99.7054 27.6554\,-99.7281 27.6791\,-99.9264 27.5492))" +//NNG,Nanning Wuwu Int'l,兴宁区,Nanning,POINT(108.315 22.8192),"POLYGON((108.3085 22.8267\,108.3188 22.8158\,108.321 22.8229\,108.4012 22.8711\,108.4812 22.8453\,108.5008 22.8604\,108.5683 22.8607\,108.5972 22.8896\,108.6453 22.896\,108.6649 22.9315\,108.6943 22.9393\,108.7124 22.9947\,108.7659 22.9967\,108.7756 23.0215\,108.7207 23.0563\,108.7198 23.0839\,108.6964 23.1024\,108.6765 23.0919\,108.6125 23.1093\,108.6107 23.0482\,108.5768 23.0415\,108.5634 23.0162\,108.4922 23.0476\,108.4495 23.0291\,108.4392 23.0006\,108.3684 22.9941\,108.3493 22.9706\,108.3518 22.9146\,108.3269 22.8768\,108.3285 22.8425\,108.3085 22.8267)\,(108.3426 22.8338\,108.3428 22.834\,108.3429 22.8339\,108.3426 22.8338)\,(108.3551 22.8401\,108.3556 22.8413\,108.3561 22.8407\,108.3551 22.8401))" +NOG,Nogales Int'l,Nogales,Heroica Nogales,POINT(-110.9458 31.3186),"POLYGON((-111.3568 31.2795\,-111.3358 31.2568\,-111.3176 31.1782\,-111.2625 31.1908\,-111.2536 31.156\,-111.2651 31.1482\,-111.2298 31.1106\,-111.2428 31.0957\,-111.2134 31.06\,-111.1734 31.0842\,-111.1582 31.0594\,-111.124 31.0698\,-111.1157 31.0472\,-111.1322 31.0108\,-111.052 31.0275\,-111.0041 31.0214\,-110.9825 31.058\,-110.9674 31.038\,-110.8624 31.0192\,-110.8569 31.0565\,-110.7773 31.0638\,-110.7533 31.0354\,-110.7347 31.0511\,-110.7828 31.1741\,-110.6512 31.2164\,-110.7007 31.3334\,-111.075 31.3323\,-111.2119 31.3765\,-111.2139 31.3234\,-111.2952 31.3219\,-111.3165 31.2804\,-111.3568 31.2795))" +NOU,La Tontouta Int'l,Nouméa,Nouméa,POINT(166.458 -22.2758),"POLYGON((166.394 -22.2306\,166.4493 -22.2489\,166.4294 -22.246\,166.4434 -22.2617\,166.4247 -22.2664\,166.4408 -22.2755\,166.4307 -22.2939\,166.4483 -22.3125\,166.4622 -22.3066\,166.4595 -22.2801\,166.4727 -22.2716\,166.4662 -22.2858\,166.4814 -22.2833\,166.4758 -22.259\,166.502 -22.26\,166.4862 -22.2167\,166.394 -22.2306))" +NOV,Nova Lisboa,Huambo,Huambo,POINT(15.7347 -12.7767),"POLYGON((14.7926 -12.6831\,14.9569 -13.2869\,15.3208 -13.3933\,15.5022 -13.7628\,15.9597 -13.3933\,16.4514 -13.3706\,16.5044 -12.7544\,16.2989 -12.5378\,16.5852 -11.9019\,16.3213 -11.5073\,15.387 -11.552\,15.3672 -11.9429\,15.0211 -12.2061\,15.0692 -12.5161\,14.7926 -12.6831))" +NRK,Norrköping Airport,Norrköpings kommun,Norrköping,POINT(16.2 58.6),"POLYGON((15.6176 58.6442\,16.0157 58.418\,16.1666 58.5269\,17.5693 58.399\,16.1822 58.854\,15.9781 58.6224\,15.6176 58.6442))" +NRT,Narita 
Int'l,中央区,Chiba,POINT(140.1064 35.6073),"POLYGON((140.0247 35.5801\,140.1354 35.5426\,140.1754 35.5812\,140.1481 35.6067\,140.1538 35.6174\,140.1335 35.6148\,140.1262 35.6289\,140.0965 35.6212\,140.1053 35.6113\,140.0843 35.5923\,140.0247 35.5801))" +NSI,Yaoundé Nsimalen Int'l,Yaoundé I,Yaoundé,POINT(11.5167 3.8667),"POLYGON((11.4783 3.9495\,11.5008 3.9206\,11.4989 3.89\,11.5205 3.8614\,11.5425 3.9306\,11.5748 3.9413\,11.5593 3.9645\,11.5443 3.9535\,11.5138 3.9692\,11.4783 3.9495))" +NTR,Del Norte Int'l,Apodaca,Ciudad Apodaca,POINT(-100.1886 25.7817),"POLYGON((-100.2717 25.8718\,-100.2671 25.7643\,-100.1779 25.7295\,-100.1831 25.7097\,-100.1068 25.7014\,-100.1243 25.7346\,-100.1486 25.7311\,-100.101 25.7784\,-100.1012 25.8004\,-100.1319 25.8211\,-100.1169 25.8448\,-100.2353 25.8632\,-100.254 25.8793\,-100.2717 25.8718))" +NUE,Nurnberg,Nürnberg,Nuremberg,POINT(11.0775 49.4539),"POLYGON((10.9887 49.5365\,10.9995 49.5136\,11.0287 49.5087\,11.0142 49.4926\,11.0258 49.4728\,10.9914 49.4361\,11.0225 49.4182\,11.0243 49.3939\,10.9937 49.3766\,11.0334 49.3719\,11.0696 49.3314\,11.1222 49.3574\,11.1 49.3721\,11.1228 49.3686\,11.1005 49.377\,11.1108 49.3925\,11.1894 49.3899\,11.1752 49.4129\,11.1761 49.4137\,11.1854 49.4085\,11.195 49.4058\,11.1765 49.414\,11.2135 49.4206\,11.1797 49.4287\,11.1767 49.4146\,11.1326 49.4323\,11.1285 49.4423\,11.1552 49.4466\,11.1367 49.4529\,11.1885 49.4675\,11.1503 49.4731\,11.1232 49.4913\,11.1202 49.5129\,11.0581 49.5052\,11.0784 49.5197\,10.9887 49.5365)\,(11.1769 49.3922\,11.1845 49.3971\,11.1865 49.3934\,11.1769 49.3922))" +NYO,Stockholm-Skavsta,Nyköpings kommun,Nyköping,POINT(17.0086 58.7531),"POLYGON((16.2689 58.8255\,16.3857 58.7036\,16.8275 58.5942\,17.1786 58.7006\,17.8747 58.471\,17.3848 58.9342\,17.0567 59.0211\,16.5856 58.8933\,16.5914 58.7837\,16.2689 58.8255)\,(17.0501 59.0141\,17.0526 59.0152\,17.0571 59.0111\,17.0501 59.0141))" +OAK,Oakland Int'l,San Leandro,San Leandro,POINT(-122.1599 37.7074),"POLYGON((-122.2097 37.7111\,-122.1706 37.6674\,-122.1226 37.6986\,-122.127 37.7293\,-122.146 37.7428\,-122.2097 37.7111))" +OAX,Xoxocotlán Int'l,Oaxaca de Juárez,Oaxaca,POINT(-96.7253 17.0606),"POLYGON((-96.7805 17.1218\,-96.7552 17.1094\,-96.7449 17.0732\,-96.7674 17.0748\,-96.7729 17.048\,-96.7047 17.0337\,-96.7103 17.0662\,-96.688 17.0907\,-96.6727 17.164\,-96.7805 17.1218))" +ODS,Odessa Int'l,Одеська міська громада,Odesa,POINT(30.7326 46.4775),"POLYGON((30.6114 46.5028\,30.6193 46.4655\,30.6532 46.4511\,30.6499 46.4285\,30.6843 46.4026\,30.7088 46.4036\,30.688 46.3938\,30.7219 46.3752\,30.6898 46.3559\,30.7023 46.3433\,30.7522 46.3752\,30.7727 46.4364\,30.7631 46.4625\,30.7659 46.477\,30.7611 46.486\,30.7648 46.4867\,30.7628 46.4924\,30.7603 46.4963\,30.757 46.484\,30.7293 46.4991\,30.7366 46.5287\,30.804 46.5707\,30.8029 46.5954\,30.8194 46.6076\,30.8314 46.6219\,30.8026 46.6244\,30.8012 46.605\,30.8143 46.6043\,30.7595 46.5804\,30.7558 46.5655\,30.7175 46.5759\,30.731 46.5521\,30.6627 46.5428\,30.6815 46.5209\,30.6639 46.512\,30.676 46.4966\,30.6114 46.5028))" +OGG,Kahului,Maui County,Kahului,POINT(-156.4603 20.8715),"POLYGON((-156.756 20.5267\,-156.706 20.4658\,-156.5461 20.4608\,-156.4894 20.5\,-156.4684 20.5564\,-156.3964 20.5335\,-156.2905 20.5369\,-156.1906 20.5755\,-156.1353 20.5698\,-156.0386 20.6007\,-155.9458 20.6839\,-155.9268 20.7387\,-155.9365 20.792\,-155.9683 20.8342\,-156.0765 20.8808\,-156.2143 20.9831\,-156.3205 21.0062\,-156.4511 20.9688\,-156.4831 21.0287\,-156.5595 21.0823\,-156.6059 21.0926\,-156.6559 21.0818\,-156.735 
21.0057\,-156.7518 20.9485\,-156.7411 20.88\,-156.6519 20.7722\,-156.5627 20.738\,-156.5119 20.7354\,-156.5 20.6912\,-156.5496 20.658\,-156.5977 20.6579\,-156.7044 20.6046\,-156.7427 20.5701\,-156.756 20.5267))" +OKC,Will Rogers,Oklahoma City,Oklahoma City,POINT(-97.5136 35.4676),"POLYGON((-97.8309 35.3584\,-97.5472 35.2908\,-97.1247 35.3552\,-97.4592 35.4208\,-97.2117 35.673\,-97.5229 35.6748\,-97.7984 35.6306\,-97.7069 35.5153\,-97.8309 35.3584)\,(-97.6714 35.3772\,-97.7776 35.406\,-97.6714 35.4064\,-97.6714 35.3772)\,(-97.6234 35.4897\,-97.6234 35.5511\,-97.6109 35.5502\,-97.6234 35.4897)\,(-97.6622 35.5174\,-97.6632 35.5222\,-97.6578 35.5223\,-97.6622 35.5174)\,(-97.5903 35.5112\,-97.5856 35.515\,-97.5857 35.5113\,-97.5903 35.5112)\,(-97.5755 35.3758\,-97.5754 35.3771\,-97.575 35.3771\,-97.5755 35.3758)\,(-97.5549 35.5368\,-97.5404 35.5801\,-97.5299 35.537\,-97.5549 35.5368)\,(-97.4857 35.3989\,-97.4769 35.4063\,-97.477 35.3986\,-97.4857 35.3989)\,(-97.4329 35.5078\,-97.4534 35.5368\,-97.4418 35.5369\,-97.4329 35.5078))" +OKD,Okadama,,Sapporo,POINT(141.35 43.0667),"POLYGON((141.3097 43.0587\,141.346 43.0171\,141.364 43.0553\,141.3899 43.0687\,141.3404 43.0673\,141.3281 43.0865\,141.3097 43.0587)\,(141.3521 43.0435\,141.355 43.0489\,141.3561 43.0402\,141.3521 43.0435))" +OMS,Omsk Tsentralny,Центральный административный округ,Omsk,POINT(73.3833 54.9667),"POLYGON((73.3369 55.029\,73.3571 55.0017\,73.341 54.9946\,73.3769 54.9554\,73.3961 54.9579\,73.3985 54.9756\,73.4951 54.9919\,73.4892 54.978\,73.5153 54.9709\,73.5778 54.9935\,73.5612 55.0077\,73.5155 55.0077\,73.5574 55.0267\,73.5152 55.0589\,73.54 55.0712\,73.529 55.0756\,73.4733 55.0394\,73.4288 55.0652\,73.3369 55.029))" +ONT,Ontario Int'l,Rancho Cucamonga,Rancho Cucamonga,POINT(-117.5667 34.1247),"POLYGON((-117.6305 34.1712\,-117.6196 34.1216\,-117.6287 34.0922\,-117.6011 34.0776\,-117.5241 34.0772\,-117.5146 34.1209\,-117.4789 34.1504\,-117.4885 34.179\,-117.5755 34.1795\,-117.5755 34.1652\,-117.6305 34.1712))" +OOL,Gold Coast,Gold Coast City,Gold Coast,POINT(153.4 -28.0167),"POLYGON((153.1689 -27.8012\,153.1889 -27.8551\,153.1807 -27.8951\,153.2154 -27.9027\,153.2066 -27.9492\,153.2252 -27.9561\,153.2251 -27.9817\,153.2008 -27.9816\,153.2176 -28.0082\,153.2389 -28.0112\,153.2134 -28.0803\,153.2324 -28.0853\,153.2299 -28.1\,153.2009 -28.1192\,153.1841 -28.1925\,153.2142 -28.2602\,153.2327 -28.265\,153.2789 -28.2336\,153.3568 -28.2496\,153.4769 -28.157\,153.5346 -28.1776\,153.5519 -28.1645\,153.5232 -28.1664\,153.4776 -28.1257\,153.4399 -28.0502\,153.4248 -27.8973\,153.4467 -27.7352\,153.4105 -27.7427\,153.3917 -27.7077\,153.3664 -27.7123\,153.3582 -27.6906\,153.3089 -27.7106\,153.2925 -27.692\,153.2341 -27.694\,153.2335 -27.711\,153.2127 -27.7153\,153.2326 -27.7238\,153.2012 -27.74\,153.193 -27.78\,153.1689 -27.8012))" +OPO,Francisco Sa Carneiro,Matosinhos e Leça da Palmeira,Matosinhos,POINT(-8.7 41.1833),"POLYGON((-8.7172 41.2051\,-8.7071 41.1921\,-8.7123 41.1823\,-8.7082 41.1726\,-8.7062 41.1863\,-8.6698 41.1763\,-8.6646 41.1923\,-8.6863 41.2111\,-8.7172 41.2051))" +OPQS,Dhamial,تحصیل راولپنڈی چھاؤنی,Rawalpindi,POINT(73.0333 33.6),"POLYGON((72.9141 33.573\,73.0017 33.4398\,73.0409 33.4275\,73.0704 33.4605\,73.1429 33.4587\,73.1838 33.503\,73.0769 33.567\,73.0758 33.5983\,73.0151 33.626\,72.9941 33.6151\,72.9748 33.6269\,72.9814 33.6088\,72.9141 33.573))" +ORD,Chicago O'Hare Int'l,West Chicago Township,Chicago,POINT(-87.6866 41.8375),"POLYGON((-87.7756 41.9093\,-87.7741 41.8655\,-87.74 41.866\,-87.7385 41.8221\,-87.6324 
41.86\,-87.6443 41.897\,-87.6881 41.9395\,-87.7076 41.9394\,-87.7069 41.9101\,-87.7756 41.9093))" +ORF,Norfolk Int'l,Virginia Beach,Virginia Beach,POINT(-76.0435 36.7335),"POLYGON((-76.2282 36.8325\,-76.2036 36.7695\,-76.1436 36.7568\,-76.0652 36.6868\,-76.1213 36.6664\,-76.1227 36.5505\,-75.8319 36.5529\,-75.9166 36.7507\,-75.9679 36.9438\,-75.9501 37.001\,-76.0843 37.0302\,-76.1296 36.9194\,-76.1753 36.9308\,-76.1925 36.9182\,-76.1939 36.8646\,-76.1794 36.8498\,-76.1973 36.8281\,-76.2282 36.8325))" +ORK,Cork,Cork,Cork,POINT(-8.47 51.8972),"POLYGON((-8.6379 51.9299\,-8.6271 51.9062\,-8.637 51.878\,-8.5243 51.8645\,-8.482 51.8273\,-8.3727 51.8494\,-8.3551 51.8652\,-8.3575 51.8799\,-8.3864 51.8822\,-8.373 51.9304\,-8.3969 51.97\,-8.5146 51.9335\,-8.5741 51.9579\,-8.6379 51.9299))" +ORN,Oran Es Senia,Oran,Oran,POINT(-0.6331 35.6969),"POLYGON((-0.7296 35.7046\,-0.7076 35.6861\,-0.7047 35.6453\,-0.6446 35.6576\,-0.6102 35.6828\,-0.5969 35.676\,-0.597 35.7149\,-0.5521 35.7506\,-0.5535 35.7676\,-0.6339 35.71\,-0.6562 35.7099\,-0.6554 35.7132\,-0.6472 35.7124\,-0.6379 35.7143\,-0.627 35.72\,-0.6573 35.7142\,-0.6933 35.698\,-0.7296 35.7046))" +ORY,Paris Orly,Vitry-sur-Seine,Vitry-sur-Seine,POINT(2.3928 48.7875),"POLYGON((2.3673 48.7794\,2.4119 48.7743\,2.4227 48.792\,2.4088 48.8085\,2.3757 48.8059\,2.3673 48.7794))" +OSB,Mosul Int'l,ناحية مرکز قضاء الموصل,Mosul,POINT(43.13 36.34),"POLYGON((43.0066 36.361\,43.0341 36.3482\,43.0305 36.3098\,43.098 36.2839\,43.0857 36.2251\,43.1013 36.2179\,43.1258 36.2168\,43.1478 36.2576\,43.17 36.2534\,43.175 36.2663\,43.2375 36.2557\,43.2867 36.2763\,43.2657 36.2906\,43.2456 36.4149\,43.1944 36.4189\,43.1966 36.4555\,43.1335 36.4557\,43.1284 36.3981\,43.0726 36.3705\,43.0144 36.3778\,43.0066 36.361))" +OSL,Oslo Gardermoen,Oslo,Oslo,POINT(10.7389 59.9133),"POLYGON((10.4892 60.0173\,10.614 59.9743\,10.6356 59.9478\,10.6257 59.9227\,10.6581 59.8844\,10.731 59.8772\,10.744 59.8393\,10.7716 59.8235\,10.8563 59.8093\,10.9366 59.8317\,10.9076 59.885\,10.9214 59.9265\,10.9514 59.9492\,10.9257 59.9837\,10.8377 59.998\,10.8117 60.0255\,10.8196 60.0647\,10.7868 60.0682\,10.7548 60.1002\,10.7684 60.1124\,10.7469 60.123\,10.6039 60.1339\,10.5728 60.1168\,10.5973 60.0773\,10.4892 60.0173))" +OUA,Ouagadougou,Ouagadougou,Ouagadougou,POINT(-1.5275 12.3686),"POLYGON((-1.6543 12.3197\,-1.6093 12.2741\,-1.5609 12.2742\,-1.5292 12.2512\,-1.4554 12.2578\,-1.4424 12.3115\,-1.4271 12.3104\,-1.4076 12.3465\,-1.4456 12.3991\,-1.4181 12.4359\,-1.4177 12.4679\,-1.4494 12.4655\,-1.4593 12.4911\,-1.5322 12.4577\,-1.6381 12.4597\,-1.6529 12.4323\,-1.6543 12.3197))" +OUL,Oulu,Oulu,Oulu,POINT(25.4719 65.0142),"POLYGON((25.6594 65.2401\,24.1149 65.1739\,24.951 65.1411\,25.3868 64.8755\,26.7711 64.9569\,26.2337 65.2742\,26.4267 65.3476\,26.1533 65.4191\,26.1549 65.5551\,25.7891 65.5503\,25.6594 65.2401)\,(25.3403 65.2555\,25.3508 65.2522\,25.3392 65.2521\,25.3403 65.2555)\,(26.0009 65.2878\,26.1244 65.3564\,26.1533 65.2997\,26.0009 65.2878))" +OVB,Novosibirsk Tolmachev,городской округ Новосибирск,Novosibirsk,POINT(82.9167 55.0333),"POLYGON((82.7511 54.991\,82.8684 54.9275\,83.0347 54.9416\,83.0135 54.8779\,82.9273 54.8675\,82.9261 54.8342\,82.9817 54.8522\,83.0559 54.8421\,83.0456 54.8357\,83.0771 54.8314\,83.0833 54.8051\,83.0869 54.8038\,83.1265 54.8017\,83.1465 54.819\,83.1278 54.827\,83.1425 54.8359\,83.1233 54.8411\,83.1236 54.8801\,83.1398 54.8858\,83.1315 54.9022\,83.1531 54.9155\,83.1438 54.931\,83.1602 54.957\,83.0757 54.9898\,83.0862 55.0636\,82.9986 55.1013\,83.0014 55.1329\,82.9648 
55.1104\,82.8965 55.1348\,82.8188 55.1207\,82.8021 55.0748\,82.8301 55.0483\,82.7511 54.991))" +PAP,Mais Gate Int'l,Port-au-Prince,Port-au-Prince,POINT(-72.3333 18.5333),"POLYGON((-72.5698 18.4932\,-72.5493 18.472\,-72.4701 18.4777\,-72.4518 18.397\,-72.4221 18.3975\,-72.435 18.3728\,-72.389 18.3915\,-72.3671 18.378\,-72.3647 18.3529\,-72.3225 18.3405\,-72.2637 18.3576\,-72.1994 18.3522\,-72.2103 18.4044\,-72.1649 18.4297\,-72.1793 18.4359\,-72.159 18.4576\,-72.2486 18.5869\,-72.3584 18.6307\,-72.3391 18.593\,-72.3531 18.5417\,-72.3787 18.5341\,-72.4137 18.5565\,-72.4308 18.5423\,-72.546 18.55\,-72.5698 18.4932))" +PAT,Lok Nayak Jaiprakash,Patna Rural,Patna,POINT(85.1 25.6),"POLYGON((85.0615 25.6458\,85.0845 25.5813\,85.1616 25.5801\,85.2064 25.5628\,85.1926 25.5024\,85.2817 25.5493\,85.2397 25.6016\,85.1325 25.6273\,85.1023 25.663\,85.0615 25.6458))" +PBC,Puebla,Municipio de Puebla,Puebla,POINT(-98.1833 19.0333),"POLYGON((-98.2941 18.8671\,-98.2802 18.8499\,-98.1683 18.8371\,-98.148 18.8466\,-98.1598 18.8681\,-98.1351 18.8894\,-98.1507 18.9063\,-98.0956 18.9129\,-98.1366 18.9551\,-98.1385 18.9935\,-98.1184 19.0298\,-98.1326 19.0434\,-98.079 19.0543\,-98.0322 19.1347\,-98.0365 19.1905\,-98.0202 19.2084\,-98.0317 19.2309\,-98.1644 19.1056\,-98.2043 19.1149\,-98.1907 19.14\,-98.2292 19.1346\,-98.2401 19.0874\,-98.2779 19.051\,-98.2243 19.0252\,-98.28 18.9843\,-98.2702 18.95\,-98.2902 18.9087\,-98.2685 18.8989\,-98.2941 18.8671))" +PBI,Palm Beach Int'l,West Palm Beach,West Palm Beach,POINT(-80.132 26.7469),"POLYGON((-80.2146 26.7662\,-80.1978 26.7657\,-80.2006 26.7093\,-80.1496 26.7082\,-80.1513 26.7224\,-80.127 26.7219\,-80.1094 26.747\,-80.1109 26.7126\,-80.0783 26.706\,-80.0699 26.6464\,-80.0435 26.6444\,-80.0484 26.7633\,-80.1247 26.7603\,-80.1158 26.7783\,-80.1331 26.7787\,-80.1321 26.7967\,-80.1971 26.8402\,-80.2146 26.7662)\,(-80.136 26.7452\,-80.1368 26.7463\,-80.134 26.7456\,-80.136 26.7452)\,(-80.1207 26.7464\,-80.1211 26.7473\,-80.1187 26.7473\,-80.1207 26.7464))" +PBM,Pengel Int'l,Paramaribo,Paramaribo,POINT(-55.2039 5.8522),"POLYGON((-55.2468 6.141\,-55.2452 5.8563\,-55.2279 5.8551\,-55.2297 5.7814\,-55.1673 5.7666\,-55.1503 5.7829\,-55.1606 5.8155\,-55.1111 5.8341\,-55.094 5.8689\,-55.1119 5.9008\,-55.1782 5.9286\,-55.2277 6.151\,-55.2468 6.141))" +PDG,Minangkabau Int'l,Padang,Padang,POINT(100.3531 -0.95),"POLYGON((100.2912 -0.8169\,100.3419 -0.8843\,100.3469 -0.9657\,100.3647 -1.0056\,100.3826 -0.9934\,100.3913 -1.0095\,100.3784 -1.0415\,100.3983 -1.0291\,100.4123 -1.0423\,100.4092 -1.0747\,100.3813 -1.0635\,100.3645 -1.0756\,100.3883 -1.1246\,100.3746 -1.1199\,100.36 -1.1334\,100.417 -1.1268\,100.4326 -1.1145\,100.4253 -1.0724\,100.4774 -1.0045\,100.5247 -1.0008\,100.5271 -0.9648\,100.5623 -0.9111\,100.5562 -0.8611\,100.5355 -0.847\,100.5449 -0.8121\,100.5246 -0.7899\,100.5104 -0.7949\,100.5094 -0.7547\,100.4793 -0.7518\,100.4766 -0.7334\,100.4562 -0.7263\,100.3854 -0.7556\,100.3674 -0.8004\,100.2912 -0.8169))" +PDL,João Paulo II,Ponta Delgada,Ponta Delgada,POINT(-25.67 37.74),"POLYGON((-25.8548 37.8597\,-25.8406 37.828\,-25.6957 37.7348\,-25.6066 37.744\,-25.5867 37.7699\,-25.6389 37.7916\,-25.617 37.8281\,-25.6896 37.8419\,-25.7188 37.8873\,-25.7741 37.9094\,-25.8212 37.9004\,-25.8548 37.8597))" +PDU,Paysandu,Paysandú,Paysandú,POINT(-58.0756 -32.3214),"POLYGON((-58.1078 -32.3326\,-58.0631 -32.3462\,-58.0546 -32.2948\,-58.0904 -32.29\,-58.1078 -32.3326))" +PDX,Portland Int'l,Vancouver,Vancouver,POINT(-122.5967 45.6366),"POLYGON((-122.7727 45.698\,-122.764 45.6573\,-122.6772 
45.6187\,-122.4718 45.5774\,-122.4665 45.5886\,-122.4657 45.6268\,-122.5057 45.6262\,-122.4955 45.6794\,-122.5727 45.66\,-122.6069 45.693\,-122.6193 45.6824\,-122.6031 45.6804\,-122.6052 45.6498\,-122.6395 45.6465\,-122.6364 45.6723\,-122.6719 45.6545\,-122.6932 45.6787\,-122.7206 45.6505\,-122.7434 45.6693\,-122.7286 45.6954\,-122.7497 45.6768\,-122.7727 45.698))" +PEE,Bolshesavino,Ленинский район,Perm,POINT(56.2489 58.0139),"POLYGON((56.1869 58.0401\,56.1507 58.0189\,56.2126 58.0162\,56.1989 58.0109\,56.2262 57.9995\,56.2639 58.0121\,56.258 58.0268\,56.28 58.0357\,56.2401 58.0609\,56.2485 58.0956\,56.2075 58.0933\,56.1666 58.0855\,56.1583 58.0515\,56.1869 58.0401))" +PEK,Beijing Capital,东城区,Beijing,POINT(116.4075 39.904),"POLYGON((116.3723 39.8658\,116.4049 39.8575\,116.4446 39.8887\,116.4287 39.9137\,116.4411 39.9437\,116.403 39.9726\,116.3802 39.9554\,116.3931 39.8708\,116.3723 39.8658))" +PEN,Penang Int'l,Central George Town,George Town,POINT(100.3292 5.4144),"POLYGON((100.2834 5.417\,100.2997 5.3744\,100.3102 5.4026\,100.3245 5.3972\,100.3198 5.3913\,100.3166 5.3842\,100.3142 5.3619\,100.3176 5.3621\,100.3179 5.386\,100.3273 5.3884\,100.3302 5.4038\,100.3379 5.4043\,100.3373 5.411\,100.3439 5.4131\,100.3466 5.4173\,100.3462 5.4229\,100.2997 5.4451\,100.2834 5.417))" +PER,Perth Int'l,City Of Kwinana,Kwinana,POINT(115.7702 -32.2394),"POLYGON((115.7551 -32.2516\,115.7853 -32.249\,115.7986 -32.2866\,115.8466 -32.272\,115.8776 -32.2799\,115.8828 -32.2046\,115.9007 -32.1819\,115.7749 -32.1814\,115.7551 -32.2516))" +PEW,Bacha Khan Int'l,تحصیل پشاور شہر,Peshawar,POINT(71.5675 34.0144),"POLYGON((71.3688 34.054\,71.4072 34.0089\,71.412 33.962\,71.4347 33.9381\,71.4786 33.9281\,71.4963 33.9468\,71.522 33.9401\,71.5295 33.9181\,71.5647 33.9156\,71.6025 33.9256\,71.6207 33.9582\,71.6108 33.9835\,71.6251 34.0161\,71.5976 34.0158\,71.6109 34.0304\,71.5746 34.0332\,71.5712 34.0499\,71.5471 34.0383\,71.5182 34.0628\,71.4717 34.0606\,71.4673 34.0774\,71.3688 34.054))" +PFO,Paphos Int'l,Δήμος Πάφου,Paphos,POINT(32.4167 34.7667),"POLYGON((32.3998 34.7549\,32.429 34.745\,32.4245 34.7565\,32.4636 34.7985\,32.4016 34.7847\,32.3998 34.7549))" +PHC,Port Harcourt Int'l,Obio/Akpor,Port Harcourt,POINT(7.0336 4.8242),"POLYGON((6.8991 4.9045\,6.9043 4.8527\,6.9236 4.8437\,6.9273 4.7939\,6.9806 4.7928\,6.9873 4.823\,7.0781 4.8145\,7.0938 4.839\,7.1524 4.8507\,7.1261 4.8966\,7.1044 4.893\,7.0252 4.9398\,6.9809 4.9224\,6.9949 4.8929\,6.8991 4.9045))" +PHE,Port Hedland Int'l,Port Hedland,Port Hedland,POINT(118.6011 -20.31),"POLYGON((118.5726 -20.3142\,118.5909 -20.3317\,118.6362 -20.3269\,118.6016 -20.3892\,118.6868 -20.383\,118.6868 -20.2972\,118.648 -20.3062\,118.6317 -20.2899\,118.6029 -20.3066\,118.5969 -20.3013\,118.602 -20.2967\,118.5996 -20.2883\,118.5935 -20.2785\,118.5726 -20.3142))" +PHL,Philadelphia Int'l,Philadelphia County,Philadelphia,POINT(-75.1339 40.0077),"POLYGON((-75.2803 39.975\,-75.2478 39.9639\,-75.2348 39.9373\,-75.2625 39.8766\,-75.2114 39.867\,-75.1432 39.8849\,-75.1284 39.9097\,-75.1326 39.9567\,-75.0713 39.9805\,-74.9744 40.0498\,-74.9852 40.0572\,-74.9582 40.0844\,-74.9638 40.1176\,-75.0151 40.138\,-75.1089 40.0457\,-75.1757 40.0844\,-75.1885 40.0728\,-75.2236 40.0929\,-75.2644 40.0541\,-75.2058 40.0117\,-75.2803 39.975))" +PIK,Glasgow Prestwick,South Ayrshire,Prestwick,POINT(-4.6142 55.4956),"POLYGON((-5.061 55.0271\,-4.4637 55.162\,-4.6035 55.3949\,-4.4098 55.5536\,-4.6631 55.5706\,-5.061 55.0271))" +PIR,Pierre Regional,Pierre,Pierre,POINT(-100.3205 44.3748),"POLYGON((-100.3717 
44.3962\,-100.3627 44.366\,-100.3173 44.3413\,-100.2948 44.3414\,-100.2999 44.3743\,-100.2697 44.3633\,-100.2692 44.3935\,-100.3717 44.3962))" +PIT,Greater Pittsburgh Int'l,Pittsburgh,Pittsburgh,POINT(-79.9763 40.4397),"POLYGON((-80.0955 40.4594\,-80.0824 40.4402\,-80.0554 40.4439\,-80.0843 40.4203\,-80.0346 40.4283\,-80.0514 40.402\,-80.0355 40.4043\,-80.0068 40.3748\,-79.9761 40.3813\,-79.9796 40.4016\,-79.9639 40.4096\,-79.9529 40.4107\,-79.9378 40.393\,-79.956 40.4103\,-79.9698 40.4008\,-79.9071 40.3615\,-79.9133 40.397\,-79.9338 40.3978\,-79.8995 40.4158\,-79.8924 40.4505\,-79.8657 40.4561\,-79.89 40.4617\,-79.8848 40.4907\,-79.9382 40.4911\,-79.9835 40.4667\,-80.0105 40.5012\,-80.0498 40.4856\,-80.0523 40.4653\,-80.0955 40.4594)\,(-79.9814 40.4057\,-79.9882 40.4185\,-79.9822 40.4177\,-79.9814 40.4057))" +PKU,Simpang Tiga,Pekanbaru,Pekanbaru,POINT(101.4453 0.5092),"POLYGON((101.3261 0.5986\,101.3748 0.5542\,101.3497 0.4906\,101.3629 0.432\,101.4474 0.417\,101.4536 0.4427\,101.477 0.456\,101.5671 0.4252\,101.5929 0.5319\,101.5758 0.5506\,101.5914 0.6066\,101.571 0.639\,101.5498 0.6384\,101.5403 0.658\,101.4891 0.6513\,101.4407 0.666\,101.3973 0.6924\,101.3753 0.6448\,101.3261 0.5986))" +PLM,Sultan Mahmud Badaruddin II,Palembang,Palembang,POINT(104.7556 -2.9861),"POLYGON((104.6137 -2.9541\,104.6424 -3.0239\,104.721 -3.0727\,104.6816 -3.0972\,104.6941 -3.1162\,104.75 -3.0401\,104.7948 -3.0239\,104.8123 -3.0328\,104.8365 -3.0143\,104.8346 -2.9867\,104.8606 -2.9699\,104.8215 -2.9134\,104.7918 -2.9046\,104.7569 -2.9179\,104.7483 -2.8978\,104.7272 -2.9022\,104.7117 -2.8548\,104.679 -2.8656\,104.67 -2.8851\,104.678 -2.9393\,104.6618 -2.9363\,104.6561 -2.9576\,104.6235 -2.9283\,104.6137 -2.9541))" +PLQ,Palanga Int'l,Klaipėda,Klaipėda,POINT(21.1667 55.75),"POLYGON((21.0751 55.7741\,21.0767 55.727\,21.0941 55.7223\,21.1032 55.6672\,21.1345 55.6738\,21.1326 55.6322\,21.172 55.6348\,21.1894 55.6128\,21.1758 55.6429\,21.2398 55.6501\,21.2232 55.6749\,21.2403 55.681\,21.2206 55.6784\,21.1706 55.7529\,21.1355 55.7721\,21.1376 55.7966\,21.0751 55.7741))" +PLZ,H F Verwoerd,Nelson Mandela Bay Metropolitan Municipality,Port Elizabeth,POINT(25.6 -33.9581),"POLYGON((25.1922 -33.9216\,25.7038 -34.032\,25.631 -33.8642\,25.868 -33.6994\,25.3463 -33.5571\,25.1922 -33.9216))" +PMC,El Tepual Int'l,Puerto Montt,Puerto Montt,POINT(-72.9333 -41.4667),"POLYGON((-73.3402 -41.4824\,-73.3248 -41.5336\,-73.2866 -41.5586\,-73.2552 -41.5724\,-73.218 -41.5579\,-73.1305 -41.5845\,-73.1064 -41.6286\,-73.1452 -41.6578\,-73.1404 -41.6821\,-73.1175 -41.69\,-73.0499 -41.6832\,-72.9771 -41.6448\,-72.6481 -41.7129\,-72.6486 -41.6952\,-72.5794 -41.6702\,-72.5611 -41.6373\,-72.5172 -41.6444\,-72.4139 -41.5754\,-72.4139 -41.5553\,-72.3904 -41.5481\,-72.3773 -41.4824\,-72.4343 -41.427\,-72.3742 -41.4016\,-72.3864 -41.3848\,-72.4233 -41.3819\,-72.4418 -41.351\,-72.4916 -41.379\,-72.5216 -41.3777\,-72.6101 -41.3298\,-72.644 -41.3445\,-72.6589 -41.3745\,-72.6972 -41.3801\,-72.7244 -41.3759\,-72.7179 -41.3588\,-72.7693 -41.3509\,-72.7643 -41.3368\,-72.8371 -41.3401\,-72.9239 -41.3792\,-72.9379 -41.4132\,-73.0135 -41.4069\,-73.0256 -41.3895\,-73.1156 -41.4079\,-73.1932 -41.387\,-73.2599 -41.4474\,-73.2475 -41.4603\,-73.2631 -41.4742\,-73.3402 -41.4824))" +PMG,Ponta Porã Int'l,Ponta Porã,Ponta Porã,POINT(-55.7258 -22.5358),"POLYGON((-55.7642 -22.3847\,-55.683 -22.5889\,-55.3585 -22.7095\,-55.3497 -22.4861\,-54.9764 -22.3509\,-55.2911 -22.1914\,-55.6095 -22.2514\,-55.4554 -22.1399\,-55.5576 -22.1061\,-55.7642 -22.3847))" +PMI,Palma de 
Mallorca,Marratxí,Marratxi,POINT(2.7527 39.6421),"POLYGON((2.6674 39.6334\,2.687 39.5836\,2.7976 39.614\,2.7504 39.6587\,2.6674 39.6334))" +PMO,Palermo,Palermo,Palermo,POINT(13.3613 38.1157),"POLYGON((13.2422 38.1005\,13.2919 38.1086\,13.3234 38.0499\,13.3681 38.0721\,13.4216 38.0594\,13.4521 38.0926\,13.3671 38.1193\,13.3627 38.134\,13.3685 38.1367\,13.368 38.1326\,13.3805 38.1224\,13.3657 38.1823\,13.33 38.199\,13.3164 38.2246\,13.2585 38.2012\,13.2761 38.1861\,13.2559 38.1685\,13.2651 38.1503\,13.2422 38.1005))" +PMR,Palmerston N. Int'l,Palmerston North City,Palmerston North,POINT(175.6117 -40.355),"POLYGON((175.4973 -40.4339\,175.5596 -40.4832\,175.5563 -40.5011\,175.5842 -40.5297\,175.6097 -40.5019\,175.6253 -40.5101\,175.6892 -40.4656\,175.7297 -40.4125\,175.7237 -40.4024\,175.7664 -40.3726\,175.7906 -40.3135\,175.7581 -40.3003\,175.7719 -40.2812\,175.7583 -40.2698\,175.7189 -40.2905\,175.6138 -40.2681\,175.5965 -40.28\,175.6121 -40.2937\,175.5318 -40.3481\,175.5519 -40.3656\,175.539 -40.3873\,175.5518 -40.4092\,175.4973 -40.4339))" +PNH,Pochentong,រាជធានីភ្នំពេញ,Phnom Penh,POINT(104.9211 11.5694),"POLYGON((104.7527 11.666\,104.7219 11.5432\,104.7496 11.5475\,104.7454 11.5294\,104.767 11.5116\,104.7443 11.4549\,104.7773 11.467\,104.8048 11.4316\,104.8681 11.4201\,104.8806 11.4362\,104.923 11.429\,104.9214 11.4844\,104.9459 11.4945\,104.967 11.464\,105.0153 11.4662\,105.044 11.5323\,104.9467 11.5619\,104.9326 11.6264\,104.968 11.7146\,104.9395 11.7349\,104.9057 11.6952\,104.8694 11.7073\,104.8671 11.6671\,104.8287 11.7182\,104.8137 11.6739\,104.7527 11.666))" +PNI,Pohnpei Int'l,Pohnpei,Kolonia,POINT(158.2081 6.9639),"POLYGON((157.563 7.0908\,157.5725 7.0249\,157.6067 6.9527\,157.7047 6.8654\,157.6973 6.7671\,157.7469 6.6309\,157.8022 6.566\,157.8743 6.5295\,157.9799 6.526\,158.1081 6.5826\,158.2539 6.5577\,158.3412 6.5729\,158.4406 6.6323\,158.5323 6.734\,158.5623 6.7984\,158.5655 6.8915\,158.5072 7.0791\,158.3897 7.2057\,158.3062 7.2513\,158.1819 7.2476\,158.0121 7.149\,157.975 7.2015\,157.9056 7.2531\,157.8005 7.2862\,157.724 7.2846\,157.6572 7.2572\,157.6068 7.212\,157.5745 7.1534\,157.563 7.0908))" +POA,Salgado Filho Int'l,Porto Alegre,Porto Alegre,POINT(-51.23 -30.0331),"POLYGON((-51.3034 -29.9487\,-51.2614 -30.01\,-51.2714 -30.0563\,-51.2504 -30.0331\,-51.2337 -30.0485\,-51.2677 -30.1037\,-51.2257 -30.145\,-51.2267 -30.1769\,-51.2484 -30.1866\,-51.1995 -30.1932\,-51.1995 -30.2108\,-51.178 -30.2184\,-51.1935 -30.2374\,-51.1553 -30.2403\,-51.1596 -30.2619\,-51.1334 -30.2694\,-51.119 -30.2456\,-51.1016 -30.2594\,-51.0989 -30.2572\,-51.1073 -30.2457\,-51.1071 -30.2418\,-51.1065 -30.2407\,-51.0534 -30.26\,-51.053 -30.2302\,-51.0189 -30.2059\,-51.0794 -30.1791\,-51.0722 -30.1613\,-51.1206 -30.0653\,-51.0843 -30.0474\,-51.0885 -29.9639\,-51.1262 -29.9542\,-51.2006 -29.9718\,-51.2518 -29.9325\,-51.3034 -29.9487))" +POM,Port Moresby Int'l,National Capital District,Port Moresby,POINT(147.1494 -9.4789),"POLYGON((147.0976 -9.4208\,147.118 -9.4341\,147.1063 -9.4657\,147.1249 -9.4654\,147.1406 -9.51\,147.168 -9.529\,147.2494 -9.5438\,147.2963 -9.4972\,147.2794 -9.4003\,147.2565 -9.3828\,147.2701 -9.3648\,147.26 -9.3326\,147.204 -9.3345\,147.1982 -9.3504\,147.1556 -9.3602\,147.0977 -9.3934\,147.0976 -9.4208))" +PPG,Pago Pago Int'l,Pago Pago,Pago Pago,POINT(-170.7046 -14.274),"POLYGON((-170.7475 -14.2366\,-170.728 -14.2669\,-170.7152 -14.2784\,-170.7116 -14.2849\,-170.7193 -14.2974\,-170.7037 -14.2951\,-170.7024 -14.2903\,-170.6964 -14.2878\,-170.6985 -14.2854\,-170.6978 -14.2799\,-170.6944 
-14.2739\,-170.6942 -14.2655\,-170.686 -14.2646\,-170.6898 -14.2586\,-170.6921 -14.2457\,-170.7061 -14.205\,-170.7203 -14.2147\,-170.7263 -14.2227\,-170.7372 -14.2258\,-170.7475 -14.2366))" +PPT,Tahiti Faa'a Int'l,Îles du Vent,Papeete,POINT(-149.5667 -17.5334),"POLYGON((-150.1294 -17.4907\,-150.0949 -17.6202\,-149.9753 -17.7471\,-149.8874 -17.7869\,-149.781 -17.7911\,-149.7431 -17.8646\,-149.6822 -17.9151\,-149.5289 -17.9726\,-149.429 -17.9796\,-149.3211 -18.0503\,-149.196 -18.0797\,-149.084 -18.0672\,-148.9885 -18.0048\,-148.9294 -17.9033\,-148.919 -17.7626\,-148.9549 -17.668\,-149.0247 -17.5911\,-149.0981 -17.5528\,-149.1291 -17.4755\,-149.1899 -17.403\,-149.3533 -17.32\,-149.5021 -17.2936\,-149.6231 -17.3326\,-149.6812 -17.2938\,-149.748 -17.2744\,-149.9532 -17.2894\,-150.0295 -17.318\,-150.0824 -17.363\,-150.1156 -17.4182\,-150.1294 -17.4907))" +PRG,Ruzyn,Praha,Prague,POINT(14.4214 50.0875),"POLYGON((14.2244 50.103\,14.2894 50.0771\,14.2481 50.0583\,14.3159 50.0236\,14.2948 50.0022\,14.3428 49.9906\,14.3268 49.9719\,14.3449 49.9676\,14.3254 49.9572\,14.3956 49.9419\,14.4006 49.9707\,14.4622 49.9708\,14.5275 50.0108\,14.5824 50.0164\,14.6402 49.9944\,14.6695 50.0188\,14.6402 50.0569\,14.7 50.0721\,14.7068 50.087\,14.6574 50.1065\,14.6591 50.1226\,14.6005 50.1295\,14.5877 50.1452\,14.599 50.1541\,14.5632 50.1502\,14.5505 50.1661\,14.5342 50.1616\,14.5325 50.1772\,14.5269 50.1774\,14.3949 50.1414\,14.3657 50.148\,14.3608 50.116\,14.3206 50.1152\,14.3024 50.1301\,14.2244 50.103))" +PRN,Pristina,Prishtinë,Pristina,POINT(21.1622 42.6633),"POLYGON((21.1066 42.6413\,21.1661 42.6291\,21.1828 42.6588\,21.2033 42.6515\,21.2107 42.6501\,21.2136 42.6515\,21.1858 42.6656\,21.1983 42.6735\,21.1699 42.697\,21.1322 42.686\,21.1066 42.6413))" +PTG,Polokwane Int'l,Polokwane Local Municipality,Polokwane,POINT(29.45 -23.9),"POLYGON((29.12 -23.9965\,29.189 -24.2569\,29.4716 -24.278\,29.7796 -24.1079\,29.9181 -23.7602\,29.5008 -23.8114\,29.2763 -23.6223\,29.12 -23.9965))" +PTY,Tocumen Int'l,Tocumen,Tocumen,POINT(-79.38 9.08),"POLYGON((-79.4795 9.2145\,-79.4625 9.1889\,-79.4629 9.1418\,-79.4156 9.1371\,-79.3863 9.1026\,-79.4038 9.0698\,-79.3985 9.0288\,-79.3522 9.0457\,-79.3505 9.0827\,-79.3676 9.1092\,-79.3943 9.1356\,-79.4379 9.1498\,-79.4435 9.1928\,-79.4795 9.2145))" +PUJ,Punta Cana,Higüey,Pantanal,POINT(-68.3667 18.5333),"POLYGON((-68.9377 18.6803\,-68.9028 18.634\,-68.9053 18.6083\,-68.8513 18.5637\,-68.8502 18.5205\,-68.8238 18.4968\,-68.7772 18.5003\,-68.774 18.5216\,-68.7195 18.5437\,-68.7173 18.5275\,-68.6663 18.5269\,-68.594 18.4711\,-68.4748 18.4664\,-68.429 18.4414\,-68.4015 18.4634\,-68.3229 18.5979\,-68.3448 18.6439\,-68.3941 18.6642\,-68.5271 18.78\,-68.551 18.7828\,-68.6996 18.9295\,-68.7753 18.9743\,-68.7815 18.9525\,-68.8184 18.9392\,-68.8169 18.9151\,-68.842 18.8945\,-68.8087 18.8311\,-68.8096 18.7878\,-68.8517 18.7724\,-68.8694 18.7221\,-68.9251 18.7226\,-68.9377 18.6803))" +PUS,Kimhae Int'l,부산광역시,Busan,POINT(129.075 35.18),"POLYGON((128.7568 35.0073\,128.7963 34.9752\,128.9485 34.7253\,129.1008 34.9494\,129.2666 35.1337\,129.4656 35.1309\,129.489 35.1862\,129.3037 35.3369\,129.2808 35.3411\,129.2845 35.3645\,129.2657 35.3874\,129.2006 35.3889\,129.1783 35.3516\,129.1183 35.3683\,129.1348 35.3531\,129.1108 35.3065\,129.0128 35.2735\,128.9985 35.2327\,128.8791 35.21\,128.8769 35.1516\,128.8692 35.1679\,128.7953 35.1585\,128.8431 35.1084\,128.8236 35.1017\,128.8183 35.0741\,128.7832 35.0715\,128.7568 35.0073))" +PVD,T.F. 
Green,Providence,Providence,POINT(-71.4187 41.823),"POLYGON((-71.4727 41.8371\,-71.4687 41.8051\,-71.4427 41.807\,-71.4239 41.7773\,-71.3791 41.787\,-71.4032 41.8113\,-71.3784 41.8283\,-71.3769 41.8574\,-71.4349 41.8618\,-71.4727 41.8371))" +PVG,Shanghai Pudong Int'l,浦东新区,Shanghai,POINT(121.4667 31.1667),"POLYGON((121.4525 31.1473\,121.467 31.1121\,121.4847 31.1269\,121.5558 31.1135\,121.5632 31.0952\,121.546 31.0847\,121.5571 31.0861\,121.5668 31.082\,121.5693 31.0833\,121.5718 31.0824\,121.5499 31.0731\,121.5422 31.0496\,121.5572 31.0457\,121.5491 31.0207\,121.57 31.025\,121.5662 31.0006\,121.6953 30.9896\,121.7745 30.9321\,121.7635 30.8661\,121.7792 30.7278\,121.9179 30.7701\,122.0753 30.8441\,122.1697 30.9104\,122.2826 31.0823\,122.3283 31.1177\,122.0846 31.2324\,121.8646 31.2696\,121.7536 31.3075\,121.6985 31.3501\,121.5566 31.4168\,121.4992 31.3692\,121.5551 31.3326\,121.5643 31.279\,121.5329 31.2523\,121.4903 31.2441\,121.5058 31.2163\,121.4618 31.1828\,121.4525 31.1473))" +PVH,Gov. Jorge Teixeira de Oliveira Int'l,Porto Velho,Porto Velho,POINT(-63.9039 -8.7619),"POLYGON((-64.3784 -9.1773\,-63.9672 -9.833\,-63.6839 -9.833\,-63.554 -9.442\,-63.786 -9.374\,-63.798 -9.0487\,-63.7097 -8.7146\,-63.4447 -8.5822\,-63.4993 -8.4436\,-63.6457 -8.5308\,-63.5347 -7.974\,-63.7816 -8.3292\,-63.9441 -8.3312\,-63.9246 -8.5755\,-64.3784 -9.1773))" +PZU,Port Sudan New Int'l,بورتسودان,Port Sudan,POINT(37.2167 19.6167),"POLYGON((36.9258 19.8467\,36.9419 19.5724\,37.0613 19.4037\,37.0753 19.0098\,37.2815 19.0257\,37.4207 19.0675\,37.5551 19.2203\,37.5009 19.5249\,37.4602 19.6142\,37.4816 19.848\,37.4665 19.9177\,37.4176 20.0075\,37.43 20.1132\,37.1675 20.1006\,36.9952 20.0389\,36.9258 19.8467))" +QNJ,Annemasse,Saint-Julien-en-Genevois,Annemasse,POINT(6.2364 46.1958),"POLYGON((5.8202 46.1044\,5.8101 45.9878\,5.834 45.9703\,5.8376 45.9321\,5.8624 45.9325\,5.8939 45.9632\,5.925 45.9374\,5.9564 45.9623\,5.988 45.9635\,5.9765 46.0097\,5.9902 46.0139\,6.0556 46.0152\,6.0826 45.9935\,6.0939 46.0067\,6.1053 45.9814\,6.1278 45.9831\,6.1287 46.0101\,6.1953 46.0464\,6.1998 46.0666\,6.2299 46.0837\,6.2637 46.0749\,6.2617 46.0865\,6.3049 46.1094\,6.3453 46.1139\,6.3167 46.1474\,6.3705 46.1536\,6.3765 46.1789\,6.3469 46.197\,6.3549 46.2297\,6.34 46.26\,6.2978 46.2667\,6.3103 46.244\,6.2947 46.2252\,6.2219 46.2007\,6.1365 46.1416\,6.0523 46.1514\,6.0354 46.1343\,5.9941 46.1446\,5.9175 46.1304\,5.8917 46.1179\,5.8833 46.0852\,5.8202 46.1044))" +QRO,Queretaro Int'l,Santiago de Querétaro,Querétaro,POINT(-100.3928 20.5875),"POLYGON((-100.4801 20.6269\,-100.47 20.612\,-100.4745 20.6292\,-100.4545 20.6218\,-100.4538 20.591\,-100.4285 20.5897\,-100.4237 20.5599\,-100.3918 20.5553\,-100.3873 20.5403\,-100.3408 20.5868\,-100.3613 20.5932\,-100.312 20.6136\,-100.3186 20.6242\,-100.3365 20.6128\,-100.3365 20.6274\,-100.3585 20.6151\,-100.3875 20.6224\,-100.3781 20.6344\,-100.3925 20.6396\,-100.3914 20.6513\,-100.3835 20.6402\,-100.3736 20.6641\,-100.4039 20.6666\,-100.4246 20.645\,-100.407 20.6415\,-100.4261 20.6399\,-100.4184 20.6703\,-100.4362 20.6592\,-100.4554 20.6839\,-100.4466 20.6576\,-100.4765 20.6621\,-100.4583 20.6522\,-100.4767 20.65\,-100.4801 20.6269))" +RAI,Praia Int'l,Praia,Praia,POINT(-23.509 14.918),"POLYGON((-23.5885 14.9769\,-23.5534 14.9031\,-23.5089 14.9001\,-23.511 14.9117\,-23.5034 14.9192\,-23.502 14.9051\,-23.4818 14.9058\,-23.4541 14.963\,-23.5212 14.9963\,-23.5885 14.9769))" +RAJ,Rajkot,Rajkot East Taluka,Rājkot,POINT(70.7833 22.3),"POLYGON((70.7587 22.3196\,70.7752 22.3131\,70.7964 22.2523\,70.825 
22.2588\,70.849 22.2335\,70.8624 22.2414\,70.8525 22.2553\,70.8439 22.2416\,70.8358 22.2604\,70.8551 22.2756\,70.8684 22.2452\,70.8792 22.249\,70.8682 22.2713\,70.8507 22.2772\,70.8618 22.3036\,70.8413 22.308\,70.823 22.3406\,70.7791 22.3386\,70.7749 22.3173\,70.7587 22.3196))" +RAK,Marrakech-Menara,Pachalik de Marrakech,Marrakech,POINT(-8.0089 31.63),"POLYGON((-8.0893 31.7009\,-8.0658 31.6532\,-8.0696 31.628\,-8.0881 31.6283\,-8.0719 31.57\,-8.02 31.5996\,-8.0075 31.5907\,-7.9773 31.6193\,-7.9659 31.5785\,-7.9811 31.5749\,-7.9755 31.5565\,-7.9205 31.5542\,-7.9003 31.5723\,-7.8988 31.5469\,-7.8864 31.5463\,-7.8985 31.5815\,-7.9153 31.59\,-7.9277 31.6914\,-8.0893 31.7009))" +RAR,Rarotonga Int'l,Avarua,Avarua,POINT(-159.771 -21.207),"POLYGON((-159.964 -21.0531\,-159.8032 -21.2333\,-159.777 -21.2409\,-159.5911 -21.0656\,-159.6802 -21.0137\,-159.7918 -20.9953\,-159.8847 -21.0078\,-159.964 -21.0531))" +REC,Gilberto Freyre Int'l,Recife,Recife,POINT(-34.9 -8.05),"POLYGON((-35.0186 -8.0564\,-34.9716 -8.0784\,-34.9568 -8.1386\,-34.9089 -8.1552\,-34.8588 -8.0471\,-34.873 -8.0173\,-34.917 -7.9881\,-34.9132 -7.937\,-34.9808 -7.9417\,-35 -7.929\,-35.0028 -7.9611\,-34.9625 -7.9751\,-34.9597 -8.0215\,-35.0186 -8.0564))" +REK,Reykjavik Air Terminal,Reykjavíkurborg,Reykjavík,POINT(-21.94 64.1467),"POLYGON((-21.9838 64.1502\,-21.938 64.1205\,-21.8477 64.1159\,-21.8596 64.0983\,-21.8348 64.0897\,-21.7838 64.0985\,-21.7863 64.0764\,-21.8259 64.0595\,-21.8103 64.0404\,-21.7549 64.0566\,-21.6608 64.0525\,-21.7026 64.0648\,-21.6721 64.0801\,-21.7066 64.0923\,-21.6801 64.1016\,-21.6986 64.106\,-21.6692 64.1413\,-21.7425 64.1476\,-21.7465 64.1652\,-21.7871 64.1564\,-21.7805 64.1675\,-21.8256 64.1718\,-21.7897 64.1602\,-21.8265 64.1426\,-21.7951 64.1271\,-21.8269 64.1361\,-21.8442 64.1253\,-21.8388 64.1469\,-21.8646 64.1581\,-21.9838 64.1502))" +REP,Siem Reap Int'l,ក្រុងសៀមរាប,Siem Reap,POINT(103.8597 13.3622),"POLYGON((103.6965 13.2771\,103.8858 13.1029\,103.8992 13.3611\,103.9237 13.4224\,103.9118 13.468\,103.8017 13.468\,103.7623 13.4441\,103.7629 13.4262\,103.7788 13.4251\,103.777 13.3794\,103.7554 13.3805\,103.6965 13.2771))" +RGA,Hermes Quijada Int'l,Cabo de Hornos,Puerto Williams,POINT(-67.6167 -54.9333),"POLYGON((-72.5558 -54.6577\,-69.6383 -55.5421\,-69.3395 -55.842\,-68.0708 -55.7169\,-67.8735 -55.8869\,-67.0099 -55.9368\,-66.7778 -55.829\,-66.7747 -55.5652\,-66.416 -55.2195\,-67.2607 -54.9062\,-68.6097 -54.9119\,-68.6115 -54.5918\,-69.0304 -54.59\,-69.2822 -54.7921\,-69.8847 -54.5976\,-70.7672 -54.6605\,-71.1215 -54.4166\,-71.7185 -54.3463\,-72.5558 -54.6577))" +RGL,Piloto Civil Norberto Fernandez Int'l,Río Gallegos,Río Gallegos,POINT(-69.2161 -51.6233),"POLYGON((-69.3633 -51.6186\,-69.3635 -51.6573\,-69.2998 -51.6579\,-69.2793 -51.6925\,-69.1616 -51.6487\,-69.2582 -51.6006\,-69.2737 -51.6001\,-69.2751 -51.6179\,-69.3633 -51.6186))" +RGN,Mingaladon,Southern District,Rangoon,POINT(96.16 16.795),"POLYGON((96.1547 16.8292\,96.1722 16.8058\,96.159 16.7793\,96.202 16.7665\,96.2423 16.7956\,96.227 16.8182\,96.1876 16.8101\,96.1646 16.852\,96.1547 16.8292))" +RIC,Richmond Int'l,Henrico County,Highland Springs,POINT(-77.3285 37.5516),"POLYGON((-77.6549 37.6381\,-77.6164 37.5783\,-77.6492 37.5596\,-77.5297 37.5592\,-77.5464 37.5737\,-77.5325 37.5925\,-77.4824 37.5731\,-77.4781 37.599\,-77.4476 37.6028\,-77.4133 37.58\,-77.4117 37.5524\,-77.3857 37.5304\,-77.3939 37.505\,-77.4168 37.5173\,-77.4285 37.433\,-77.3985 37.4213\,-77.3835 37.3847\,-77.3261 37.3785\,-77.3004 37.4059\,-77.3093 37.366\,-77.2806 
37.3533\,-77.2486 37.3949\,-77.2346 37.3815\,-77.2151 37.3856\,-77.226 37.4054\,-77.177 37.4914\,-77.2317 37.5396\,-77.4039 37.6053\,-77.4459 37.6841\,-77.4786 37.6809\,-77.5113 37.7018\,-77.5579 37.6819\,-77.6068 37.7098\,-77.6304 37.707\,-77.6549 37.6381))" +RIX,Riga,Jūrmala,Jūrmala,POINT(23.7703 56.9681),"POLYGON((23.4806 56.9648\,23.4749 56.927\,23.6021 56.9244\,23.6271 56.942\,23.6802 56.9317\,23.7774 56.9486\,23.7817 56.9703\,23.8178 56.9523\,23.9127 56.9626\,23.971 56.9865\,23.9344 56.9956\,23.9326 57.0088\,23.6949 56.9641\,23.553 56.98\,23.4806 56.9648))" +RJH,Shah Makhdum,Rajshahi Metro,Rājshāhi,POINT(88.6 24.3667),"POLYGON((88.4125 24.3797\,88.525 24.3136\,88.5772 24.3164\,88.6522 24.2942\,88.6762 24.3222\,88.6466 24.3827\,88.5548 24.3854\,88.5415 24.3706\,88.5637 24.3648\,88.5289 24.3617\,88.4125 24.3797))" +ROB,Roberts Int'l,Firestone,Harbel,POINT(-10.35 6.2833),"POLYGON((-10.482 6.3641\,-10.4531 6.3036\,-10.394 6.288\,-10.3671 6.3001\,-10.347 6.2487\,-10.2966 6.3005\,-10.3132 6.3146\,-10.3015 6.3269\,-10.3137 6.3451\,-10.2999 6.3408\,-10.269 6.3704\,-10.2711 6.4257\,-10.2373 6.4419\,-10.2204 6.4882\,-10.277 6.4983\,-10.3055 6.4846\,-10.3303 6.4962\,-10.384 6.4876\,-10.4051 6.3677\,-10.4114 6.3921\,-10.482 6.3641))" +ROC,Greater Rochester Int'l,Town of Greece,Greece,POINT(-77.6988 43.246),"POLYGON((-77.7535 43.1794\,-77.6602 43.1797\,-77.6668 43.1951\,-77.6809 43.1917\,-77.7015 43.1887\,-77.6472 43.2039\,-77.6156 43.2606\,-77.7484 43.3347\,-77.7535 43.1794))" +ROK,Rockhampton,Gracemere,Gracemere,POINT(150.4558 -23.4391),"POLYGON((150.3999 -23.4487\,150.4443 -23.5103\,150.4604 -23.5101\,150.4696 -23.4797\,150.5002 -23.4706\,150.5146 -23.439\,150.4317 -23.4048\,150.3999 -23.4487))" +ROP,Rota Int'l,Saipan Municipality,Capitol Hill,POINT(145.7546 15.2137),"POLYGON((145.5726 15.125\,145.6426 15.1069\,145.7442 15.0229\,145.7501 15.0666\,145.8027 15.0756\,145.8374 15.1201\,145.876 15.1354\,145.9094 15.2741\,145.8963 15.3087\,145.8528 15.3519\,145.7818 15.3483\,145.7085 15.2753\,145.687 15.1974\,145.6483 15.1387\,145.5726 15.125))" +ROR,Roman Tmetuchl Int'l,Koror,Koror,POINT(134.4792 7.3419),"POLYGON((133.5741 7.2869\,133.8835 7.1652\,134.0728 7.0774\,134.0761 7.0758\,134.121 7.0256\,134.2908 7.1174\,134.5057 6.8176\,134.6336 7.0217\,134.5154 7.2598\,134.5163 7.2927\,134.5239 7.3089\,134.5238 7.3247\,134.5125 7.3512\,134.5 7.3673\,134.4888 7.3704\,134.4217 7.418\,133.8686 7.6373\,133.5741 7.2869))" +ROS,Rosario – Islas Malvinas Int'l,Funes,Funes,POINT(-60.8167 -32.9167),"POLYGON((-60.8721 -32.9301\,-60.7829 -32.9441\,-60.7825 -32.9281\,-60.7692 -32.9369\,-60.7621 -32.9239\,-60.7907 -32.9205\,-60.7974 -32.8864\,-60.8266 -32.9112\,-60.8629 -32.8927\,-60.8721 -32.9301))" +ROV,Rostov-on-Don,городской округ Батайск,Bataysk,POINT(39.7333 47.1667),"POLYGON((39.6665 47.1219\,39.6864 47.1077\,39.7224 47.1216\,39.7248 47.104\,39.7435 47.0989\,39.7464 47.0404\,39.7897 47.0407\,39.7995 47.0578\,39.7787 47.0601\,39.7643 47.0963\,39.8056 47.0968\,39.8186 47.1267\,39.7885 47.1688\,39.7123 47.1752\,39.7323 47.1593\,39.6665 47.1219))" +RPR,Raipur,Durg Tahsil,Bhilai,POINT(81.38 21.21),"POLYGON((81.1482 21.2119\,81.199 21.222\,81.1963 21.1965\,81.2164 21.1678\,81.1956 21.1214\,81.2061 21.0974\,81.1678 21.102\,81.1514 21.0892\,81.1582 21.0694\,81.1912 21.0719\,81.1918 21.0394\,81.2089 21.0281\,81.2243 21.0468\,81.2705 21.037\,81.3012 21.0623\,81.3044 21.0406\,81.3271 21.0367\,81.3659 21.038\,81.3882 21.0571\,81.4015 21.091\,81.3912 21.1126\,81.4092 21.1208\,81.4062 21.2144\,81.427 21.2351\,81.39 
21.2631\,81.3929 21.2934\,81.3736 21.2843\,81.3506 21.3125\,81.3664 21.3326\,81.3216 21.366\,81.3044 21.3521\,81.3129 21.3132\,81.2795 21.2942\,81.2263 21.2998\,81.2177 21.2615\,81.1687 21.2682\,81.1492 21.2459\,81.1642 21.2354\,81.1482 21.2119))" +RTM,Rotterdam The Hague,Den Haag,The Hague,POINT(4.31 52.08),"POLYGON((4.185 52.0615\,4.2123 52.0307\,4.2727 52.0359\,4.2955 52.0148\,4.3076 52.0182\,4.291 52.0416\,4.3403 52.0619\,4.3598 52.0265\,4.4225 52.0572\,4.4039 52.0778\,4.348 52.0565\,4.3555 52.068\,4.3352 52.075\,4.3727 52.0948\,4.3585 52.1058\,4.3275 52.0956\,4.3331 52.1057\,4.2901 52.135\,4.185 52.0615))" +RTW,Saratov,городской округ Саратов,Saratov,POINT(46.0167 51.5333),"POLYGON((45.3091 51.4575\,45.4378 51.3888\,45.3204 51.2389\,45.8392 51.2104\,46.2985 51.7603\,46.0317 51.8973\,45.8088 51.5947\,45.3091 51.4575))" +RUH,King Khalid Int'l,محافظة الرياض,Riyadh,POINT(46.7167 24.6333),"POLYGON((46.3168 24.6333\,46.9779 24.2204\,47.2088 24.5243\,47.6558 24.4374\,47.7428 24.8494\,47.4639 25.0457\,46.861 24.7939\,46.5595 25.2673\,46.5546 24.9557\,46.4359 24.9271\,46.6334 24.7545\,46.3168 24.6333))" +RVN,Rovaniemi,Rovaniemi,Rovaniemi,POINT(25.7333 66.5),"POLYGON((24.7366 67.1519\,25.0504 66.4759\,24.8016 66.3377\,26.0871 66.1555\,26.7275 66.2986\,27.3266 66.2375\,26.6976 66.5746\,26.518 66.9429\,25.9044 67.1199\,24.7366 67.1519))" +SAH,Sanaa Int'l,أمانة العاصمة,Sanaa,POINT(44.2064 15.3483),"POLYGON((44.1206 15.4594\,44.1265 15.4351\,44.1633 15.4359\,44.1666 15.4024\,44.1419 15.3896\,44.1504 15.3527\,44.1358 15.3519\,44.1783 15.3247\,44.1441 15.299\,44.2254 15.2756\,44.2703 15.283\,44.2532 15.2994\,44.2628 15.3306\,44.2984 15.3503\,44.2951 15.3619\,44.24 15.3637\,44.2585 15.4214\,44.2798 15.4203\,44.2817 15.4466\,44.269 15.4473\,44.2829 15.513\,44.3339 15.5204\,44.3552 15.557\,44.3635 15.5348\,44.3997 15.5579\,44.3509 15.5994\,44.3061 15.6117\,44.2625 15.5714\,44.2109 15.5535\,44.2023 15.5008\,44.1206 15.4594))" +SAL,El Salvador Int'l,Municipio de Antiguo Cuscatlán,Santa Tecla,POINT(-89.2406 13.6731),"POLYGON((-89.2793 13.7022\,-89.2625 13.6782\,-89.2779 13.6626\,-89.2427 13.6357\,-89.2247 13.6621\,-89.2302 13.6832\,-89.246 13.6811\,-89.2612 13.7095\,-89.2793 13.7022))" +SAN,San Diego Int'l,Municipio de Tijuana,Tijuana,POINT(-117.0333 32.525),"POLYGON((-117.1242 32.5343\,-117.0921 32.4087\,-117.0666 32.4119\,-117.0628 32.4263\,-117.0198 32.4165\,-116.9492 32.3659\,-116.9492 32.3433\,-116.9079 32.3343\,-116.9492 32.3172\,-116.9492 32.2984\,-116.8225 32.2982\,-116.676 32.1852\,-116.5341 32.2502\,-116.5247 32.2821\,-116.6026 32.3068\,-116.615 32.3664\,-116.6687 32.4112\,-116.7018 32.5125\,-116.6909 32.5711\,-117.1242 32.5343))" +SAP,Ramón Villeda Morales Int'l,San Pedro Sula,San Pedro Sula,POINT(-88.0333 15.5),"POLYGON((-88.4107 15.5473\,-88.2469 15.5012\,-88.2328 15.4185\,-88.1573 15.372\,-88.1305 15.3879\,-88.1322 15.4044\,-87.9599 15.4305\,-87.9534 15.4672\,-87.932 15.4419\,-87.9089 15.4545\,-87.9087 15.4839\,-87.885 15.5135\,-87.8464 15.5349\,-87.9477 15.5443\,-87.9803 15.5618\,-87.9956 15.6098\,-88.0555 15.6168\,-88.0856 15.6055\,-88.157 15.6167\,-88.4107 15.5473))" +SAT,San Antonio Int'l,New Braunfels,New Braunfels,POINT(-98.1148 29.6994),"POLYGON((-98.1039 29.777\,-98.1958 29.6484\,-98.107 29.6305\,-98.0277 29.7039\,-98.0617 29.7626\,-98.1039 29.777)\,(-98.1684 29.658\,-98.1662 29.664\,-98.1639 29.6598\,-98.1684 29.658)\,(-98.16 29.6615\,-98.1598 29.6684\,-98.1562 29.663\,-98.16 29.6615)\,(-98.0921 29.7467\,-98.0924 29.7486\,-98.0913 29.7474\,-98.0921 29.7467)\,(-98.0782 
29.7004\,-98.0788 29.7036\,-98.0757 29.7009\,-98.0782 29.7004)\,(-98.0777 29.746\,-98.0781 29.7482\,-98.0766 29.7469\,-98.0777 29.746)\,(-98.0673 29.7081\,-98.0712 29.7154\,-98.063 29.7117\,-98.0673 29.7081)\,(-98.058 29.694\,-98.0599 29.7087\,-98.0505 29.7003\,-98.058 29.694)\,(-98.0651 29.7303\,-98.0641 29.7392\,-98.0587 29.7342\,-98.0651 29.7303)\,(-98.0546 29.7062\,-98.0603 29.7203\,-98.049 29.711\,-98.0546 29.7062))" +SAV,Savannah Int'l,Chatham County,Savannah,POINT(-81.1821 32.0286),"POLYGON((-81.3917 32.0959\,-81.368 32.0485\,-81.3054 32.0019\,-81.3089 31.9823\,-81.2883 31.9775\,-81.2887 31.9455\,-81.258 31.9267\,-81.2629 31.9075\,-81.2353 31.9101\,-81.2366 31.8873\,-81.1938 31.9214\,-81.2223 31.8904\,-81.1734 31.8998\,-81.1385 31.8559\,-81.1812 31.7956\,-81.1607 31.785\,-81.1733 31.7572\,-81.1417 31.711\,-81.0515 31.7052\,-81.0566 31.7149\,-81.0138 31.7286\,-80.9707 31.7984\,-80.9286 31.817\,-80.8553 31.9108\,-80.7963 31.9535\,-80.7807 32.0114\,-80.7514 32.0335\,-80.918 32.0376\,-81.0023 32.1001\,-81.0633 32.0886\,-81.1172 32.1176\,-81.1122 32.1508\,-81.1298 32.1658\,-81.1111 32.1727\,-81.114 32.1957\,-81.1448 32.2265\,-81.1954 32.2376\,-81.3917 32.0959))" +SAW,Sabiha Gökçen Havaalani,Fatih,Istanbul,POINT(28.955 41.0136),"POLYGON((28.9199 40.989\,28.9787 41.0012\,28.9871 41.0163\,28.9417 41.0397\,28.9199 40.989))" +SBA,Santa Barbara Muni.,Goleta,Goleta,POINT(-119.8594 34.4361),"POLYGON((-119.9219 34.4359\,-119.8848 34.4138\,-119.8371 34.4382\,-119.8219 34.4234\,-119.8051 34.4419\,-119.8205 34.4588\,-119.9219 34.4359))" +SCL,Arturo Merino Benitez Int'l,Quilicura,Quilicura,POINT(-70.7333 -33.3667),"POLYGON((-70.8036 -33.3504\,-70.75 -33.3908\,-70.7149 -33.3915\,-70.6943 -33.3809\,-70.7003 -33.3662\,-70.68 -33.3373\,-70.7083 -33.3181\,-70.8036 -33.3504))" +SCU,Antonio Maceo,Ciudad de Santiago de Cuba,Santiago de Cuba,POINT(-75.8294 20.0217),"POLYGON((-75.8781 20.0419\,-75.871 19.9926\,-75.861 20.0155\,-75.8741 20.0276\,-75.8352 20.0263\,-75.8748 19.9853\,-75.8597 19.9857\,-75.8644 19.9657\,-75.7961 20.0152\,-75.7734 20.0089\,-75.7657 19.9799\,-75.7559 20.0129\,-75.794 20.0178\,-75.7875 20.0375\,-75.7624 20.0237\,-75.7564 20.0379\,-75.7723 20.049\,-75.7554 20.0623\,-75.8026 20.0532\,-75.7887 20.0817\,-75.8096 20.0916\,-75.8192 20.06\,-75.8781 20.0419))" +SDA,Baghdad Int'l,ناحية مرکز قضاء الکرخ,Baghdad,POINT(44.3661 33.3153),"POLYGON((44.3484 33.3353\,44.3647 33.3211\,44.3593 33.2819\,44.4149 33.3\,44.3726 33.3538\,44.3484 33.3353))" +SDF,Louisville Int'l,Clark County,Jeffersonville,POINT(-85.7026 38.3376),"POLYGON((-85.9949 38.403\,-85.8082 38.4044\,-85.7608 38.3502\,-85.797 38.3329\,-85.7829 38.3208\,-85.7923 38.2875\,-85.7421 38.2674\,-85.6553 38.3239\,-85.6068 38.4387\,-85.498 38.4687\,-85.4728 38.5052\,-85.4196 38.535\,-85.4158 38.5639\,-85.4284 38.5868\,-85.4781 38.5861\,-85.4967 38.6067\,-85.7939 38.6046\,-85.8212 38.5614\,-85.8669 38.547\,-85.8665 38.518\,-85.8848 38.5186\,-85.8849 38.5031\,-85.9938 38.4887\,-85.9949 38.403))" +SDJ,Sendai,青葉区,Sendai,POINT(140.8694 38.2682),"POLYGON((140.5632 38.3772\,140.5931 38.344\,140.5812 38.3213\,140.608 38.3163\,140.5852 38.2979\,140.6128 38.287\,140.6634 38.3007\,140.7289 38.2439\,140.88 38.2467\,140.8974 38.2703\,140.88 38.3093\,140.8319 38.3084\,140.8286 38.2964\,140.7179 38.3406\,140.733 38.3587\,140.6891 38.3794\,140.6959 38.4253\,140.6197 38.4547\,140.5804 38.4373\,140.5782 38.3929\,140.5632 38.3772))" +SDQ,De Las Américas Int'l,Santo Domingo Este,Santo Domingo Este,POINT(-69.8734 18.4855),"POLYGON((-69.8979 18.5143\,-69.875 
18.5022\,-69.8854 18.4636\,-69.7068 18.4547\,-69.7026 18.4863\,-69.7224 18.4844\,-69.7265 18.5155\,-69.7524 18.5304\,-69.7382 18.5955\,-69.7674 18.5958\,-69.7925 18.5651\,-69.8208 18.5636\,-69.8241 18.545\,-69.8979 18.5143))" +SEA,Tacoma Int'l,Seattle,Seattle,POINT(-122.3244 47.6211),"POLYGON((-122.4597 47.6743\,-122.4444 47.554\,-122.4575 47.5223\,-122.4168 47.5056\,-122.4073 47.481\,-122.3757 47.4964\,-122.3714 47.5174\,-122.3273 47.5139\,-122.3097 47.5222\,-122.3185 47.5318\,-122.2701 47.5097\,-122.2703 47.4956\,-122.2244 47.5094\,-122.2493 47.5318\,-122.2413 47.5609\,-122.2682 47.5895\,-122.2557 47.6471\,-122.2262 47.6669\,-122.2697 47.7338\,-122.432 47.7341\,-122.4597 47.6743))" +SFO,San Francisco Int'l,South San Francisco,South San Francisco,POINT(-122.4196 37.6538),"POLYGON((-122.4717 37.649\,-122.4439 37.6345\,-122.2205 37.6355\,-122.2501 37.6714\,-122.4533 37.6708\,-122.4638 37.6611\,-122.4504 37.6485\,-122.4717 37.649))" +SGC,Surgut,городской округ Сургут,Surgut,POINT(73.4333 61.25),"POLYGON((73.2181 61.2994\,73.2371 61.301\,73.2352 61.2792\,73.2235 61.278\,73.2193 61.2764\,73.2818 61.2754\,73.2713 61.264\,73.282 61.2614\,73.2327 61.2527\,73.36 61.2228\,73.5175 61.2382\,73.695 61.2225\,73.7094 61.2451\,73.6851 61.2629\,73.7112 61.2826\,73.7031 61.2944\,73.7838 61.3597\,73.7498 61.3809\,73.7154 61.356\,73.6402 61.3375\,73.5236 61.3398\,73.4803 61.362\,73.3105 61.3562\,73.2276 61.3189\,73.2181 61.2994))" +SGF,Springfield Reg.,Greene County,Republic,POINT(-93.4446 37.1452),"POLYGON((-93.6256 37.282\,-93.6052 37.2814\,-93.6088 37.0983\,-93.2794 37.0943\,-93.0653 37.0887\,-93.0615 37.2698\,-93.0768 37.27\,-93.0735 37.4148\,-93.621 37.4272\,-93.6256 37.282))" +SGN,Tan Son Nhat,Thành phố Hồ Chí Minh,Ho Chi Minh City,POINT(106.7019 10.7756),"POLYGON((106.3567 10.9916\,106.5332 10.9013\,106.4636 10.7565\,106.7224 10.6458\,106.9934 10.1399\,107.0268 10.5884\,106.7514 10.7075\,106.8401 10.8988\,106.7007 10.868\,106.4639 11.1598\,106.3567 10.9916))" +SHA,Hongqiao,浦东新区,Shanghai,POINT(121.4667 31.1667),"POLYGON((121.4525 31.1473\,121.467 31.1121\,121.4847 31.1269\,121.5558 31.1135\,121.5632 31.0952\,121.546 31.0847\,121.5571 31.0861\,121.5668 31.082\,121.5693 31.0833\,121.5718 31.0824\,121.5499 31.0731\,121.5422 31.0496\,121.5572 31.0457\,121.5491 31.0207\,121.57 31.025\,121.5662 31.0006\,121.6953 30.9896\,121.7745 30.9321\,121.7635 30.8661\,121.7792 30.7278\,121.9179 30.7701\,122.0753 30.8441\,122.1697 30.9104\,122.2826 31.0823\,122.3283 31.1177\,122.0846 31.2324\,121.8646 31.2696\,121.7536 31.3075\,121.6985 31.3501\,121.5566 31.4168\,121.4992 31.3692\,121.5551 31.3326\,121.5643 31.279\,121.5329 31.2523\,121.4903 31.2441\,121.5058 31.2163\,121.4618 31.1828\,121.4525 31.1473))" +SHE,Shenyang Taoxian Int'l,沈河区,Shenyang,POINT(123.4281 41.8025),"POLYGON((123.4174 41.79\,123.4396 41.7459\,123.4971 41.7542\,123.5758 41.8071\,123.5731 41.8279\,123.5654 41.8456\,123.52 41.8302\,123.5059 41.8181\,123.5123 41.7898\,123.4894 41.7753\,123.4426 41.8026\,123.4499 41.824\,123.4231 41.8151\,123.4174 41.79))" +SHJ,Sharjah Int'l,دبي,Dubai,POINT(55.2972 25.2631),"POLYGON((54.7154 25.0693\,54.887 24.9119\,55.0243 24.8676\,55.2024 24.6591\,55.3346 24.6231\,55.5233 24.6505\,55.5958 24.712\,55.7371 24.7573\,55.6617 24.9888\,55.6293 25.2101\,55.3864 25.3026\,55.3638 25.298\,55.3548 25.3276\,55.3322 25.3283\,55.0979 25.5251\,54.9476 25.3518\,54.9044 25.2261\,54.821 25.1819\,54.7154 25.0693))" +SID,Amilcar Cabral Int'l,Sal,Espargos,POINT(-22.946 16.756),"POLYGON((-22.9943 16.794\,-22.9779 16.7532\,-22.9875 
16.7315\,-22.9762 16.7119\,-22.9899 16.6997\,-22.9393 16.688\,-22.9489 16.653\,-22.9261 16.586\,-22.8869 16.5917\,-22.9004 16.6324\,-22.8724 16.6696\,-22.8999 16.7331\,-22.8792 16.7713\,-22.9028 16.8055\,-22.8892 16.8339\,-22.9194 16.8557\,-22.9516 16.8449\,-22.9859 16.8252\,-22.9943 16.794))" +SIN,Singapore Changi,Central,Singapore,POINT(103.8 1.3),"POLYGON((103.7926 1.2991\,103.8175 1.28\,103.8156 1.2617\,103.84 1.2642\,103.8677 1.2381\,103.8813 1.2061\,103.9186 1.2227\,103.8655 1.2942\,103.875 1.3195\,103.8571 1.3571\,103.9021 1.4278\,103.8779 1.4454\,103.8524 1.4121\,103.8315 1.4209\,103.8094 1.3986\,103.8181 1.3738\,103.8015 1.3443\,103.8187 1.3358\,103.7926 1.2991))" +SIP,Simferopol Int'l,Гвардейское сельское поселение,Hvardiiske,POINT(34.0142 45.1142),"POLYGON((33.9314 45.105\,33.9372 45.078\,33.9614 45.0625\,33.9815 45.0767\,33.9815 45.0622\,34.0481 45.0724\,34.0486 45.1337\,34.0809 45.1458\,34.0867 45.1683\,34.035 45.1814\,34.0142 45.1489\,33.9911 45.1632\,33.9678 45.1634\,33.9752 45.1459\,33.9386 45.1557\,33.9386 45.1316\,33.9627 45.116\,33.9314 45.105))" +SIT,Sitka Rocky Gutierrez,Sitka,Sitka,POINT(-135.3152 57.2401),"POLYGON((-136.4544 57.8307\,-135.8599 56.9924\,-134.8151 56.2159\,-134.6664 56.1476\,-134.6667 56.2833\,-134.4292 56.2827\,-134.4962 56.6996\,-134.5276 56.7971\,-134.6772 56.999\,-134.7386 57.2263\,-134.6957 57.316\,-134.7011 57.4546\,-134.8362 57.7877\,-135.0617 57.7609\,-135.2381 57.7617\,-135.4267 57.844\,-135.757 57.9427\,-135.902 58.0013\,-136.0243 57.8388\,-136.4544 57.8307))" +SJC,San Jose Int'l,Sunnyvale,Sunnyvale,POINT(-122.0255 37.3836),"POLYGON((-122.0652 37.3431\,-122.0544 37.3302\,-121.9957 37.3378\,-121.9964 37.3704\,-121.9825 37.3702\,-122.0049 37.3739\,-121.9868 37.3777\,-121.9863 37.4175\,-122.0596 37.4639\,-122.0594 37.4265\,-122.0347 37.4263\,-122.0652 37.3431))" +SJD,Los Cabos Int'l,Municipio de Los Cabos,San José del Cabo,POINT(-109.7081 23.0614),"POLYGON((-110.1207 23.114\,-109.9264 22.8716\,-109.5362 23.1094\,-109.4684 23.5548\,-109.9314 23.6101\,-109.9457 23.1351\,-110.1207 23.114))" +SJJ,Sarajevo,Sarajevo,Sarajevo,POINT(18.4131 43.8564),"POLYGON((18.261 43.8686\,18.3384 43.8446\,18.3268 43.8344\,18.3529 43.8209\,18.4013 43.8449\,18.4597 43.8269\,18.4666 43.8548\,18.4542 43.8621\,18.4645 43.8786\,18.4015 43.8959\,18.377 43.8727\,18.3135 43.8997\,18.2714 43.8971\,18.261 43.8686))" +SJU,Luis Muñoz Marin,Carolina,Carolina,POINT(-65.9792 18.4054),"POLYGON((-66.0431 18.444\,-65.993 18.4072\,-66.0003 18.3808\,-65.9391 18.3096\,-65.9392 18.2867\,-65.9073 18.2721\,-65.9156 18.2943\,-65.9018 18.3163\,-65.9198 18.345\,-65.9131 18.379\,-65.9312 18.3932\,-65.9189 18.4002\,-65.9304 18.4284\,-65.9671 18.4325\,-65.993 18.458\,-66.0431 18.444))" +SJW,Shijiazhuang Zhengding Int'l,长安区,Shijiazhuang,POINT(114.5086 38.0422),"POLYGON((114.4741 38.1054\,114.4891 38.104\,114.4959 38.0809\,114.4856 38.0429\,114.5058 38.035\,114.5854 38.0366\,114.5853 38.058\,114.6727 38.0446\,114.7011 38.089\,114.5157 38.1306\,114.4741 38.1054))" +SKB,Robert L. 
Bradshaw Int'l,Saint George Basseterre,Basseterre,POINT(-62.7333 17.3),"POLYGON((-62.7551 17.3256\,-62.7437 17.2906\,-62.7107 17.2938\,-62.6632 17.2653\,-62.6578 17.2134\,-62.6262 17.2272\,-62.6259 17.2507\,-62.7551 17.3256))" +SKG,Thessaloniki,Δημοτική Ενότητα Θεσαλονίκης,Thessaloníki,POINT(22.9347 40.6403),"POLYGON((22.8995 40.652\,22.9055 40.6327\,22.951 40.623\,22.9467 40.5915\,22.9781 40.5927\,22.9899 40.6134\,22.9677 40.6196\,22.9625 40.6446\,22.8995 40.652))" +SKO,Sadiq Abubakar III,Sokoto South,Sokoto,POINT(5.2339 13.0622),"POLYGON((5.2221 13.0437\,5.2511 13.0056\,5.2921 13.0305\,5.2993 13.0554\,5.2281 13.0654\,5.2221 13.0437))" +SKP,Skopje,Скопје,Skopje,POINT(21.4317 41.9961),"POLYGON((21.3154 41.9941\,21.391 41.9911\,21.3888 41.9638\,21.4441 41.9549\,21.467 41.9673\,21.4733 41.94\,21.5299 41.9632\,21.5077 41.9826\,21.5377 41.9942\,21.5035 41.999\,21.5164 42.013\,21.4836 42.0197\,21.4715 42.0462\,21.4142 42.0492\,21.4197 42.0282\,21.3681 42.0208\,21.3532 42.0443\,21.3536 42.0255\,21.3188 42.0227\,21.3154 41.9941))" +SLA,Martín Miguel de Güemes Int,Salta,Salta,POINT(-65.4167 -24.7833),"POLYGON((-65.4791 -24.8735\,-65.43 -24.8609\,-65.4282 -24.8468\,-65.3697 -24.8417\,-65.371 -24.8003\,-65.3406 -24.7848\,-65.3784 -24.7766\,-65.3902 -24.7988\,-65.3772 -24.8042\,-65.3954 -24.8081\,-65.401 -24.7757\,-65.3872 -24.7747\,-65.3943 -24.7446\,-65.3834 -24.7368\,-65.3938 -24.7125\,-65.4245 -24.7154\,-65.4125 -24.7706\,-65.4499 -24.7685\,-65.4637 -24.7829\,-65.4505 -24.8074\,-65.4737 -24.8104\,-65.4764 -24.8228\,-65.4331 -24.8077\,-65.4289 -24.8239\,-65.467 -24.8341\,-65.4791 -24.8735))" +SLC,Salt Lake City Int'l,West Valley City,West Valley City,POINT(-112.0123 40.6886),"POLYGON((-112.0973 40.6532\,-112.0631 40.6459\,-112.0683 40.6313\,-112.0245 40.6313\,-112.0435 40.6422\,-112.0247 40.6676\,-111.958 40.6676\,-111.958 40.6821\,-111.9211 40.6863\,-111.9202 40.7207\,-112.0633 40.7255\,-112.0631 40.6821\,-112.0972 40.6821\,-112.0973 40.6532))" +SLE,McNary Field,Keizer,Keizer,POINT(-123.0243 45.0028),"POLYGON((-123.0571 44.9945\,-123.0212 44.9738\,-122.991 45.0166\,-123.0166 45.0207\,-123.0194 45.0372\,-123.0287 45.0144\,-123.0538 45.0135\,-123.0571 44.9945))" +SLP,Ponciano Arriaga Int'l,Municipio de San Luis Potosí,San Luis Potosí,POINT(-100.9761 22.1511),"POLYGON((-101.1685 22.0315\,-100.742 22.0537\,-100.9677 22.1716\,-100.7597 22.4551\,-100.9189 22.4522\,-100.8951 22.6553\,-101.1444 22.5476\,-100.9653 22.3024\,-101.1685 22.0315))" +SLW,Plan de Guadalupe,Saltillo,Saltillo,POINT(-100.9919 25.4231),"POLYGON((-101.621 25.1844\,-101.3793 25.0892\,-101.6004 24.7544\,-101.3421 24.8134\,-100.7999 24.5562\,-100.7275 24.9187\,-100.9466 25.5217\,-101.4019 25.4731\,-101.2507 25.3822\,-101.3855 25.2\,-101.621 25.1844))" +SLZ,Marechal Cunha Machado Int'l,São Luís,São Luís,POINT(-44.3044 -2.5283),"POLYGON((-44.4111 -2.8004\,-44.3883 -2.7988\,-44.3881 -2.7654\,-44.3729 -2.7578\,-44.307 -2.7791\,-44.2658 -2.7604\,-44.2496 -2.7303\,-44.1757 -2.7021\,-44.183 -2.6323\,-44.1616 -2.6078\,-44.1947 -2.5625\,-44.2074 -2.567\,-44.1897 -2.5209\,-44.2135 -2.5213\,-44.2123 -2.4746\,-44.3046 -2.4875\,-44.3232 -2.5022\,-44.3059 -2.526\,-44.3489 -2.5269\,-44.377 -2.5642\,-44.3541 -2.6032\,-44.4111 -2.8004))" +SMF,Sacramento Int'l,Sacramento County,Elk Grove,POINT(-121.3842 38.4161),"POLYGON((-121.8628 38.0661\,-121.8189 38.0222\,-121.7806 38.0185\,-121.7016 38.043\,-121.6718 38.0944\,-121.567 38.0967\,-121.5836 38.1202\,-121.5361 38.152\,-121.5287 38.1955\,-121.4715 38.2597\,-121.4296 38.2548\,-121.3984 38.2275\,-121.3439 
38.2283\,-121.282 38.2502\,-121.1728 38.2552\,-121.067 38.2991\,-121.0272 38.3001\,-121.0271 38.5082\,-121.1184 38.7174\,-121.1333 38.7055\,-121.6032 38.7362\,-121.6305 38.6774\,-121.5926 38.643\,-121.5675 38.6459\,-121.5498 38.5994\,-121.5197 38.6038\,-121.5067 38.5901\,-121.5246 38.5196\,-121.5592 38.4991\,-121.5423 38.4761\,-121.5049 38.4694\,-121.503 38.4409\,-121.5327 38.4312\,-121.5134 38.3988\,-121.5211 38.3621\,-121.5851 38.3308\,-121.6052 38.2961\,-121.612 38.1999\,-121.6852 38.1596\,-121.7116 38.0851\,-121.7426 38.0869\,-121.8002 38.0591\,-121.8423 38.0765\,-121.8628 38.0661))" +SNA,John Wayne,Mission Viejo,Mission Viejo,POINT(-117.6551 33.6096),"POLYGON((-117.6959 33.613\,-117.6743 33.5962\,-117.6741 33.5431\,-117.659 33.5338\,-117.6196 33.6728\,-117.6959 33.613))" +SNN,Shannon,Shannon Municipal District,Shannon,POINT(-8.8686 52.7137),"POLYGON((-8.9969 52.6972\,-8.9848 52.6615\,-8.8249 52.6802\,-8.7068 52.6701\,-8.654 52.6901\,-8.6182 52.6889\,-8.5919 52.6693\,-8.5464 52.6807\,-8.5017 52.7185\,-8.5227 52.7454\,-8.6076 52.7161\,-8.616 52.7293\,-8.6321 52.7181\,-8.6697 52.726\,-8.6953 52.7513\,-8.8675 52.7351\,-8.8841 52.7601\,-8.8558 52.7872\,-8.8735 52.8016\,-8.8987 52.7879\,-8.919 52.7985\,-8.9879 52.7601\,-8.9928 52.7459\,-8.9723 52.7239\,-8.9969 52.6972))" +SOF,Vrazhdebna,Столична,Sofia,POINT(23.33 42.7),"POLYGON((23.0511 42.7021\,23.1058 42.6949\,23.1183 42.6672\,23.1726 42.6533\,23.1718 42.6136\,23.1433 42.5872\,23.1865 42.5923\,23.2842 42.563\,23.3 42.5444\,23.2902 42.5327\,23.4134 42.464\,23.4479 42.4804\,23.4251 42.5112\,23.4466 42.5247\,23.51 42.5122\,23.5416 42.4754\,23.5296 42.4467\,23.5458 42.4125\,23.6623 42.4217\,23.6606 42.4557\,23.6221 42.489\,23.6301 42.5004\,23.587 42.5051\,23.5498 42.5334\,23.5674 42.5651\,23.5595 42.6162\,23.5307 42.625\,23.5319 42.6388\,23.4926 42.6391\,23.5087 42.6469\,23.4939 42.6744\,23.546 42.6807\,23.5482 42.704\,23.6003 42.7161\,23.6772 42.8444\,23.6363 42.8387\,23.6325 42.8566\,23.6045 42.8604\,23.5565 42.8276\,23.4719 42.8744\,23.372 42.8392\,23.3202 42.8835\,23.2777 42.8965\,23.2466 42.8656\,23.2505 42.7974\,23.2033 42.7765\,23.2184 42.7564\,23.1985 42.7292\,23.0872 42.7377\,23.0511 42.7021))" +SON,Santo Pekoa Int'l,Sanma,Luganville,POINT(167.1667 -15.5333),"POLYGON((166.3355 -14.8608\,166.3385 -14.8835\,166.4432 -15.4327\,166.4557 -15.4735\,166.5856 -15.7379\,166.6039 -15.768\,166.7323 -15.9432\,167.43 -15.7633\,167.5533 -14.475\,166.3405 -14.138\,166.3423 -14.7747\,166.3365 -14.8221\,166.3355 -14.8608))" +SPN,Saipan Int'l,Saipan Municipality,Capitol Hill,POINT(145.7546 15.2137),"POLYGON((145.5726 15.125\,145.6426 15.1069\,145.7442 15.0229\,145.7501 15.0666\,145.8027 15.0756\,145.8374 15.1201\,145.876 15.1354\,145.9094 15.2741\,145.8963 15.3087\,145.8528 15.3519\,145.7818 15.3483\,145.7085 15.2753\,145.687 15.1974\,145.6483 15.1387\,145.5726 15.125))" +SQQ,Šiauliai Int'l,Šiauliai,Šiauliai,POINT(23.3167 55.9333),"POLYGON((23.228 55.9076\,23.2655 55.8822\,23.2954 55.9037\,23.3083 55.8982\,23.3177 55.891\,23.2653 55.8641\,23.3213 55.8414\,23.3642 55.8805\,23.4188 55.8722\,23.429 55.8862\,23.3605 55.9176\,23.369 55.9274\,23.3499 55.9326\,23.3421 55.9562\,23.3548 55.9696\,23.2803 55.9608\,23.2719 55.9546\,23.2857 55.9477\,23.2578 55.9265\,23.2677 55.9221\,23.228 55.9076))" +SRE,Juana Azurduy de Padilla Int'l,Municipio Tarabuco,Tarabuco,POINT(-64.9167 -19.1667),"POLYGON((-65.1069 -18.9149\,-65.0674 -18.983\,-65.0868 -18.9975\,-65.0668 -19.019\,-65.0594 -19.0839\,-65.0143 -19.1224\,-65.0123 -19.1954\,-65.0014 -19.2111\,-64.9797 
-19.2078\,-64.9796 -19.2595\,-64.9316 -19.2709\,-64.923 -19.3589\,-64.891 -19.3516\,-64.8452 -19.287\,-64.788 -19.2738\,-64.7367 -19.2926\,-64.7231 -19.2586\,-64.7561 -19.1254\,-64.8084 -19.0556\,-64.8364 -19.0523\,-64.8452 -19.0172\,-64.8547 -19.0238\,-64.9029 -18.9842\,-64.964 -19.0113\,-64.9823 -18.9785\,-64.9693 -18.9447\,-64.9959 -18.8719\,-65.0947 -18.8578\,-65.0486 -18.9195\,-65.0506 -18.9442\,-65.1069 -18.9149))" +SRG,Achmad Yani,Semarang,Semarang,POINT(110.4167 -6.9667),"POLYGON((110.2673 -7.0185\,110.3008 -7.0645\,110.3074 -7.1018\,110.3388 -7.1033\,110.3461 -7.0821\,110.3547 -7.1056\,110.3761 -7.0944\,110.3778 -7.1106\,110.3968 -7.1145\,110.5011 -7.0671\,110.4816 -7.0442\,110.505 -7.0117\,110.491 -7.0043\,110.5088 -6.9563\,110.4619 -6.932\,110.4363 -6.9513\,110.4233 -6.9355\,110.4239 -6.9551\,110.419 -6.9428\,110.3564 -6.9538\,110.306 -6.9368\,110.285 -6.9693\,110.283 -7.0131\,110.2673 -7.0185))" +SSA,Deputado Luis Eduardo Magalhaes Int'l,Camaçari,Camaçari,POINT(-38.3239 -12.6978),"POLYGON((-38.376 -12.674\,-38.3269 -12.8356\,-38.2891 -12.8557\,-38.2813 -12.8806\,-38.2678 -12.8753\,-38.0449 -12.632\,-38.0304 -12.5971\,-38.0696 -12.5361\,-38.0662 -12.4683\,-38.1 -12.478\,-38.097 -12.5\,-38.114 -12.509\,-38.178 -12.487\,-38.174 -12.461\,-38.192 -12.453\,-38.226 -12.476\,-38.244 -12.468\,-38.249 -12.496\,-38.226 -12.52\,-38.215 -12.5667\,-38.1687 -12.5417\,-38.1317 -12.6018\,-38.1767 -12.5972\,-38.2251 -12.6466\,-38.304 -12.621\,-38.337 -12.63\,-38.376 -12.674))" +SSE,Solapur,Solapur North,Solāpur,POINT(75.92 17.68),"POLYGON((75.7321 17.9388\,75.7646 17.9134\,75.7435 17.8881\,75.7621 17.8755\,75.7665 17.8239\,75.7491 17.7815\,75.7837 17.7717\,75.812 17.782\,75.8215 17.7651\,75.8112 17.7493\,75.761 17.7372\,75.7805 17.5998\,75.8459 17.5609\,75.8656 17.5608\,75.8716 17.5866\,75.9413 17.5829\,75.9631 17.7141\,75.9335 17.7696\,75.9619 17.8309\,75.9402 17.8525\,75.9246 17.8337\,75.8742 17.8452\,75.8448 17.8906\,75.8469 17.9295\,75.8118 17.9311\,75.806 17.9875\,75.7402 17.9752\,75.7567 17.9614\,75.7321 17.9388))" +SSH,Sharm el-Sheikh Int'l,جنوب سيناء,Sharm ash Shaykh,POINT(34.3297 27.9122),"POLYGON((32.5425 29.7744\,32.5579 29.6028\,33.001 28.7429\,33.1238 28.5326\,33.5145 28.0946\,34.2653 27.3883\,34.519 27.7135\,34.4771 27.7897\,34.4572 27.9005\,34.4635 28.0132\,34.5358 28.1932\,34.5452 28.2771\,34.6771 28.5791\,34.7368 28.8416\,34.7336 28.9052\,34.7616 28.9527\,34.768 29.082\,34.8158 29.193\,34.8306 29.2697\,34.9212 29.4534\,34.876 29.5589\,34.6911 29.5345\,34.4706 29.5544\,34.3344 29.5935\,33.7219 29.833\,33.4782 29.8736\,33.1588 29.9015\,32.833 29.9233\,32.6311 29.8065\,32.5425 29.7744))" +STI,Cibao Int'l,Santiago,Gurabo al Medio,POINT(-70.6727 19.4739),"POLYGON((-70.9085 19.4891\,-70.7567 19.3825\,-70.7199 19.4002\,-70.6823 19.4015\,-70.6744 19.3891\,-70.6787 19.4276\,-70.6562 19.4265\,-70.6475 19.449\,-70.6272 19.441\,-70.6191 19.4646\,-70.6511 19.4622\,-70.6442 19.5007\,-70.6165 19.5295\,-70.6347 19.572\,-70.6309 19.6083\,-70.7052 19.65\,-70.7096 19.5993\,-70.7466 19.5805\,-70.7308 19.5355\,-70.7687 19.523\,-70.755 19.5024\,-70.7796 19.4926\,-70.7969 19.5152\,-70.8911 19.5419\,-70.9085 19.4891))" +STL,Lambert St Louis Int'l,Saint Louis,St. 
Louis,POINT(-90.2451 38.6359),"POLYGON((-90.3207 38.5942\,-90.2584 38.5323\,-90.1954 38.5959\,-90.18 38.6538\,-90.2128 38.7176\,-90.1642 38.772\,-90.1842 38.7743\,-90.2079 38.7397\,-90.2523 38.7182\,-90.3018 38.6556\,-90.3207 38.5942))" +STR,Stuttgart,Stuttgart,Stuttgart,POINT(9.18 48.7775),"POLYGON((9.0386 48.7413\,9.1003 48.7117\,9.2449 48.6946\,9.2138 48.7312\,9.316 48.7782\,9.2469 48.7949\,9.2568 48.8121\,9.2304 48.8638\,9.19 48.8477\,9.141 48.8619\,9.1382 48.828\,9.0858 48.8309\,9.0769 48.7553\,9.0386 48.7413))" +STV,Surat,Majura Taluka,Sūrat,POINT(72.8311 21.1702),"POLYGON((72.6903 21.0774\,72.7245 21.0754\,72.7373 21.0507\,72.7929 21.0736\,72.8313 21.0555\,72.8322 21.0773\,72.8727 21.0976\,72.8754 21.1279\,72.8339 21.1368\,72.8377 21.1827\,72.784 21.1785\,72.7428 21.1441\,72.6981 21.1377\,72.6903 21.0774))" +STY,Nueva Hespérides Int'l,Federación,Federación,POINT(-57.9167 -30.9833),"POLYGON((-58.0085 -30.9567\,-58.0372 -31.022\,-57.994 -31.0217\,-57.9937 -31.0473\,-57.9622 -31.0802\,-57.9821 -31.105\,-57.9578 -31.1636\,-57.9685 -31.176\,-57.9502 -31.1811\,-57.9507 -31.2066\,-57.9456 -31.1995\,-57.9454 -31.2098\,-57.9382 -31.204\,-57.9366 -31.2132\,-57.9244 -31.2151\,-57.942 -31.1912\,-57.9295 -31.1671\,-57.9452 -31.1663\,-57.9339 -31.1601\,-57.9434 -31.1335\,-57.931 -31.1309\,-57.9455 -31.1243\,-57.92 -31.1075\,-57.936 -31.0993\,-57.9107 -31.0953\,-57.9322 -31.0834\,-57.9319 -31.0755\,-57.9106 -31.0853\,-57.9033 -31.0693\,-57.8997 -31.0936\,-57.8945 -31.0663\,-57.8809 -31.0694\,-57.8855 -31.0017\,-57.9008 -30.9899\,-57.9288 -31.0162\,-57.9109 -30.9869\,-57.932 -30.9586\,-57.9456 -30.9733\,-57.955 -30.9776\,-57.9475 -30.96\,-57.9911 -30.9597\,-58.0035 -30.9805\,-58.0085 -30.9567))" +SUB,Juanda Int'l,Surabaya,Surabaya,POINT(112.7378 -7.2458),"POLYGON((112.5916 -7.2229\,112.6319 -7.261\,112.6294 -7.3116\,112.6492 -7.3146\,112.6646 -7.3514\,112.7047 -7.3362\,112.7258 -7.3483\,112.7552 -7.3367\,112.8126 -7.3464\,112.8442 -7.3132\,112.8341 -7.2604\,112.804 -7.2501\,112.7643 -7.196\,112.7236 -7.1981\,112.7257 -7.2097\,112.7182 -7.1986\,112.7088 -7.2233\,112.6852 -7.2256\,112.6602 -7.2079\,112.6657 -7.1915\,112.6398 -7.1924\,112.5982 -7.2036\,112.5916 -7.2229))" +SUV,Nausori Int'l,Tailevu,Nausori,POINT(178.5454 -18.0244),"POLYGON((178.2683 -17.6805\,178.2744 -17.7357\,178.3011 -17.7503\,178.2956 -17.7771\,178.3287 -17.7727\,178.3315 -17.752\,178.3489 -17.7456\,178.3842 -17.7655\,178.3808 -17.7975\,178.3375 -17.81\,178.336 -17.8231\,178.3734 -17.8446\,178.3862 -17.8203\,178.4047 -17.822\,178.407 -17.8804\,178.4369 -17.8801\,178.4344 -17.9165\,178.534 -17.9603\,178.5204 -18.014\,178.5614 -18.0542\,178.5643 -18.0825\,178.5875 -18.0949\,178.6264 -18.0858\,178.6352 -18.0996\,178.8369 -18.0986\,178.8369 -17.909\,178.6928 -17.8276\,178.6928 -17.6065\,178.8687 -17.3992\,178.8687 -17.188\,178.6168 -17.188\,178.6168 -17.3275\,178.4494 -17.4956\,178.4515 -17.6139\,178.4288 -17.6611\,178.4382 -17.6748\,178.4138 -17.6847\,178.3615 -17.6598\,178.3333 -17.6828\,178.284 -17.6304\,178.2683 -17.6805))" +SVG,Stavanger Sola,Sandnes,Sandnes,POINT(5.7361 58.8517),"POLYGON((5.6097 58.8279\,5.8197 58.7768\,5.9648 58.8193\,5.9848 58.8094\,6.0293 58.8483\,6.1139 58.8692\,6.1659 58.8442\,6.2104 58.8467\,6.2044 58.8574\,6.2969 58.8836\,6.4192 58.8971\,6.5394 58.9492\,6.5473 58.9769\,6.7026 58.9942\,6.9037 59.1134\,6.881 59.1446\,6.9392 59.1819\,6.8377 59.1926\,6.7526 59.1719\,6.6588 59.1852\,6.6155 59.1681\,6.6117 59.149\,6.5706 59.1407\,6.5479 59.0979\,6.3053 59.0757\,6.3005 59.0573\,6.2223 59.0367\,6.2286 
59.0078\,6.2486 58.9979\,6.1022 58.9359\,6.0741 58.8961\,5.9717 58.9573\,5.8634 58.9866\,5.7798 58.946\,5.7581 58.8849\,5.696 58.8945\,5.6699 58.8761\,5.6712 58.8607\,5.6097 58.8279))" +SVO,Sheremtyevo,городской округ Химки,Khimki,POINT(37.445 55.8892),"POLYGON((37.134 55.9152\,37.3399 55.8742\,37.4145 55.9546\,37.4094 55.8807\,37.5072 55.8973\,37.2522 56.0778\,37.134 55.9152))" +SVX,Koltsovo,Октябрьский район,Yekaterinburg,POINT(60.6128 56.8356),"POLYGON((60.6057 56.8274\,60.6372 56.8118\,60.623 56.8099\,60.632 56.7985\,60.763 56.7614\,60.755 56.7524\,60.7852 56.7299\,60.8449 56.7211\,60.8736 56.6957\,60.9144 56.7167\,60.8656 56.7258\,60.8836 56.7534\,60.9198 56.7635\,60.9414 56.835\,60.737 56.8299\,60.7569 56.8109\,60.6328 56.8416\,60.6057 56.8274))" +SXB,Strasbourg,Strasbourg,Strasbourg,POINT(7.7458 48.5833),"POLYGON((7.6881 48.5995\,7.6939 48.5604\,7.7255 48.5617\,7.7581 48.5404\,7.7668 48.5161\,7.7502 48.4947\,7.7707 48.4919\,7.8047 48.5126\,7.8006 48.5848\,7.8361 48.6335\,7.8076 48.6458\,7.7927 48.6405\,7.8036 48.6271\,7.7647 48.6027\,7.6881 48.5995))" +SYD,Kingsford Smith,Council of the City of Sydney,Sydney,POINT(151.21 -33.8678),"POLYGON((151.1749 -33.8726\,151.1767 -33.9178\,151.2142 -33.9244\,151.2308 -33.8592\,151.2206 -33.8691\,151.2066 -33.8537\,151.2016 -33.8732\,151.1941 -33.8624\,151.1749 -33.8726))" +SYQ,Nacional Tobías Bolaños,Cantón San José,La Uruca,POINT(-84.1327 9.9575),"POLYGON((-84.18 9.9686\,-84.0907 9.9\,-84.0475 9.9028\,-84.0624 9.9406\,-84.1088 9.9621\,-84.18 9.9686))" +SYR,Syracuse Hancock Int'l,Town of Cicero,Cicero,POINT(-76.0662 43.1662),"POLYGON((-76.1577 43.2433\,-76.1277 43.1186\,-75.9547 43.1167\,-75.9501 43.135\,-75.9773 43.1362\,-75.9712 43.1544\,-75.9918 43.1835\,-76.0789 43.1995\,-76.0852 43.2131\,-76.0681 43.2126\,-76.0694 43.2147\,-76.1577 43.2433))" +SYX,Sanya Phoenix Int'l,天涯区,Sanya,POINT(109.5036 18.2533),"POLYGON((109.1802 18.4609\,109.2304 18.4483\,109.2311 18.4261\,109.2696 18.411\,109.2602 18.3581\,109.2273 18.348\,109.2286 18.2638\,109.2842 18.2665\,109.3518 18.2166\,109.4508 18.1936\,109.4895 18.2281\,109.5137 18.2283\,109.4914 18.2781\,109.5041 18.2948\,109.4809 18.3034\,109.5029 18.3422\,109.5249 18.3375\,109.4993 18.3632\,109.535 18.4007\,109.5104 18.4374\,109.5231 18.4474\,109.5321 18.53\,109.4992 18.5568\,109.4192 18.5646\,109.4029 18.6302\,109.3286 18.5772\,109.309 18.5191\,109.2913 18.5141\,109.2434 18.5607\,109.2022 18.5572\,109.1815 18.5163\,109.1908 18.4711\,109.1802 18.4609))" +SYZ,Shiraz Int'l,شیراز,Shīrāz,POINT(52.5425 29.61),"POLYGON((52.3823 29.7588\,52.4161 29.7338\,52.4237 29.6871\,52.4464 29.6885\,52.4565 29.6314\,52.427 29.619\,52.4294 29.5977\,52.4954 29.5556\,52.5327 29.5512\,52.5767 29.5136\,52.6279 29.5045\,52.625 29.5594\,52.6673 29.5616\,52.6302 29.5713\,52.6494 29.5859\,52.6233 29.5831\,52.5857 29.6097\,52.6049 29.6356\,52.5701 29.6234\,52.5714 29.6514\,52.56 29.6366\,52.5112 29.653\,52.4834 29.6866\,52.4958 29.7115\,52.4447 29.7239\,52.4228 29.7754\,52.3963 29.776\,52.3823 29.7588))" +SZX,Shenzhen Bao'an Int'l,福田区,Shenzhen,POINT(114.054 22.535),"POLYGON((113.986 22.5241\,114.0088 22.5202\,113.9978 22.5124\,114.0119 22.5006\,114.0532 22.5028\,114.1003 22.5346\,114.0991 22.5701\,114.0544 22.5898\,114.0044 22.5824\,113.986 22.5241))" +TAE,Daegu Int'l,대구광역시,Daegu,POINT(128.6017 35.8717),"POLYGON((128.4179 35.9217\,128.3816 35.6068\,128.6835 35.7211\,128.7037 36.0582\,128.8829 36.1989\,128.4169 36.2989\,128.6413 36.0103\,128.4179 35.9217))" +TAI,Ta'izz Int'l,محافظة تعز,Ta‘izz,POINT(44.0219 13.5789),"POLYGON((43.021 
13.3218\,43.2898 12.633\,43.6408 12.5166\,43.5232 12.7095\,43.6949 13.0766\,44.3074 13.2122\,44.515 13.6106\,44.3794 13.771\,43.61 13.9147\,43.078 13.6549\,43.021 13.3218))" +TAM,Gen Francisco J Mina Int'l,Tampico,Tampico,POINT(-97.8686 22.2553),"POLYGON((-98.0003 22.2974\,-97.9567 22.2939\,-97.895 22.2224\,-97.8377 22.2124\,-97.8576 22.2471\,-97.8556 22.3219\,-97.97 22.333\,-98.0003 22.2974))" +TAO,Qingdao Liuting Int'l,市北区,Qingdao,POINT(120.4 36.1167),"POLYGON((120.2859 36.0821\,120.3569 36.0673\,120.3688 36.0869\,120.4267 36.0969\,120.3951 36.1518\,120.3311 36.1631\,120.2859 36.0821))" +TAP,Tapachula Int'l,Tapachula,Tapachula,POINT(-92.2667 14.9),"POLYGON((-92.4656 14.7584\,-92.3174 14.6209\,-92.1844 15.2277\,-92.3832 15.1442\,-92.3018 14.983\,-92.4656 14.7584))" +TAS,Tashkent Int'l,Yunusobod Tumani,Tashkent,POINT(69.2797 41.3111),"POLYGON((69.228 41.3951\,69.2696 41.3557\,69.26 41.3111\,69.2808 41.3099\,69.344 41.356\,69.317 41.3844\,69.228 41.3951))" +TBS,Tbilisi Int'l,თბილისი,Tbilisi,POINT(44.7925 41.7225),"POLYGON((44.5962 41.6878\,44.6352 41.6372\,44.6736 41.6479\,44.797 41.6249\,44.809 41.6346\,44.7946 41.65\,44.843 41.6575\,44.9023 41.6398\,44.9005 41.6226\,44.9239 41.6178\,44.9286 41.6379\,44.9965 41.6649\,45.0161 41.7242\,44.9379 41.7665\,44.8774 41.7629\,44.8807 41.7978\,44.895 41.8012\,44.8793 41.8147\,44.7601 41.8376\,44.7461 41.8283\,44.7585 41.8193\,44.6893 41.8178\,44.6597 41.7865\,44.6882 41.7635\,44.6953 41.7463\,44.6766 41.735\,44.6923 41.7143\,44.5962 41.6878))" +TBT,Tabatinga Int'l,Leticia,Leticia,POINT(-69.9333 -4.2167),"POLYGON((-70.3664 -3.6893\,-70.3142 -3.7287\,-70.3068 -3.8075\,-70.2687 -3.8435\,-70.2546 -3.8845\,-70.1767 -3.9513\,-70.1266 -4.0352\,-70.0621 -4.0827\,-69.9498 -4.2294\,-69.9333 -4.2197\,-69.7141 -3.0025\,-69.9794 -3.0024\,-70.003 -3.017\,-69.9972 -3.0468\,-70.018 -3.0306\,-70.0255 -3.0448\,-70.0576 -3.0471\,-70.0389 -3.09\,-70.0582 -3.0876\,-70.0635 -3.1042\,-70.1005 -3.0892\,-70.123 -3.1081\,-70.1224 -3.1302\,-70.1481 -3.1405\,-70.147 -3.164\,-70.1633 -3.1594\,-70.1625 -3.1712\,-70.1701 -3.1811\,-70.1701 -3.1686\,-70.191 -3.1753\,-70.1977 -3.1983\,-70.2491 -3.1785\,-70.318 -3.2106\,-70.3356 -3.1973\,-70.3664 -3.6893))" +TBU,Fua'amotu Int'l,Tongatapu,Nuku‘alofa,POINT(-175.2 -21.1333),"POLYGON((-175.5703 -21.0995\,-175.5434 -21.2178\,-175.4948 -21.2916\,-175.2757 -21.4323\,-175.1715 -21.47\,-174.9517 -21.2048\,-174.763 -21.1413\,-174.7611 -21.1252\,-174.8016 -20.9571\,-174.8346 -20.899\,-174.923 -20.8352\,-174.9978 -20.8201\,-175.0926 -20.8384\,-175.1879 -20.8161\,-175.2659 -20.8168\,-175.4131 -20.8726\,-175.5318 -20.9774\,-175.5625 -21.0395\,-175.5703 -21.0995))" +TBZ,Tabriz,شهر تبریز,Tabrīz,POINT(46.3006 38.0814),"POLYGON((46.1685 38.0395\,46.1849 38.0242\,46.2457 38.0409\,46.2914 38.0157\,46.3334 38.0262\,46.3716 38.0156\,46.4126 38.0444\,46.3793 38.0817\,46.2087 38.1537\,46.1966 38.1433\,46.2249 38.1192\,46.2117 38.0866\,46.2262 38.0888\,46.2258 38.0677\,46.1685 38.0395))" +TCP,Taba Int'l,قضاء العقبة,Al ‘Aqabah,POINT(35.0056 29.5319),"POLYGON((34.9602 29.3684\,34.9685 29.3557\,35.5512 29.2668\,35.514 29.4916\,35.4549 29.5439\,35.3911 29.5723\,35.4113 29.6284\,35.3822 29.666\,35.3228 29.6752\,35.2246 29.652\,35.0439 29.7881\,35.0133 29.7109\,35.0146 29.6391\,34.9786 29.5768\,34.9774 29.543\,35.0013 29.5175\,34.9675 29.4466\,34.9798 29.4094\,34.9602 29.3684))" +TFN,Tenerife N.,San Cristóbal de La Laguna,La Laguna,POINT(-16.3167 28.4853),"POLYGON((-16.411 28.5271\,-16.3747 28.4909\,-16.3726 28.4719\,-16.377 28.4713\,-16.3836 28.4537\,-16.3827 
28.4504\,-16.3688 28.4692\,-16.3494 28.4655\,-16.2942 28.4319\,-16.2703 28.4984\,-16.286 28.5166\,-16.2673 28.5433\,-16.2744 28.5722\,-16.3235 28.5792\,-16.334 28.5576\,-16.411 28.5271)\,(-16.3732 28.5019\,-16.3409 28.5452\,-16.2927 28.533\,-16.3528 28.4913\,-16.3732 28.5019))" +TGD,Podgorica,Podgorica,Podgorica,POINT(19.2629 42.4413),"POLYGON((19.1797 42.4735\,19.2051 42.444\,19.1981 42.4148\,19.2523 42.3771\,19.2775 42.3807\,19.3097 42.4193\,19.2969 42.439\,19.3141 42.4551\,19.3063 42.4749\,19.2474 42.4517\,19.2383 42.4586\,19.265 42.4725\,19.2088 42.457\,19.1797 42.4735))" +TGU,Toncontin Int'l,Tegucigalpa,Tegucigalpa,POINT(-87.2167 14.1),"POLYGON((-87.2808 14.0845\,-87.2769 14.0388\,-87.199 14.0317\,-87.1599 14.0068\,-87.1354 14.0248\,-87.147 14.141\,-87.2369 14.1295\,-87.2808 14.0845))" +THE,Teresina-Senador Petronio Portella,Teresina,Teresina,POINT(-42.8042 -5.0949),"POLYGON((-42.9706 -5.5616\,-42.9418 -5.5866\,-42.9146 -5.556\,-42.8557 -5.5706\,-42.8463 -5.5361\,-42.8192 -5.5242\,-42.8111 -5.5013\,-42.7487 -5.5413\,-42.748 -5.339\,-42.633 -5.246\,-42.6015 -5.2519\,-42.6018 -4.9043\,-42.713 -4.905\,-42.731 -4.823\,-42.788 -4.817\,-42.814 -4.787\,-42.8942 -4.8853\,-42.8543 -4.937\,-42.8603 -4.9824\,-42.8043 -5.1522\,-42.7998 -5.1969\,-42.8292 -5.2394\,-42.8147 -5.3145\,-42.7632 -5.3357\,-42.7524 -5.3547\,-42.7745 -5.4474\,-42.811 -5.4963\,-42.8515 -5.4896\,-42.8479 -5.5252\,-42.8928 -5.5263\,-42.9097 -5.5429\,-42.9435 -5.5354\,-42.9706 -5.5616))" +THR,Mehrabad Int'l,شهر تهران,Tehran,POINT(51.3889 35.6892),"POLYGON((51.089 35.743\,51.2334 35.6836\,51.2373 35.6639\,51.3344 35.6105\,51.4003 35.6141\,51.4203 35.5713\,51.4506 35.5824\,51.4666 35.5682\,51.456 35.6093\,51.5022 35.6076\,51.5122 35.6344\,51.5016 35.6446\,51.5232 35.6691\,51.4949 35.6929\,51.5122 35.7167\,51.5981 35.7193\,51.5877 35.7248\,51.6063 35.744\,51.5273 35.7773\,51.5344 35.8159\,51.4632 35.8285\,51.3821 35.8126\,51.3824 35.793\,51.3613 35.8037\,51.3446 35.793\,51.338 35.81\,51.3359 35.7935\,51.2989 35.7789\,51.0942 35.7578\,51.089 35.743))" +TIA,Tirane Rinas,Tiranë,Tirana,POINT(19.8178 41.3289),"POLYGON((19.7547 41.3121\,19.7602 41.2997\,19.7983 41.3148\,19.8457 41.2956\,19.8453 41.314\,19.8764 41.3313\,19.8543 41.3591\,19.7954 41.3479\,19.7576 41.3654\,19.7874 41.3253\,19.7547 41.3121))" +TIJ,General Abelardo L Rodriguez Int'l,Municipio de Tijuana,Tijuana,POINT(-117.0333 32.525),"POLYGON((-117.1242 32.5343\,-117.0921 32.4087\,-117.0666 32.4119\,-117.0628 32.4263\,-117.0198 32.4165\,-116.9492 32.3659\,-116.9492 32.3433\,-116.9079 32.3343\,-116.9492 32.3172\,-116.9492 32.2984\,-116.8225 32.2982\,-116.676 32.1852\,-116.5341 32.2502\,-116.5247 32.2821\,-116.6026 32.3068\,-116.615 32.3664\,-116.6687 32.4112\,-116.7018 32.5125\,-116.6909 32.5711\,-117.1242 32.5343))" +TIP,Tripoli Int'l,شعبية الزاوية,Az Zāwīyah,POINT(12.7278 32.7522),"POLYGON((12.463 32.3315\,12.4572 32.2402\,12.6045 32.1921\,12.7684 32.22\,12.7964 32.313\,12.7975 32.4886\,12.7779 32.4998\,12.7679 32.5538\,12.8036 32.6019\,12.8037 32.6849\,12.8351 32.6957\,12.8066 32.7353\,12.8132 32.7855\,12.828 32.79\,12.8396 33.0178\,12.6839 32.9965\,12.6198 33.0035\,12.6002 32.7953\,12.609 32.7224\,12.5269 32.565\,12.4773 32.5022\,12.4863 32.3536\,12.463 32.3315))" +TKK,Chuuk Int'l,Chuuk,Weno,POINT(151.85 7.45),"POLYGON((151.2336 7.3144\,151.2571 7.2246\,151.3198 7.1499\,151.3968 7.1086\,151.4785 7.0921\,151.5675 7.0318\,151.6591 7.0218\,151.7258 6.9229\,151.8127 6.8436\,151.8931 6.7983\,151.9857 6.7859\,152.0735 6.8111\,152.1412 6.8666\,152.1946 6.9564\,152.2088 
7.0412\,152.1865 7.1262\,152.2222 7.1937\,152.2333 7.2671\,152.1678 7.595\,152.0952 7.7364\,152.0215 7.8068\,151.9203 7.8617\,151.7517 7.8846\,151.6288 7.8563\,151.5578 7.8157\,151.5093 7.7592\,151.4808 7.6727\,151.4392 7.6189\,151.3266 7.5558\,151.2703 7.4948\,151.242 7.4235\,151.2336 7.3144))" +TLC,Jose Maria Morelos Y Pavon,Cuauhtémoc,Mexico City,POINT(-99.1333 19.4333),"POLYGON((-99.1843 19.4078\,-99.1712 19.4035\,-99.1704 19.3998\,-99.1567 19.404\,-99.1311 19.4031\,-99.1257 19.4043\,-99.1282 19.4138\,-99.123 19.4425\,-99.1259 19.4478\,-99.1222 19.4598\,-99.1339 19.465\,-99.1416 19.4656\,-99.1504 19.463\,-99.1593 19.4636\,-99.1631 19.4589\,-99.1656 19.4437\,-99.1776 19.4236\,-99.1751 19.423\,-99.1843 19.4078))" +TLL,Ulemiste,Tallinn,Tallinn,POINT(24.7453 59.4372),"POLYGON((24.5502 59.4389\,24.6098 59.4274\,24.6192 59.4072\,24.5941 59.3956\,24.627 59.3863\,24.6109 59.356\,24.6289 59.3518\,24.6716 59.3649\,24.7736 59.3601\,24.7605 59.3749\,24.7877 59.3827\,24.7957 59.4076\,24.8702 59.4107\,24.8699 59.4227\,24.9263 59.4355\,24.9044 59.4573\,24.9149 59.4728\,24.8886 59.4908\,24.8292 59.4981\,24.8342 59.4765\,24.7922 59.4439\,24.7006 59.4554\,24.7287 59.473\,24.7065 59.4862\,24.6857 59.4839\,24.6898 59.4653\,24.6465 59.4709\,24.6834 59.449\,24.664 59.4286\,24.5742 59.4619\,24.5733 59.4472\,24.5502 59.4389))" +TLS,Toulouse-Blagnac,Toulouse,Toulouse,POINT(1.444 43.6045),"POLYGON((1.3504 43.6043\,1.3771 43.59\,1.3594 43.5604\,1.3887 43.5368\,1.4106 43.5491\,1.4613 43.5327\,1.4624 43.5491\,1.5154 43.5702\,1.4799 43.6283\,1.4993 43.6461\,1.476 43.6423\,1.46 43.6608\,1.4431 43.6478\,1.4399 43.6687\,1.4318 43.6574\,1.4005 43.6679\,1.3998 43.6209\,1.3696 43.6288\,1.3504 43.6043))" +TLV,Ben Gurion,תל אביב-יפו,Tel Aviv-Yafo,POINT(34.78 32.08),"POLYGON((34.7391 32.0338\,34.7744 32.0328\,34.786 32.0468\,34.8073 32.0346\,34.8018 32.0929\,34.8523 32.1194\,34.7956 32.1287\,34.8024 32.144\,34.7876 32.147\,34.7391 32.0338))" +TMS,São Tomé Salazar,Neves,Neves,POINT(6.5517 0.3592),"POLYGON((6.4834 0.2836\,6.5498 0.2535\,6.5832 0.2743\,6.5914 0.3769\,6.4929 0.3185\,6.4834 0.2836))" +TNG,Tangier Ibn Battouta,arrondissement de Tanger-Medina طنجة المدينة,Tangier,POINT(-5.8039 35.7767),"POLYGON((-5.9484 35.7274\,-5.914 35.7098\,-5.8567 35.7323\,-5.8269 35.7781\,-5.7912 35.7722\,-5.808 35.787\,-5.7931 35.7911\,-5.9075 35.8012\,-5.9303 35.7872\,-5.9484 35.7274))" +TNR,Antananarivo Ivato,Analamanga,Antananarivo,POINT(47.5167 -18.9333),"POLYGON((46.5387 -17.796\,46.8166 -18.4115\,47.359 -18.7862\,47.5814 -19.3919\,47.8556 -19.5172\,48.0455 -18.3904\,47.8789 -17.747\,47.5089 -17.8911\,47.2801 -17.7192\,47.0523 -17.8785\,46.8758 -17.7013\,46.5387 -17.796))" +TOS,Tromsø Langnes,Tromsø,Tromsø,POINT(18.9428 69.6828),"POLYGON((17.16 69.8115\,18.1476 69.4958\,19.3626 69.4555\,19.6422 69.2821\,20.0436 69.5845\,19.7158 69.6751\,19.8416 69.8329\,19.3542 69.7984\,18.7845 70.0518\,17.8052 70.1684\,17.16 69.8115))" +//TPA,Tampa Int'l,Tampa,Tampa,POINT(-82.4447 27.9945),"POLYGON((-82.5865 27.973\,-82.4736 27.8218\,-82.3636 28.133\,-82.2931 28.1291\,-82.2539 28.1566\,-82.2541 28.1714\,-82.392 28.171\,-82.5865 27.973)\,(-82.4282 27.906\,-82.4272 27.9061\,-82.4272 27.906\,-82.4282 27.906)\,(-82.427 27.9046\,-82.4268 27.9047\,-82.4268 27.9046\,-82.427 27.9046)\,(-82.4113 27.9037\,-82.4101 27.9039\,-82.4101 27.9037\,-82.4113 27.9037)\,(-82.2573 28.1607\,-82.2618 28.1691\,-82.2565 28.1662\,-82.2573 28.1607))" +TPE,Taoyuan,臺北市,Taipei,POINT(121.5625 25.0375),"POLYGON((121.4571 25.108\,121.5059 25.0753\,121.5049 25.0497\,121.4834 
25.0358\,121.4905 25.0111\,121.5191 25.0198\,121.5613 24.9647\,121.6196 24.9672\,121.5995 24.9782\,121.5974 25.0147\,121.6649 25.0247\,121.6165 25.048\,121.6262 25.0961\,121.5975 25.1095\,121.6057 25.1367\,121.577 25.1672\,121.5838 25.1946\,121.5627 25.2095\,121.52 25.1693\,121.5026 25.173\,121.4571 25.108))" +TRC,Torreon Int'l,Torreón,Torreón,POINT(-103.4486 25.5394),"POLYGON((-103.4932 25.5385\,-103.3907 25.4346\,-103.3414 25.4101\,-103.3027 25.5795\,-103.2596 25.5795\,-103.2747 25.6129\,-103.3662 25.6261\,-103.3455 25.6456\,-103.3554 25.6914\,-103.3814 25.6403\,-103.406 25.645\,-103.4033 25.6281\,-103.4248 25.6237\,-103.416 25.6144\,-103.4547 25.5579\,-103.4932 25.5385))" +TRD,Trondheim Vaernes,Stjørdal,Stjørdalshalsen,POINT(10.9189 63.4712),"POLYGON((10.6539 63.4905\,10.7745 63.4553\,10.8382 63.4545\,10.8597 63.427\,10.8612 63.3801\,10.8442 63.3551\,10.8602 63.3548\,10.8517 63.353\,10.8546 63.3442\,10.9646 63.3294\,11.1891 63.3479\,11.4546 63.3262\,11.5287 63.4656\,11.7282 63.5934\,11.6286 63.598\,11.5744 63.5849\,11.5536 63.6003\,11.4226 63.5696\,11.3682 63.579\,10.7953 63.5574\,10.7412 63.5478\,10.6539 63.4905)\,(10.8596 63.3437\,10.8613 63.3499\,10.8704 63.3452\,10.8596 63.3437)\,(10.8785 63.3428\,10.8699 63.3515\,10.8857 63.3486\,10.8785 63.3428))" +TRN,Turin Int'l,Torino,Turin,POINT(7.6761 45.0792),"POLYGON((7.5778 45.0418\,7.6074 45.0298\,7.5964 45.0123\,7.6551 45.0073\,7.7226 45.0312\,7.7733 45.0779\,7.7416 45.0926\,7.7524 45.1026\,7.7174 45.1273\,7.7247 45.1368\,7.664 45.1402\,7.6545 45.1128\,7.6118 45.1071\,7.6196 45.0951\,7.6035 45.0711\,7.6251 45.0597\,7.6164 45.039\,7.5778 45.0418))" +TRV,Trivandrum Int'l,Thiruvananthapuram,Thiruvananthapuram,POINT(76.9525 8.4875),"POLYGON((76.7971 8.6186\,76.977 8.3914\,77.0239 8.3945\,77.0388 8.4124\,77.0181 8.4152\,77.0233 8.4323\,77.0038 8.4543\,77.0128 8.5495\,76.9756 8.5462\,76.9359 8.5736\,76.8674 8.6743\,76.8262 8.6539\,76.8355 8.6376\,76.8209 8.6176\,76.7971 8.6186))" +TRW,Bonriki Int'l,Tarawa Te Inainano,Tarawa,POINT(173.0176 1.3382),"POLYGON((172.8507 1.166\,173.5822 1.1594\,173.3836 1.4154\,173.4146 1.5474\,173.2819 1.5869\,173.138 1.3964\,173.132 1.3848\,173.1282 1.3813\,172.9692 1.367\,172.9702 1.3486\,172.9468 1.3245\,172.8507 1.166))" +TRZ,Tiruchirappalli,Tiruchirappalli,Trichinopoly,POINT(78.7047 10.7903),"POLYGON((78.6461 10.8332\,78.6634 10.8292\,78.6575 10.7935\,78.6934 10.7631\,78.6877 10.7273\,78.7086 10.7333\,78.7182 10.7753\,78.7403 10.7773\,78.735 10.8185\,78.6897 10.8429\,78.6461 10.8332))" +TSE,Astana Int'l,Есіл ауданы,Nur-Sultan,POINT(71.4222 51.1472),"POLYGON((71.218 51.1108\,71.286 51.0223\,71.4542 51.0009\,71.4509 50.9604\,71.4866 50.9308\,71.5469 50.9309\,71.5543 50.9683\,71.537 51.0069\,71.5558 51.0154\,71.5397 51.0482\,71.6083 51.0914\,71.4569 51.1061\,71.4236 51.1594\,71.358 51.1634\,71.2918 51.1893\,71.253 51.176\,71.218 51.1108))" +TSN,Tianjin Binhai Int'l,河北区,Tianjin,POINT(117.2056 39.1467),"POLYGON((117.1616 39.1827\,117.1921 39.1298\,117.2095 39.1499\,117.2564 39.1581\,117.2272 39.1919\,117.1616 39.1827))" +TUC,Teniente Gen. 
Benjamin Matienzo Int'l,San Miguel de Tucumán,San Miguel de Tucumán,POINT(-65.2167 -26.8167),"POLYGON((-65.2749 -26.8748\,-65.2306 -26.8746\,-65.2117 -26.9103\,-65.1621 -26.7963\,-65.2495 -26.7839\,-65.2749 -26.8748))" +TUK,Turbat Int'l,تحصیل تربت,Turbat,POINT(63.0544 26.0031),"POLYGON((62.5996 26.1236\,62.601 25.9464\,62.6642 25.9144\,62.7665 25.9013\,62.7903 25.8408\,62.9164 25.7932\,62.8961 25.5646\,62.964 25.571\,63.0271 25.5095\,63.3369 25.4826\,63.4109 25.4414\,63.4298 25.4106\,63.3935 25.3837\,63.4022 25.3588\,63.5227 25.4335\,63.6576 25.4846\,63.7639 25.59\,63.9885 25.6351\,64.115 25.6285\,64.1656 25.6499\,64.2443 25.6342\,64.2834 25.6431\,64.2948 25.6772\,64.2323 25.6967\,64.2329 25.7474\,64.2742 25.7918\,64.2659 25.8387\,64.308 25.888\,64.3624 26.0263\,64.4534 26.0437\,64.4765 26.0672\,64.482 26.1215\,64.4728 26.174\,64.3661 26.2544\,64.3432 26.2151\,64.2953 26.1853\,64.2222 26.1717\,64.2061 26.1879\,64.0658 26.1476\,64.0193 26.1475\,63.8031 26.2187\,63.7309 26.2279\,63.6389 26.2059\,63.2781 26.1876\,63.2033 26.1656\,62.9445 26.1885\,62.6547 26.1802\,62.5996 26.1236))" +TUL,Tulsa Int'l,Tulsa,Tulsa,POINT(-95.9042 36.1283),"POLYGON((-95.9734 36.0431\,-95.6819 36.1048\,-95.7597 36.2351\,-95.9584 36.2351\,-95.9369 36.2494\,-95.9375 36.3364\,-95.8932 36.3365\,-95.9376 36.3365\,-95.9373 36.2497\,-95.9949 36.2497\,-95.985 36.242\,-95.9956 36.25\,-96.0732 36.2447\,-96.0744 36.1479\,-96.0081 36.1424\,-96.0295 36.032\,-95.9734 36.0431)\,(-96.074 36.1483\,-96.0473 36.1612\,-96.0474 36.1571\,-96.074 36.1483)\,(-95.9938 36.2071\,-96.0727 36.2447\,-95.9936 36.2422\,-95.9938 36.2071)\,(-96.0343 36.1487\,-96.0334 36.1499\,-96.0294 36.1506\,-96.0343 36.1487)\,(-96.0276 36.1515\,-96.0292 36.1551\,-96.0205 36.1537\,-96.0276 36.1515)\,(-96.0274 36.1509\,-96.0248 36.1513\,-96.0274 36.1502\,-96.0274 36.1509)\,(-95.9902 36.1067\,-96.007 36.1262\,-95.9925 36.1261\,-95.9902 36.1067)\,(-95.9588 36.2351\,-95.9841 36.2494\,-95.9587 36.2493\,-95.9588 36.2351)\,(-95.9226 36.1773\,-95.9291 36.1861\,-95.9226 36.188\,-95.9226 36.1773)\,(-95.9252 35.984\,-95.9245 35.9824\,-95.9276 35.9862\,-95.9252 35.984)\,(-95.8691 36.1844\,-95.8856 36.2135\,-95.869 36.2133\,-95.8691 36.1844))" +TUN,Aeroport Tunis,ولاية تونس,Tunis,POINT(10.1817 36.8064),"POLYGON((10.0038 36.7869\,10.0258 36.7733\,10.0323 36.7396\,10.075 36.7241\,10.079 36.6988\,10.1248 36.6945\,10.1513 36.7118\,10.131 36.7254\,10.1444 36.7358\,10.1763 36.72\,10.2062 36.7407\,10.2204 36.7587\,10.2058 36.7785\,10.2239 36.8029\,10.3104 36.8065\,10.3085 36.8259\,10.3548 36.8695\,10.3221 36.8932\,10.3173 36.9176\,10.2521 36.943\,10.2849 36.9012\,10.2714 36.875\,10.2887 36.8671\,10.2311 36.8569\,10.2325 36.8712\,10.2126 36.8485\,10.1694 36.8405\,10.1404 36.8544\,10.1167 36.8445\,10.1192 36.7975\,10.0515 36.7866\,10.0207 36.7985\,10.0038 36.7869))" +TUS,Tucson Int'l,Tucson,Tucson,POINT(-110.8787 32.1541),"POLYGON((-111.0441 32.3209\,-110.8238 32.1329\,-110.9277 32.0015\,-110.7085 32.034\,-110.7719 32.2552\,-111.0441 32.3209)\,(-110.9775 32.1885\,-110.9608 32.2032\,-110.9614 32.186\,-110.9775 32.1885))" +TXL,Berlin-Tegel Int'l,Hohen Neuendorf,Hohen Neuendorf,POINT(13.2833 52.6667),"POLYGON((13.2351 52.6765\,13.2792 52.6572\,13.3041 52.6699\,13.2934 52.6838\,13.2446 52.6817\,13.2607 52.6878\,13.2413 52.6844\,13.2351 52.6765))" +TYN,Taiyuan Wusu Int'l,杏花岭区,Taiyuan,POINT(112.5425 37.8733),"POLYGON((112.5309 37.8683\,112.6665 37.8697\,112.6787 37.9114\,112.7473 37.9219\,112.7566 37.9113\,112.7707 37.9235\,112.736 37.9566\,112.7229 37.9384\,112.6917 37.9425\,112.6538 
37.9737\,112.6217 37.9783\,112.5588 37.9479\,112.5619 37.9106\,112.5318 37.8898\,112.5309 37.8683))" +UET,Quetta Int'l,تحصیل کوئٹہ شہر,Quetta,POINT(67.0 30.1833),"POLYGON((66.8273 30.0145\,66.8505 30.0102\,66.8674 30.0391\,66.9304 30.0239\,67.0304 30.0404\,67.0206 30.0689\,67.0405 30.0913\,67.0453 30.1357\,67.0742 30.1503\,67.0677 30.1738\,67.1306 30.1876\,67.1652 30.2208\,67.2271 30.2315\,67.2434 30.25\,67.1069 30.2188\,67.0898 30.2363\,67.0502 30.2333\,67.0183 30.2164\,66.9581 30.2534\,66.9527 30.2327\,66.9061 30.2435\,66.877 30.1018\,66.8497 30.0768\,66.8273 30.0145))" +UFA,Ufa Int'l,Кировский район,Ufa,POINT(55.9475 54.7261),"POLYGON((55.8739 54.6624\,55.9308 54.6423\,55.9106 54.5875\,55.9112 54.5748\,55.8878 54.5669\,55.9153 54.5441\,55.8741 54.5497\,55.8914 54.5378\,55.8926 54.5008\,55.9244 54.5047\,55.9748 54.5463\,55.9531 54.5757\,55.9126 54.5761\,55.9166 54.6009\,55.9274 54.5937\,55.953 54.5985\,55.9168 54.6011\,55.932 54.6418\,56.0481 54.6402\,56.054 54.6635\,56.085 54.6775\,56.0935 54.7047\,56.077 54.7032\,56.0907 54.7306\,56.0443 54.7533\,56.029 54.7163\,55.9297 54.7291\,55.9113 54.7168\,55.919 54.6862\,55.8739 54.6624))" +UIII,Irkutsk N.W.,Правобережный административный округ,Irkutsk,POINT(104.2833 52.2833),"POLYGON((104.2632 52.2866\,104.2756 52.2652\,104.3121 52.2823\,104.4427 52.2889\,104.438 52.3051\,104.4728 52.3297\,104.459 52.3295\,104.4547 52.3593\,104.3332 52.3676\,104.3095 52.334\,104.274 52.3581\,104.2758 52.3156\,104.2917 52.3023\,104.2632 52.2866))" +UIO,Mariscal Sucre Int'l,Quito,Quito,POINT(-78.5125 -0.22),"POLYGON((-78.9477 0.2043\,-78.6595 -0.0275\,-78.6469 -0.137\,-78.8406 -0.0985\,-78.7322 -0.3201\,-78.4802 -0.457\,-78.4631 -0.2908\,-78.4269 -0.5713\,-78.279 -0.5676\,-78.1649 -0.1761\,-78.3978 -0.032\,-78.3106 0.1852\,-78.9477 0.2043))" +ULN,Chinggis Khaan Int'l,Сүхбаатар ᠰᠦᠬᠡᠪᠠᠭᠠᠲᠤᠷ,Ulaanbaatar,POINT(106.9172 47.9203),"POLYGON((106.8635 48.1391\,106.89 48.1228\,106.8945 48.0644\,106.9329 48.0051\,106.9038 47.9296\,106.9164 47.9164\,106.8948 47.9081\,106.9328 47.9074\,106.9281 47.9618\,107.0019 47.9825\,107.0036 48.0106\,107.0343 48.0368\,107.0278 48.0882\,106.9675 48.1973\,106.8752 48.185\,106.8635 48.1391))" +UPG,Sultan Hasanuddin Int'l,Makassar,Makassar,POINT(119.4362 -5.1619),"POLYGON((119.378 -5.195\,119.4064 -5.2324\,119.3973 -5.1863\,119.4572 -5.1918\,119.4637 -5.1755\,119.5158 -5.1919\,119.5195 -5.1658\,119.4937 -5.1515\,119.5184 -5.151\,119.5397 -5.122\,119.5315 -5.0684\,119.4794 -5.0599\,119.4484 -5.0866\,119.4513 -5.1036\,119.4163 -5.0936\,119.4146 -5.1034\,119.4304 -5.1004\,119.433 -5.1015\,119.4337 -5.1041\,119.4094 -5.1137\,119.4085 -5.1468\,119.3966 -5.1416\,119.378 -5.195))" +URC,Ürümqi Diwopu Int'l,水磨沟区 بۇلاقتاغ رايونى,Ürümqi,POINT(87.6125 43.8225),"POLYGON((87.5967 43.8745\,87.6019 43.8028\,87.6714 43.8227\,87.7081 43.7625\,87.7917 43.764\,87.8304 43.7861\,87.8957 43.7827\,87.9584 43.7587\,88.0019 43.7558\,88.0143 43.7703\,87.9703 43.8037\,87.9153 43.8103\,87.8681 43.8643\,87.8208 43.8881\,87.7713 43.8772\,87.7791 43.8491\,87.7157 43.8623\,87.7013 43.8704\,87.713 43.8872\,87.7041 43.9075\,87.6856 43.917\,87.5967 43.8745))" +USN,Ulsan,남구,Ulsan,POINT(129.3167 35.55),"POLYGON((129.2367 35.536\,129.2744 35.5292\,129.3575 35.4519\,129.3967 35.4406\,129.3925 35.5098\,129.3746 35.5395\,129.2658 35.5569\,129.2367 35.536))" +UVF,Hewanorra Int'l,Vieux Fort,Vieux Fort,POINT(-60.954 13.728),"POLYGON((-60.997 13.8382\,-60.9822 13.7844\,-60.9851 13.7398\,-60.94 13.7096\,-60.9297 13.7692\,-60.997 13.8382))" +VAV,Vava'u Int'l,Vavaʻu,Neiafu,POINT(-173.9831 
-18.6508),"POLYGON((-174.3818 -18.8073\,-174.3431 -18.9068\,-174.2887 -18.9572\,-174.0639 -19.0487\,-173.9962 -19.0595\,-173.9275 -19.049\,-173.8365 -18.9917\,-173.7598 -18.8539\,-173.7095 -18.6955\,-173.7248 -18.5635\,-173.7825 -18.454\,-173.835 -18.4048\,-173.9355 -18.3704\,-174.0408 -18.3875\,-174.1064 -18.4328\,-174.1765 -18.4578\,-174.298 -18.5622\,-174.3473 -18.6488\,-174.3818 -18.8073))" +VCE,Venice Marco Polo,Venezia,Mestre,POINT(12.2381 45.4906),"POLYGON((12.1668 45.4999\,12.1816 45.5058\,12.1792 45.453\,12.2602 45.4178\,12.2475 45.3958\,12.2661 45.3964\,12.2657 45.3601\,12.2431 45.3256\,12.2958 45.3323\,12.2751 45.2365\,12.3142 45.2312\,12.2982 45.2431\,12.3042 45.2804\,12.3237 45.328\,12.3452 45.3292\,12.3297 45.349\,12.3601 45.3959\,12.4011 45.4289\,12.4357 45.4218\,12.416 45.4409\,12.4747 45.4933\,12.527 45.5007\,12.5966 45.5452\,12.5483 45.578\,12.4151 45.5452\,12.4072 45.5313\,12.2994 45.5454\,12.2465 45.5305\,12.1813 45.5531\,12.1688 45.531\,12.1999 45.517\,12.1668 45.4999))" +VCP,Viracopos-Campinas Int'l,Campinas,Campinas,POINT(-47.0608 -22.9058),"POLYGON((-47.2463 -22.91\,-47.2022 -22.9303\,-47.2278 -22.9566\,-47.2057 -22.9733\,-47.2033 -22.9991\,-47.2317 -23.0097\,-47.2061 -23.0309\,-47.1524 -23.0385\,-47.1442 -23.058\,-47.1091 -23.0612\,-47.0486 -23.0514\,-47.0726 -23.0247\,-47.0661 -22.993\,-47.0284 -22.9727\,-46.9812 -22.9036\,-46.9522 -22.9085\,-46.9355 -22.9311\,-46.8878 -22.9342\,-46.8761 -22.9312\,-46.8893 -22.9078\,-46.8294 -22.9071\,-46.8153 -22.8879\,-46.8272 -22.8729\,-46.8486 -22.8771\,-46.8612 -22.8259\,-46.8916 -22.8159\,-46.9014 -22.7671\,-46.9545 -22.7306\,-46.9854 -22.7457\,-47.0693 -22.7296\,-47.1083 -22.7545\,-47.0947 -22.7943\,-47.1117 -22.8001\,-47.1089 -22.8166\,-47.1638 -22.8054\,-47.1533 -22.8315\,-47.1678 -22.9045\,-47.2463 -22.91))" +VE23,Burnpur,Asansol Kulti Township,Āsansol,POINT(86.99 23.68),"POLYGON((86.7971 23.7023\,86.9122 23.6309\,87.0011 23.63\,87.0312 23.6562\,87.0016 23.7224\,86.9787 23.7306\,86.9617 23.7103\,86.9298 23.7455\,86.9068 23.7382\,86.9199 23.7496\,86.9081 23.7447\,86.9069 23.7574\,86.8942 23.7381\,86.8348 23.7857\,86.7971 23.7023))" +VER,Gen. 
Heriberto Jara Int'l,Municipio de Veracruz,Veracruz,POINT(-96.1533 19.1903),"POLYGON((-96.3394 19.2419\,-96.3321 19.1996\,-96.3019 19.187\,-96.3139 19.1501\,-96.2942 19.1585\,-96.2636 19.1333\,-96.244 19.145\,-96.1904 19.1027\,-96.1497 19.1095\,-96.1185 19.1687\,-96.1257 19.2161\,-96.1632 19.218\,-96.1716 19.247\,-96.1538 19.2294\,-96.1648 19.2466\,-96.222 19.2653\,-96.2264 19.2208\,-96.273 19.2648\,-96.3096 19.2327\,-96.3394 19.2419))" +VFA,Victoria Falls,Livingstone District,Livingstone,POINT(25.8667 -17.85),"POLYGON((25.6524 -17.7211\,25.6549 -17.8263\,25.6972 -17.8122\,25.7051 -17.8386\,25.7669 -17.8494\,25.8191 -17.8978\,25.8444 -17.8974\,25.8575 -17.9747\,25.9076 -17.9842\,25.9771 -17.7866\,26.0277 -17.6972\,26.0516 -17.6913\,26.0605 -17.6525\,26.0367 -17.6347\,25.9086 -17.6079\,25.8698 -17.6555\,25.8311 -17.6615\,25.7983 -17.727\,25.6524 -17.7211))" +VGA,Vijaywada,Vijayawada (Urban),Vijayavāda,POINT(80.6305 16.5193),"POLYGON((80.585 16.5233\,80.6427 16.4709\,80.6637 16.4913\,80.6879 16.4863\,80.6625 16.5219\,80.6688 16.5794\,80.6419 16.5488\,80.5935 16.5499\,80.585 16.5233))" +VIBY,Bareilly,Bareilly,Bareilly,POINT(79.415 28.364),"POLYGON((79.2391 28.4033\,79.2855 28.3582\,79.313 28.3626\,79.3272 28.2996\,79.3427 28.3186\,79.3639 28.2828\,79.3825 28.2892\,79.3845 28.2535\,79.4051 28.2297\,79.3907 28.2151\,79.4721 28.2315\,79.49 28.2966\,79.5545 28.3298\,79.5905 28.3195\,79.5826 28.3456\,79.6291 28.3827\,79.5761 28.4139\,79.5772 28.4456\,79.516 28.467\,79.5299 28.4888\,79.4758 28.5038\,79.4796 28.5213\,79.4624 28.5355\,79.4937 28.6013\,79.4554 28.6104\,79.4202 28.5928\,79.4074 28.617\,79.3646 28.6042\,79.3632 28.5776\,79.4031 28.5795\,79.3964 28.5509\,79.3766 28.5473\,79.3906 28.5278\,79.3525 28.4934\,79.3649 28.4564\,79.3355 28.4683\,79.3112 28.4465\,79.3513 28.389\,79.3188 28.4058\,79.3081 28.4043\,79.3278 28.3953\,79.3197 28.388\,79.258 28.4129\,79.2391 28.4033))" +VIDX,Hindon Air Force Station,Ghaziabad,Ghāziābād,POINT(77.42 28.67),"POLYGON((77.1992 28.8045\,77.2587 28.7526\,77.2558 28.7385\,77.2909 28.7227\,77.2913 28.7064\,77.3311 28.7132\,77.3168 28.6377\,77.3421 28.6102\,77.3689 28.6088\,77.3584 28.6178\,77.3849 28.6544\,77.3911 28.6343\,77.4337 28.6222\,77.4566 28.6379\,77.4776 28.6273\,77.5006 28.6488\,77.542 28.6335\,77.5419 28.6517\,77.5662 28.6529\,77.546 28.6848\,77.6165 28.7216\,77.5505 28.7807\,77.5304 28.7584\,77.5467 28.7487\,77.5405 28.7379\,77.4964 28.7321\,77.4538 28.7649\,77.4226 28.7631\,77.418 28.7796\,77.3634 28.7861\,77.3482 28.806\,77.3046 28.7906\,77.2779 28.835\,77.249 28.8227\,77.222 28.8339\,77.2187 28.8108\,77.1992 28.8045))" +VIE,Vienna Schwechat Int'l,Sopron,Sopron,POINT(16.5831 47.6849),"POLYGON((16.4215 47.6653\,16.5514 47.6324\,16.5992 47.6482\,16.6196 47.633\,16.6398 47.6551\,16.6818 47.6279\,16.7025 47.6709\,16.749 47.6814\,16.7209 47.7353\,16.67 47.7408\,16.6705 47.6939\,16.6389 47.6936\,16.5926 47.7591\,16.5376 47.74\,16.5522 47.7225\,16.5279 47.7012\,16.5383 47.698\,16.529 47.6799\,16.4503 47.6625\,16.4876 47.6847\,16.4479 47.6965\,16.4434 47.6741\,16.4215 47.6653))" +VIX,Eurico de Aguiar Salles,Vitória,Vitória,POINT(-40.3083 -20.2889),"POLYGON((-40.3623 -20.3118\,-40.3545 -20.327\,-40.2744 -20.32\,-40.2905 -20.306\,-40.2768 -20.3019\,-40.2772 -20.2987\,-40.3163 -20.2797\,-40.3024 -20.2548\,-40.3114 -20.2443\,-40.3623 -20.3118))" +VLC,Valencia,Paterna,Paterna,POINT(-0.4406 39.5028),"POLYGON((-0.5187 39.5286\,-0.498 39.5095\,-0.4249 39.4887\,-0.4316 39.5061\,-0.4185 39.5178\,-0.482 39.5654\,-0.4775 39.5447\,-0.5064 39.5539\,-0.5187 39.5286))" 
+VLI,Bauerfield Int'l,Shefa,Port-Vila,POINT(168.3167 -17.7333),"POLYGON((167.4606 -16.9799\,167.977 -17.815\,167.9977 -17.8463\,168.3538 -18.3437\,168.5283 -18.2717\,169.3133 -18.35\,168.8097 -17.6352\,168.8571 -17.0216\,168.8574 -16.9968\,168.8524 -16.9608\,168.8471 -16.9422\,168.7339 -16.6093\,168.5617 -16.6267\,167.9617 -16.5\,167.4606 -16.9799))" +VLN,Zim Valencia,Parroquia Los Guayos,Los Guayos,POINT(-67.9333 10.1833),"POLYGON((-67.9507 10.1871\,-67.9166 10.1521\,-67.8554 10.146\,-67.8888 10.1624\,-67.9103 10.2139\,-67.9507 10.1871))" +VNO,Vilnius,Vilnius,Vilnius,POINT(25.28 54.6872),"POLYGON((25.0245 54.6232\,25.0661 54.6197\,25.0784 54.5991\,25.1153 54.6013\,25.0925 54.5898\,25.1213 54.5697\,25.1864 54.5695\,25.1851 54.5914\,25.1626 54.5999\,25.1713 54.6055\,25.237 54.5748\,25.2408 54.593\,25.314 54.5993\,25.3309 54.6097\,25.3199 54.6471\,25.3634 54.6396\,25.4094 54.6807\,25.4804 54.6861\,25.4604 54.6969\,25.4647 54.7085\,25.4083 54.7179\,25.4515 54.7409\,25.455 54.8017\,25.3772 54.8058\,25.3662 54.826\,25.3597 54.8063\,25.3315 54.8095\,25.3215 54.8182\,25.3344 54.8236\,25.2937 54.8272\,25.2909 54.7981\,25.2757 54.7942\,25.2848 54.7744\,25.2142 54.7501\,25.1746 54.7532\,25.1904 54.7359\,25.2053 54.7452\,25.199 54.7131\,25.1543 54.7297\,25.1484 54.7108\,25.101 54.7022\,25.1804 54.7013\,25.1895 54.6808\,25.1423 54.6814\,25.0245 54.6232))" +VNS,Varanasi,Sadar,Vārānasi,POINT(83.0128 25.3189),"POLYGON((82.6666 25.2534\,82.7126 25.2615\,82.7481 25.2385\,82.7766 25.2704\,82.7756 25.234\,82.792 25.2282\,82.7844 25.249\,82.8195 25.2558\,82.8041 25.2192\,82.8157 25.2045\,82.7827 25.2037\,82.8035 25.1699\,82.8082 25.189\,82.8609 25.1689\,82.8697 25.1994\,82.9003 25.2152\,82.9067 25.1957\,83.0066 25.2052\,83.0308 25.2491\,83.0674 25.248\,83.0118 25.3059\,83.0581 25.3301\,83.1413 25.3219\,83.1709 25.3517\,83.1882 25.4163\,83.1186 25.4422\,83.0652 25.4352\,83.0522 25.4466\,83.0433 25.4268\,83.0057 25.4391\,82.9725 25.4097\,82.9728 25.3927\,82.9184 25.3965\,82.9434 25.3604\,82.934 25.3503\,82.8637 25.3763\,82.8626 25.3925\,82.8239 25.383\,82.8134 25.4068\,82.7742 25.375\,82.7142 25.3906\,82.7062 25.3593\,82.6828 25.3557\,82.7039 25.3232\,82.6666 25.2534))" +VOG,Gumrak,Центральный район,Volgograd,POINT(44.5147 48.7086),"POLYGON((44.4889 48.7086\,44.5047 48.7041\,44.5206 48.6937\,44.5544 48.7233\,44.5653 48.7368\,44.5517 48.7411\,44.5505 48.7394\,44.5458 48.7406\,44.543 48.742\,44.5405 48.7475\,44.5285 48.7514\,44.5248 48.7423\,44.5289 48.7379\,44.5276 48.7327\,44.4889 48.7086))" +VOZ,Voronezh-Chertovitskoye,Центральный район,Voronezh,POINT(39.2106 51.6717),"POLYGON((39.2256 51.8075\,39.1808 51.713\,39.2116 51.7029\,39.2137 51.6824\,39.1857 51.6736\,39.2235 51.6534\,39.2498 51.7113\,39.2472 51.7605\,39.2848 51.8035\,39.2256 51.8075))" +VRA,Juan Gualberto Gomez,Ciudad de Cárdenas,Cárdenas,POINT(-81.2036 23.0428),"POLYGON((-81.2431 23.014\,-81.189 23.0087\,-81.1864 23.0461\,-81.2366 23.0615\,-81.2273 23.018\,-81.2431 23.014))" +VSA,Villahermosa,Macuspana,Macuspana,POINT(-92.6 17.7667),"POLYGON((-92.718 17.9149\,-92.558 17.5402\,-92.1299 17.767\,-92.3857 18.1603\,-92.718 17.9149))" +VTE,Vientiane,ເມືອງຈັນທະບູລີ,Vientiane,POINT(102.6 17.9667),"POLYGON((102.5827 18.0194\,102.5877 17.9892\,102.6068 17.9961\,102.5945 17.9854\,102.6064 17.9809\,102.6034 17.9553\,102.6305 17.988\,102.6265 18.0263\,102.6038 18.0359\,102.5827 18.0194))" +VTZ,Vishakapatnam,Visakhapatnam (Urban),Vishākhapatnam,POINT(83.2978 17.7042),"POLYGON((83.2245 17.751\,83.2224 17.6832\,83.2642 17.6741\,83.2808 17.6891\,83.2638 
17.7121\,83.28 17.6951\,83.2834 17.7118\,83.2836 17.6894\,83.3063 17.6882\,83.3594 17.7611\,83.3342 17.7855\,83.3043 17.7884\,83.3076 17.7619\,83.3303 17.7503\,83.2245 17.751))" +VVI,Viru Viru Int'l,Municipio Warnes,Warnes,POINT(-63.1667 -17.5167),"POLYGON((-63.2681 -17.4184\,-63.2113 -17.5146\,-63.1969 -17.6053\,-63.2071 -17.6848\,-63.0872 -17.6569\,-62.9605 -17.6049\,-62.938 -17.4355\,-62.9537 -17.4295\,-62.9582 -17.3886\,-62.923 -17.343\,-62.9227 -17.3208\,-62.9329 -17.2648\,-62.9676 -17.2636\,-62.9788 -17.2413\,-62.9612 -17.2128\,-62.9774 -17.1541\,-63.0213 -17.1326\,-63.0392 -17.1044\,-62.9882 -17.0488\,-63.0116 -17.057\,-63.0341 -17.0356\,-63.0775 -17.0759\,-63.0557 -17.1212\,-63.0872 -17.202\,-63.1022 -17.337\,-63.1674 -17.3716\,-63.2269 -17.3734\,-63.2681 -17.4184))" +WAW,Okecie Int'l,Piaseczno,Piaseczno,POINT(21.0167 52.0667),"POLYGON((20.9729 52.0526\,21.0241 52.0495\,21.0613 52.0878\,21.0115 52.0965\,21.0112 52.0716\,20.987 52.0737\,20.9783 52.0616\,20.9918 52.0572\,20.9729 52.0526))" +WDH,Windhoek Hosea Kutako Int'l,Khomas Region,Windhoek,POINT(17.0836 -22.57),"POLYGON((15.7234 -23.9616\,16.2632 -23.8465\,16.1643 -23.9551\,16.3354 -24.0454\,16.3873 -23.8645\,16.5188 -23.8904\,16.3053 -23.8084\,16.3297 -23.6634\,16.582 -23.5942\,16.4866 -23.507\,16.624 -23.2203\,17.4911 -23.2872\,17.443 -23.6223\,18.0859 -23.6533\,18.4352 -23.5013\,18.2121 -22.4803\,17.95 -22.3773\,18.1171 -21.7085\,17.5734 -21.8478\,17.4508 -22.1028\,17.1048 -22.2686\,16.7022 -22.2408\,16.7347 -22.3457\,16.0728 -22.5739\,15.9515 -22.6949\,16.0679 -22.8342\,15.8952 -22.8724\,15.9433 -23.205\,15.7967 -23.2527\,15.8495 -23.7951\,15.7234 -23.9616))" +WIIT,Radin Inten II,Bandar Lampung,Bandar Lampung,POINT(105.2667 -5.45),"POLYGON((105.1784 -5.4067\,105.1912 -5.4292\,105.1834 -5.4514\,105.2216 -5.4462\,105.2248 -5.4897\,105.2899 -5.4453\,105.3203 -5.4693\,105.3263 -5.5035\,105.3502 -5.514\,105.3258 -5.466\,105.3408 -5.4155\,105.3066 -5.3583\,105.2914 -5.3664\,105.2942 -5.3482\,105.2606 -5.3508\,105.2524 -5.3283\,105.1784 -5.4067))" +WLG,Wellington Int'l,Lower Hutt City,Lower Hutt,POINT(174.9167 -41.2167),"POLYGON((174.8479 -41.3595\,174.8759 -41.3878\,174.8678 -41.4095\,174.915 -41.4374\,174.9831 -41.4029\,174.9587 -41.3829\,174.9934 -41.345\,175.0224 -41.3482\,175.0867 -41.2792\,175.0819 -41.2179\,174.9994 -41.2146\,174.9911 -41.2038\,175.0182 -41.1784\,174.975 -41.1328\,174.9487 -41.1531\,174.9111 -41.14\,174.8498 -41.2266\,174.8896 -41.2326\,174.9125 -41.2583\,174.8479 -41.3595))" +WWK,Wewak Int'l,East Sepik,Wewak,POINT(143.6333 -3.55),"POLYGON((141.3404 -4.2587\,141.3479 -4.6017\,141.3744 -4.6128\,142.3099 -4.6132\,142.3229 -4.6361\,142.3246 -4.9408\,142.3866 -4.9913\,142.4085 -4.994\,142.4521 -4.9626\,142.5608 -4.9919\,142.673 -4.9837\,142.7507 -5.0422\,142.787 -5.0268\,142.851 -5.0298\,143.0141 -5.1301\,143.0518 -5.1051\,143.1003 -5.1037\,143.2516 -5.1208\,143.3178 -5.1582\,143.3539 -5.1365\,144.0291 -4.9941\,144.1049 -4.9435\,144.5742 -4.5878\,144.5816 -4\,144.9167 -4\,145 -3.9167\,145 -2\,143.0833 -2\,143.0833 -3.53\,143.0275 -3.5374\,142.9587 -3.4948\,142.8918 -3.4877\,142.8261 -3.4553\,142.7726 -3.4012\,142.696 -3.4442\,142.6199 -3.4398\,142.6423 -3.5165\,142.6159 -3.555\,142.6099 -3.6674\,142.6388 -3.792\,142.6292 -3.9057\,142.617 -3.9202\,142.54 -3.9298\,142.5318 -4.0543\,141.835 -4.0594\,141.773 -4.0255\,141.7631 -4.0005\,141.7166 -3.9904\,141.3404 -4.2587))" +XIY,Hsien Yang,莲湖区,Xi’an,POINT(108.9 34.2667),"POLYGON((108.8492 34.285\,108.8497 34.2567\,108.8843 34.2544\,108.8851 34.2388\,108.9345 
34.2528\,108.9487 34.2941\,108.8492 34.285))" +XMN,Xiamen Gaoqi Int'l,思明区,Xiamen,POINT(118.0819 24.4797),"POLYGON((118.0634 24.4801\,118.0478 24.4252\,118.1317 24.3822\,118.1469 24.41\,118.2594 24.4954\,118.1951 24.5062\,118.1402 24.4829\,118.1197 24.5071\,118.0634 24.4801))" +XXC,Cascais,"Sintra (Santa Maria e São Miguel\, São Martinho e São Pedro de Penaferrim)",Sintra,POINT(-9.3883 38.7992),"POLYGON((-9.4527 38.8367\,-9.4257 38.7897\,-9.4366 38.7668\,-9.3542 38.7416\,-9.3596 38.7586\,-9.3416 38.7788\,-9.3667 38.8086\,-9.3414 38.8207\,-9.4527 38.8367))" +YAM,Sault Ste Marie,Sault Ste. Marie,Sault Ste. Marie,POINT(-84.3723 46.4817),"POLYGON((-84.347 46.5023\,-84.4552 46.4628\,-84.2894 46.4626\,-84.3024 46.4871\,-84.347 46.5023))" +YBR,Brandon,Minot,Minot,POINT(-101.278 48.2375),"POLYGON((-101.2972 48.2743\,-101.3283 48.2658\,-101.3398 48.2292\,-101.3295 48.2204\,-101.343 48.2193\,-101.3068 48.1922\,-101.2598 48.2123\,-101.2732 48.2255\,-101.2471 48.2193\,-101.1877 48.2326\,-101.261 48.2299\,-101.2852 48.2526\,-101.2592 48.2594\,-101.2847 48.2689\,-101.2806 48.2796\,-101.3062 48.2828\,-101.2972 48.2743)\,(-101.3322 48.2401\,-101.3341 48.2439\,-101.3322 48.2439\,-101.3322 48.2401)\,(-101.3274 48.2522\,-101.327 48.2545\,-101.3255 48.2538\,-101.3274 48.2522)\,(-101.325 48.2566\,-101.3232 48.2582\,-101.3232 48.2566\,-101.325 48.2566)\,(-101.3225 48.2494\,-101.3183 48.2504\,-101.3184 48.2494\,-101.3225 48.2494)\,(-101.2986 48.269\,-101.2985 48.2711\,-101.297 48.269\,-101.2986 48.269)\,(-101.2959 48.2734\,-101.2956 48.2743\,-101.2956 48.2732\,-101.2959 48.2734)\,(-101.2484 48.2231\,-101.2476 48.2244\,-101.2471 48.222\,-101.2484 48.2231))" +YDQ,Dawson Cr.,Dawson Creek,Dawson Creek,POINT(-120.2356 55.7606),"POLYGON((-120.2893 55.767\,-120.2247 55.7305\,-120.147 55.7377\,-120.2116 55.7485\,-120.2247 55.7811\,-120.2893 55.7886\,-120.2762 55.7812\,-120.2893 55.767))" +YEG,Edmonton Int'l,City of Leduc,Leduc,POINT(-113.5492 53.2594),"POLYGON((-113.6104 53.2797\,-113.6105 53.2433\,-113.5613 53.2432\,-113.5616 53.2361\,-113.5006 53.2361\,-113.5007 53.2505\,-113.4884 53.2505\,-113.4885 53.2797\,-113.5129 53.2797\,-113.5128 53.3086\,-113.5435 53.3085\,-113.5469 53.3043\,-113.547 53.2942\,-113.5492 53.2942\,-113.5506 53.2797\,-113.6104 53.2797))" +YEI,Yenisehir,İnegöl,İnegöl,POINT(29.5097 40.0806),"POLYGON((29.2423 40.056\,29.2971 40.0319\,29.3071 39.9949\,29.3693 39.9421\,29.3736 39.9138\,29.4515 39.9066\,29.4238 39.8269\,29.4717 39.8384\,29.5262 39.877\,29.7605 39.8678\,29.7416 39.8855\,29.7473 39.9023\,29.7981 39.9319\,29.7402 39.9868\,29.7541 40.0018\,29.7216 40.0995\,29.7238 40.1356\,29.6441 40.1301\,29.6282 40.1498\,29.565 40.154\,29.5194 40.1953\,29.455 40.2139\,29.4013 40.1851\,29.3712 40.1\,29.2423 40.056))" +YFB,Iqaluit,Iqaluit ᐃᖃᓗᐃᑦ,Iqaluit,POINT(-68.5107 63.7598),"POLYGON((-68.6012 63.7267\,-68.5101 63.7085\,-68.4254 63.7214\,-68.4618 63.7496\,-68.468 63.7644\,-68.5561 63.7763\,-68.588 63.7761\,-68.5993 63.7466\,-68.6012 63.7267))" +YHM,John C. 
Munro Hamilton Int'l,Brantford,Brantford,POINT(-80.25 43.1667),"POLYGON((-80.3536 43.1578\,-80.3118 43.1484\,-80.3088 43.1301\,-80.3287 43.1092\,-80.2714 43.0942\,-80.2336 43.1241\,-80.2157 43.1197\,-80.2166 43.1409\,-80.1933 43.1486\,-80.2122 43.1662\,-80.1897 43.1697\,-80.2159 43.1778\,-80.2227 43.2054\,-80.2555 43.1972\,-80.2609 43.2084\,-80.336 43.1909\,-80.3536 43.1578))" +YHZ,Halifax Int'l,Moncton,Moncton,POINT(-64.7714 46.1328),"POLYGON((-64.9154 46.1416\,-64.8822 46.0887\,-64.9125 46.0821\,-64.8976 46.0492\,-64.8008 46.0668\,-64.7644 46.0994\,-64.6859 46.1241\,-64.6904 46.148\,-64.7266 46.1385\,-64.7455 46.1765\,-64.8108 46.1748\,-64.8563 46.1496\,-64.8655 46.1601\,-64.9154 46.1416))" +YKA,Kamloops,Kamloops,Kamloops,POINT(-120.3408 50.6761),"POLYGON((-120.5424 50.7254\,-120.5192 50.7118\,-120.5192 50.6927\,-120.4733 50.6866\,-120.473 50.62\,-120.0807 50.6194\,-120.0808 50.6485\,-120.042 50.6489\,-120.0678 50.669\,-120.0632 50.6573\,-120.3427 50.6805\,-120.3563 50.7357\,-120.3241 50.7821\,-120.3002 50.7818\,-120.3003 50.801\,-120.2773 50.8082\,-120.2485 50.8664\,-120.3119 50.8664\,-120.3118 50.8446\,-120.3348 50.8227\,-120.3808 50.8227\,-120.3808 50.7429\,-120.5424 50.7254))" +YMX,Mirabel Int'l,Montréal,Montréal,POINT(-73.5617 45.5089),"POLYGON((-73.9742 45.4664\,-73.9247 45.4406\,-73.8936 45.4467\,-73.8376 45.4978\,-73.7903 45.5046\,-73.7642 45.4916\,-73.7738 45.482\,-73.7504 45.4611\,-73.7222 45.4827\,-73.7081 45.4735\,-73.7246 45.4612\,-73.723 45.4218\,-73.6278 45.4101\,-73.5373 45.4225\,-73.5188 45.4521\,-73.5307 45.5375\,-73.5003 45.5747\,-73.5005 45.6094\,-73.5683 45.6342\,-73.5447 45.6482\,-73.4848 45.6318\,-73.4743 45.7022\,-73.5235 45.6993\,-73.6076 45.6448\,-73.6787 45.5533\,-73.7634 45.512\,-73.8532 45.5114\,-73.8954 45.5266\,-73.9742 45.4664)\,(-73.6772 45.4837\,-73.656 45.479\,-73.6505 45.4927\,-73.6304 45.4796\,-73.6584 45.4613\,-73.632 45.4495\,-73.6873 45.4554\,-73.6772 45.4837)\,(-73.6209 45.5233\,-73.6856 45.4898\,-73.6483 45.5306\,-73.6209 45.5233)\,(-73.6185 45.4883\,-73.6029 45.495\,-73.5806 45.4856\,-73.5989 45.4744\,-73.6185 45.4883))" +YNY,Yangyang Int'l,강릉시,Gangneung,POINT(128.9 37.75),"POLYGON((128.5804 37.828\,128.6 37.8088\,128.6024 37.7693\,128.6373 37.7825\,128.6984 37.7713\,128.7438 37.7414\,128.765 37.6729\,128.7335 37.6549\,128.7366 37.6097\,128.723 37.6113\,128.7121 37.5886\,128.6841 37.5938\,128.6789 37.5671\,128.7198 37.5287\,128.7376 37.5475\,128.7898 37.5397\,128.8138 37.5033\,128.8491 37.5155\,128.8638 37.5752\,128.8845 37.5929\,128.9057 37.5526\,128.9344 37.5381\,129.0471 37.5466\,129.0557 37.5971\,129.2757 37.7825\,129.1405 37.8992\,128.9974 38.0562\,128.8034 37.913\,128.6692 37.8858\,128.6429 37.8949\,128.588 37.862\,128.5804 37.828))" +YOW,Macdonald-Cartier Int'l,Gatineau,Gatineau,POINT(-75.65 45.4833),"POLYGON((-75.7688 45.5065\,-75.7188 45.4584\,-75.6923 45.4527\,-75.5757 45.4745\,-75.5262 45.5008\,-75.5375 45.5773\,-75.5418 45.5625\,-75.7235 45.547\,-75.7202 45.5235\,-75.7643 45.521\,-75.7688 45.5065))" +YPE,Peace River,Grande Prairie,Grande Prairie,POINT(-118.7947 55.1708),"POLYGON((-118.9618 55.2215\,-118.9615 55.1634\,-118.8721 55.1634\,-118.8721 55.149\,-118.7998 55.1125\,-118.7821 55.1344\,-118.7504 55.127\,-118.7308 55.1415\,-118.7312 55.1857\,-118.6796 55.1924\,-118.6798 55.2142\,-118.846 55.2058\,-118.859 55.2362\,-118.9618 55.2215))" +YPR,Prince Rupert,Prince Rupert,Prince Rupert,POINT(-130.3271 54.3122),"POLYGON((-130.3657 54.3132\,-130.352 54.2016\,-130.3048 54.2016\,-130.2902 54.2118\,-130.3019 54.2343\,-130.2854 
54.2324\,-130.2889 54.2487\,-130.2459 54.2565\,-130.246 54.3009\,-130.2847 54.3389\,-130.3657 54.3132))" +YQB,Québec,La Cité-Limoilou,Quebec City,POINT(-71.2081 46.8139),"POLYGON((-71.2876 46.7959\,-71.2578 46.7862\,-71.2116 46.7993\,-71.193 46.8322\,-71.1967 46.8604\,-71.2515 46.8695\,-71.2327 46.8506\,-71.2622 46.8348\,-71.2395 46.8177\,-71.2876 46.7959))" +YQG,Windsor,Detroit,Detroit,POINT(-83.1024 42.3834),"POLYGON((-83.288 42.4427\,-83.264 42.3417\,-83.2154 42.3289\,-83.1966 42.3509\,-83.1477 42.3519\,-83.1567 42.3278\,-83.1405 42.2977\,-83.1671 42.2896\,-83.1609 42.2552\,-83.1375 42.2828\,-83.0983 42.2867\,-83.0634 42.3179\,-82.924 42.352\,-82.9469 42.387\,-82.9217 42.3953\,-82.9104 42.419\,-82.9514 42.4358\,-82.9409 42.4504\,-83.288 42.4427)\,(-83.1219 42.4175\,-83.0425 42.4044\,-83.0406 42.3848\,-83.0547 42.3796\,-83.0738 42.3987\,-83.1023 42.3882\,-83.1219 42.4175))" +YQM,Greater Moncton Int'l,Moncton,Moncton,POINT(-64.7714 46.1328),"POLYGON((-64.9154 46.1416\,-64.8822 46.0887\,-64.9125 46.0821\,-64.8976 46.0492\,-64.8008 46.0668\,-64.7644 46.0994\,-64.6859 46.1241\,-64.6904 46.148\,-64.7266 46.1385\,-64.7455 46.1765\,-64.8108 46.1748\,-64.8563 46.1496\,-64.8655 46.1601\,-64.9154 46.1416))" +YQR,Regina,Regina,Regina,POINT(-104.6067 50.4547),"POLYGON((-104.7781 50.4697\,-104.778 50.4341\,-104.7098 50.4411\,-104.7208 50.4549\,-104.6871 50.4438\,-104.6869 50.3971\,-104.4922 50.4123\,-104.5033 50.4717\,-104.5261 50.4676\,-104.5379 50.4989\,-104.6221 50.4989\,-104.6871 50.5207\,-104.7212 50.5135\,-104.7211 50.4843\,-104.7781 50.4697)\,(-104.6195 50.4599\,-104.619 50.4613\,-104.619 50.4599\,-104.6195 50.4599)\,(-104.619 50.4591\,-104.6195 50.4598\,-104.619 50.4598\,-104.619 50.4591)\,(-104.6183 50.4584\,-104.6189 50.4593\,-104.6183 50.4593\,-104.6183 50.4584)\,(-104.5812 50.4166\,-104.5812 50.421\,-104.5778 50.4211\,-104.5812 50.4166))" +YQT,Thunder Bay Int'l,Thunder Bay,Thunder Bay,POINT(-89.2461 48.3822),"POLYGON((-89.4276 48.515\,-89.4267 48.399\,-89.3885 48.3989\,-89.3887 48.2892\,-89.3294 48.2882\,-89.3334 48.3168\,-89.3058 48.3168\,-89.3057 48.3439\,-89.2631 48.3568\,-89.2232 48.3471\,-89.2118 48.3078\,-89.1075 48.283\,-89.0608 48.3859\,-89.1502 48.3849\,-89.152 48.4966\,-89.1821 48.4825\,-89.182 48.5146\,-89.4276 48.515))" +YQX,Gander Int'l,Gander,Gander,POINT(-54.6089 48.9569),"POLYGON((-54.7211 48.9622\,-54.7165 48.9443\,-54.6331 48.9509\,-54.5317 48.885\,-54.4988 48.9762\,-54.6231 49.008\,-54.7211 48.9622))" +YQY,Sydney/J.A. 
Douglas McCurdy,Cape Breton Regional Municipality,Cape Breton,POINT(-60.1931 46.1389),"POLYGON((-60.9142 45.8968\,-60.6765 45.8218\,-59.8804 45.6439\,-59.8033 45.714\,-59.6332 45.7697\,-59.4824 45.8612\,-59.3932 45.9664\,-59.3752 46.0521\,-59.4015 46.1224\,-59.549 46.3093\,-59.645 46.3611\,-59.9124 46.4294\,-59.9856 46.5481\,-60.1295 46.5\,-60.3409 46.3602\,-60.3482 46.314\,-60.3813 46.2693\,-60.3965 46.2769\,-60.5041 46.1884\,-60.4658 46.1572\,-60.5844 46.0805\,-60.724 46.0307\,-60.8072 45.9449\,-60.9142 45.8968)\,(-60.6667 45.9319\,-60.648 45.9481\,-60.6659 45.9508\,-60.6633 45.9634\,-60.5854 46.0019\,-60.567 45.9838\,-60.5856 45.9684\,-60.5642 45.9582\,-60.5811 45.946\,-60.5668 45.9273\,-60.6489 45.8881\,-60.6667 45.9319)\,(-60.2314 46.0524\,-60.2207 46.0614\,-60.2045 46.0521\,-60.2158 46.0424\,-60.2314 46.0524)\,(-60.1964 46.1063\,-60.1919 46.1217\,-60.175 46.1165\,-60.1964 46.1063)\,(-60.1753 46.1646\,-60.1711 46.1665\,-60.1707 46.1653\,-60.1753 46.1646))" +YSB,Sudbury,North Bay,North Bay,POINT(-79.45 46.3),"POLYGON((-79.5 46.3244\,-79.4653 46.3064\,-79.4266 46.2553\,-79.4237 46.2278\,-79.3422 46.2489\,-79.3833 46.3175\,-79.2571 46.3134\,-79.257 46.4467\,-79.5 46.447\,-79.5 46.3244))" +YSJ,Saint John,City of Saint John,Saint John,POINT(-66.0761 45.2806),"POLYGON((-66.2456 45.1332\,-66.1424 45.182\,-66.0206 45.2122\,-65.9569 45.208\,-65.9496 45.2403\,-65.87 45.2787\,-65.8892 45.296\,-65.8826 45.3268\,-65.8623 45.3155\,-65.8389 45.3342\,-65.9236 45.3769\,-66.239 45.2866\,-66.2456 45.1332))" +YTS,Timmins,Timmins,Timmins,POINT(-81.3333 48.4667),"POLYGON((-81.7272 48.7112\,-81.7264 48.2771\,-81.5964 48.2783\,-80.8127 48.2771\,-80.8131 48.5378\,-80.8114 48.6238\,-80.8161 48.6248\,-80.9212 48.625\,-80.9152 48.6485\,-80.9389 48.6682\,-80.931 48.6829\,-80.9434 48.7028\,-80.9434 48.7113\,-81.7272 48.7112))" +YUL,Montréal-Trudeau,Montréal,Montréal,POINT(-73.5617 45.5089),"POLYGON((-73.9742 45.4664\,-73.9247 45.4406\,-73.8936 45.4467\,-73.8376 45.4978\,-73.7903 45.5046\,-73.7642 45.4916\,-73.7738 45.482\,-73.7504 45.4611\,-73.7222 45.4827\,-73.7081 45.4735\,-73.7246 45.4612\,-73.723 45.4218\,-73.6278 45.4101\,-73.5373 45.4225\,-73.5188 45.4521\,-73.5307 45.5375\,-73.5003 45.5747\,-73.5005 45.6094\,-73.5683 45.6342\,-73.5447 45.6482\,-73.4848 45.6318\,-73.4743 45.7022\,-73.5235 45.6993\,-73.6076 45.6448\,-73.6787 45.5533\,-73.7634 45.512\,-73.8532 45.5114\,-73.8954 45.5266\,-73.9742 45.4664)\,(-73.6772 45.4837\,-73.656 45.479\,-73.6505 45.4927\,-73.6304 45.4796\,-73.6584 45.4613\,-73.632 45.4495\,-73.6873 45.4554\,-73.6772 45.4837)\,(-73.6209 45.5233\,-73.6856 45.4898\,-73.6483 45.5306\,-73.6209 45.5233)\,(-73.6185 45.4883\,-73.6029 45.495\,-73.5806 45.4856\,-73.5989 45.4744\,-73.6185 45.4883))" +YVR,Vancouver Int'l,Surrey,Surrey,POINT(-122.8489 49.19),"POLYGON((-122.9572 49.0021\,-122.8456 49.0021\,-122.8454 49.0312\,-122.7792 49.0311\,-122.7903 49.0021\,-122.6801 49.0021\,-122.6795 49.1871\,-122.7205 49.1866\,-122.7311 49.2116\,-122.7509 49.2153\,-122.8841 49.2176\,-122.9228 49.1771\,-122.8902 49.1771\,-122.8904 49.0627\,-122.9572 49.0021))" +//YWG,Winnipeg Int'l,Winnipeg,Winnipeg,POINT(-97.1464 49.8844),"POLYGON((-97.3492 49.809\,-97.2197 49.8144\,-97.2201 49.7136\,-97.0434 49.7761\,-97.0491 49.7928\,-97.0264 49.7973\,-97.0268 49.8709\,-96.9811 49.8709\,-96.9811 49.8858\,-96.9565 49.8858\,-96.9567 49.8975\,-96.9664 49.9156\,-97.025 49.9159\,-97.0248 49.9304\,-96.9973 49.9302\,-97.1552 49.9939\,-97.1612 49.9756\,-97.23 49.9756\,-97.2299 49.9166\,-97.2636 49.9166\,-97.2623 49.9313\,-97.3212 
49.9294\,-97.3227 49.8988\,-97.3415 49.8984\,-97.3435 49.8786\,-97.3265 49.8686\,-97.3254 49.8381\,-97.3467 49.8372\,-97.3492 49.809)\,(-97.2047 49.8887\,-97.2046 49.8897\,-97.2041 49.8897\,-97.2036 49.8904\,-97.2047 49.8887)\,(-97.2043 49.8898\,-97.2046 49.8898\,-97.2042 49.8898\,-97.2043 49.8898)\,(-97.2037 49.8886\,-97.2041 49.8888\,-97.2037 49.8887\,-97.2037 49.8886))" +YXD,Edmonton City Centre,St. Albert,St. Albert,POINT(-113.6258 53.6303),"POLYGON((-113.715 53.673\,-113.7067 53.6297\,-113.658 53.6141\,-113.6717 53.5996\,-113.5656 53.6338\,-113.5657 53.6506\,-113.5972 53.661\,-113.6148 53.6873\,-113.715 53.673))" +YXE,John G Diefenbaker Int'l,Saskatoon,Saskatoon,POINT(-106.6833 52.1333),"POLYGON((-106.825 52.1221\,-106.6038 52.0698\,-106.5037 52.1729\,-106.7059 52.2311\,-106.825 52.1221)\,(-106.7096 52.1286\,-106.7101 52.1292\,-106.7096 52.1292\,-106.7096 52.1286)\,(-106.6947 52.1257\,-106.6941 52.1261\,-106.6941 52.1257\,-106.6947 52.1257)\,(-106.694 52.1548\,-106.6882 52.1585\,-106.6881 52.1548\,-106.694 52.1548)\,(-106.6609 52.1249\,-106.6614 52.1257\,-106.6606 52.1256\,-106.6609 52.1249)\,(-106.5854 52.1278\,-106.5873 52.1306\,-106.5844 52.1293\,-106.5854 52.1278)\,(-106.5851 52.1302\,-106.5859 52.1317\,-106.5836 52.1299\,-106.5851 52.1302)\,(-106.5824 52.1262\,-106.5833 52.1286\,-106.5785 52.1264\,-106.5824 52.1262))" +YXJ,Fort St. John (N. Peace),Fort St. John,Fort St. John,POINT(-120.8476 56.2465),"POLYGON((-120.8811 56.2494\,-120.8738 56.2319\,-120.8344 56.2319\,-120.8477 56.2247\,-120.8345 56.2101\,-120.8215 56.2101\,-120.8214 56.2289\,-120.7686 56.2247\,-120.7816 56.2464\,-120.8214 56.2464\,-120.8082 56.261\,-120.8343 56.2646\,-120.8213 56.2755\,-120.8811 56.2494))" +YXS,Prince George,Prince George,Prince George,POINT(-122.7494 53.9169),"POLYGON((-122.8994 53.9816\,-122.8976 53.8524\,-122.8815 53.8525\,-122.8815 53.8163\,-122.7188 53.8129\,-122.7059 53.8802\,-122.6287 53.8766\,-122.6287 53.8911\,-122.6045 53.8911\,-122.6043 53.9129\,-122.6868 53.9295\,-122.6906 53.9537\,-122.7086 53.9606\,-122.6519 53.9646\,-122.6829 53.9914\,-122.625 54.0046\,-122.7276 54.0124\,-122.7274 54.0395\,-122.8501 54.0402\,-122.8502 53.9817\,-122.8994 53.9816))" +YXY,Whitehorse Int'l,Whitehorse,Whitehorse,POINT(-135.0691 60.7029),"POLYGON((-135.2787 60.8392\,-135.2718 60.7832\,-135.1781 60.6636\,-135.0264 60.5536\,-134.8567 60.5898\,-134.8613 60.6458\,-134.9003 60.6583\,-134.9649 60.7496\,-135.0814 60.8401\,-135.1878 60.8425\,-135.2233 60.8289\,-135.2301 60.8462\,-135.2315 60.8341\,-135.2787 60.8392))" +YYB,North Bay/Jack Garland,North Bay,North Bay,POINT(-79.45 46.3),"POLYGON((-79.5 46.3244\,-79.4653 46.3064\,-79.4266 46.2553\,-79.4237 46.2278\,-79.3422 46.2489\,-79.3833 46.3175\,-79.2571 46.3134\,-79.257 46.4467\,-79.5 46.447\,-79.5 46.3244))" +YYC,Calgary Int'l,Calgary,Calgary,POINT(-114.0667 51.05),"POLYGON((-114.3158 51.1398\,-114.2808 51.1012\,-114.2921 51.0815\,-114.2343 51.0815\,-114.2339 51.0087\,-114.1413 51.0086\,-114.1413 50.9941\,-114.164 50.9969\,-114.1642 50.9848\,-114.1414 50.9797\,-114.1412 50.9214\,-114.2095 50.9214\,-114.2097 50.892\,-114.0944 50.8913\,-114.0941 50.863\,-114.0708 50.8484\,-113.9678 50.8556\,-113.9519 50.8425\,-113.8601 50.8579\,-113.8601 50.9215\,-113.889 50.9215\,-113.8658 50.9432\,-113.8657 50.9795\,-113.9117 50.9793\,-113.9117 51.0161\,-113.8655 51.0161\,-113.8655 51.0518\,-113.9117 51.0597\,-113.9117 51.1836\,-114.0023 51.1761\,-114.0132 51.2125\,-114.1295 51.2122\,-114.1295 51.1978\,-114.2111 51.1978\,-114.2111 51.1833\,-114.2344 51.1833\,-114.2343 
51.1543\,-114.2575 51.1542\,-114.269 51.1106\,-114.2949 51.1396\,-114.3158 51.1398))" +YYG,Charlottetown,City of Charlottetown,Charlottetown,POINT(-63.1347 46.2403),"POLYGON((-63.197 46.2722\,-63.1363 46.2171\,-63.0734 46.2645\,-63.0891 46.296\,-63.1247 46.3066\,-63.1428 46.2839\,-63.1877 46.3016\,-63.1788 46.2884\,-63.197 46.2722))" +YYJ,Victoria Int'l,Saanich,Saanich,POINT(-123.381 48.484),"POLYGON((-123.478 48.5442\,-123.4583 48.5152\,-123.4535 48.4641\,-123.4187 48.4693\,-123.4226 48.4517\,-123.3324 48.4481\,-123.3292 48.4338\,-123.3207 48.4677\,-123.2608 48.4499\,-123.3032 48.498\,-123.3613 48.5166\,-123.3611 48.5503\,-123.4579 48.558\,-123.478 48.5442))" +YYT,St John's Int'l,St. John's,St. John's,POINT(-52.7971 47.4817),"POLYGON((-53.0379 47.4569\,-52.9613 47.4275\,-52.9463 47.3922\,-52.921 47.3803\,-52.8742 47.4058\,-52.8279 47.3825\,-52.8318 47.3642\,-52.7297 47.3274\,-52.6911 47.3942\,-52.6484 47.4322\,-52.6486 47.4489\,-52.7149 47.4625\,-52.722 47.4817\,-52.6488 47.4596\,-52.6491 47.4812\,-52.63 47.4805\,-52.6128 47.5264\,-52.6563 47.5436\,-52.6453 47.6093\,-52.7831 47.6339\,-52.8088 47.6234\,-52.7945 47.6048\,-52.8251 47.6073\,-52.8505 47.5741\,-52.8303 47.5719\,-52.8825 47.5582\,-52.864 47.537\,-52.7855 47.5344\,-52.7739 47.5203\,-52.7992 47.5125\,-52.7889 47.5015\,-52.8524 47.5142\,-52.916 47.5066\,-52.9314 47.4816\,-53.011 47.4773\,-53.0379 47.4569))" +YYZ,Toronto-Pearson Int'l,Toronto,Toronto,POINT(-79.3733 43.7417),"POLYGON((-79.6393 43.7498\,-79.5888 43.6646\,-79.6087 43.6464\,-79.5639 43.6278\,-79.5671 43.6092\,-79.5407 43.5796\,-79.5058 43.586\,-79.4544 43.6355\,-79.3898 43.6111\,-79.3206 43.617\,-79.3171 43.6484\,-79.1132 43.7931\,-79.1517 43.814\,-79.1702 43.8554\,-79.6393 43.7498))" +YZF,Yellowknife,Yellowknife,Yellowknife,POINT(-114.4053 62.4709),"POLYGON((-114.5182 62.4085\,-114.3327 62.4081\,-114.3327 62.4579\,-114.3 62.4577\,-114.3002 62.5417\,-114.4322 62.5417\,-114.4316 62.5\,-114.518 62.5\,-114.5182 62.4085)\,(-114.3767 62.4543\,-114.3766 62.4548\,-114.3761 62.4545\,-114.3767 62.4543)\,(-114.3429 62.4751\,-114.3364 62.4838\,-114.33 62.4816\,-114.3319 62.4701\,-114.34 62.4708\,-114.3429 62.4751))" +ZAG,Zagreb,Velika Gorica,Velika Gorica,POINT(16.0667 45.7),"POLYGON((16.0308 45.7234\,16.0506 45.7051\,16.0317 45.6695\,16.0428 45.6416\,16.0549 45.6454\,16.0562 45.6802\,16.0845 45.6831\,16.1014 45.7064\,16.0995 45.7339\,16.0881 45.756\,16.0405 45.7463\,16.0308 45.7234))" +ZAH,Zahedan Int'l,شهر زاهدان,Zāhedān,POINT(60.8628 29.4964),"POLYGON((60.8172 29.553\,60.8333 29.4247\,60.9112 29.4303\,60.9266 29.4536\,60.9023 29.5211\,60.8172 29.553))" +ZAM,Zamboanga Int'l,Zamboanga City,Zamboanga City,POINT(122.0761 6.9042),"POLYGON((121.7597 7.1098\,121.7902 6.9554\,121.865 6.8492\,121.9316 6.8489\,122.0333 6.8076\,122.1689 6.7973\,122.2178 6.8203\,122.295 6.8\,122.4037 6.8765\,122.4542 6.8875\,122.4809 6.927\,122.4945 7\,122.4428 7.1497\,122.5659 7.3006\,122.571 7.3992\,122.5587 7.4311\,122.4167 7.4425\,122.3546 7.4669\,122.2263 7.4689\,122.1786 7.3694\,122.1776 7.2838\,122.151 7.2209\,122.0756 7.1468\,121.8963 7.1515\,121.7858 7.2214\,121.7597 7.1098))" +ZAR,Zaria,Zaria,Zaria,POINT(7.7 11.0667),"POLYGON((7.4878 11.0341\,7.5231 10.9706\,7.5879 11.0216\,7.6423 10.9687\,7.6684 10.9999\,7.7031 10.9929\,7.7177 10.9654\,7.7672 10.9678\,7.7783 10.9211\,7.8039 10.9273\,7.8175 10.9607\,7.8145 10.9991\,7.7995 11.0107\,7.8202 11.0353\,7.8162 11.0659\,7.7746 11.133\,7.7518 11.1255\,7.753 11.0898\,7.7408 11.0841\,7.6688 11.1312\,7.6457 11.1308\,7.6401 11.077\,7.6006 11.0844\,7.5648 
11.0619\,7.517 11.0628\,7.4878 11.0341))" +ZGC,Lanzhou Zhongchuan,城关区,Lanzhou,POINT(103.8318 36.0617),"POLYGON((103.7651 36.1518\,103.792 36.1364\,103.777 36.0778\,103.8068 36.0648\,103.8026 36.0447\,103.8747 35.9619\,103.9028 36.0147\,103.9422 36.0351\,103.9315 36.0529\,103.9755 36.0574\,103.9828 36.0958\,103.9425 36.0947\,103.9395 36.1365\,103.8868 36.1873\,103.7933 36.1895\,103.7673 36.1814\,103.7651 36.1518))" +ZLO,Playa de Oro Int'l,Cihuatlán,Cihuatlán,POINT(-104.5667 19.25),"POLYGON((-104.7999 19.3152\,-104.7572 19.285\,-104.7863 19.2442\,-104.7585 19.2287\,-104.7327 19.2394\,-104.7338 19.2216\,-104.6955 19.218\,-104.6844 19.2021\,-104.698 19.1755\,-104.68 19.1932\,-104.6188 19.1809\,-104.6131 19.1558\,-104.5951 19.1505\,-104.5856 19.1609\,-104.6001 19.1787\,-104.5769 19.1801\,-104.5691 19.2187\,-104.5334 19.2581\,-104.5084 19.2441\,-104.4946 19.254\,-104.4889 19.2299\,-104.472 19.2287\,-104.4358 19.2696\,-104.4344 19.3068\,-104.4669 19.3679\,-104.493 19.3497\,-104.533 19.3604\,-104.543 19.3392\,-104.5998 19.3244\,-104.6229 19.3012\,-104.6487 19.3371\,-104.6258 19.3533\,-104.6725 19.3765\,-104.7191 19.3402\,-104.7577 19.3434\,-104.7764 19.3127\,-104.7999 19.3152))" +ZNZ,Zanzibar,Unguja Mjini Magharibi,Zanzibar,POINT(39.199 -6.165),"POLYGON((39.1854 -6.163\,39.207 -6.1904\,39.1999 -6.2199\,39.2129 -6.2492\,39.2497 -6.2737\,39.2727 -6.3163\,39.2991 -6.3208\,39.2794 -6.257\,39.2893 -6.2501\,39.3053 -6.2723\,39.2944 -6.2383\,39.3095 -6.2289\,39.2745 -6.1807\,39.269 -6.1456\,39.2787 -6.028\,39.2044 -6.0355\,39.2147 -6.1306\,39.1854 -6.163))" +ZRH,Zurich Int'l,Bezirk Zürich,Zürich,POINT(8.5411 47.3744),"POLYGON((8.448 47.3802\,8.4977 47.3452\,8.5032 47.3202\,8.6254 47.3547\,8.5832 47.3883\,8.5973 47.4063\,8.5431 47.4329\,8.4858 47.431\,8.4691 47.4169\,8.473 47.3951\,8.448 47.3802))" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv new file mode 100644 index 0000000000000..1737d4792ef5b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv @@ -0,0 +1,4 @@ +client_cidr:ip_range,env:keyword +172.21.0.0/16,Development +172.21.2.0/24,QA +172.21.3.0/24,Production diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index f56cba7031def..4bb43146aaf0d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -1163,3 +1163,20 @@ row a = ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00: datetime:date | sa:date | sd:date ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"]| ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"] | ["1987-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1985-01-01T00:00:00.000Z"] ; + +calculateAges +FROM employees +| WHERE birth_date > NOW() - 100 years +| EVAL age = 2024 - DATE_EXTRACT("year", birth_date) +| STATS count=count(age) BY age +| SORT count DESC, age DESC +| LIMIT 5 +; + +count:long | age:long +11 | 71 +9 | 65 +8 | 72 +8 | 70 +8 | 64 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv new file mode 100644 index 0000000000000..68a1579836ca2 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv @@ -0,0 +1,14 @@ 
+date_range:date_range, decade:integer, description:keyword +{"gte": "1900-01-01"\, "lt":"1910-01-01"}, 1900, Edwardian Era +{"gte": "1910-01-01"\, "lt":"1920-01-01"}, 1910, Ragtime Era +{"gte": "1920-01-01"\, "lt":"1930-01-01"}, 1920, Roaring Twenties +{"gte": "1930-01-01"\, "lt":"1940-01-01"}, 1930, Dirty Thirties +{"gte": "1940-01-01"\, "lt":"1950-01-01"}, 1940, Fabulous Forties +{"gte": "1950-01-01"\, "lt":"1960-01-01"}, 1950, Nifty Fifties +{"gte": "1960-01-01"\, "lt":"1970-01-01"}, 1960, Swinging Sixties +{"gte": "1970-01-01"\, "lt":"1980-01-01"}, 1970, Groovy Seventies +{"gte": "1980-01-01"\, "lt":"1990-01-01"}, 1980, Radical Eighties +{"gte": "1990-01-01"\, "lt":"2000-01-01"}, 1990, Nineties Nostalgia +{"gte": "2000-01-01"\, "lt":"2010-01-01"}, 2000, Innovation Explosion Decade +{"gte": "2010-01-01"\, "lt":"2020-01-01"}, 2010, Renaissance Decade +{"gte": "2020-01-01"\, "lt":"2030-01-01"}, 2020, Empowerment Era diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec index 2fa567996290d..c97f49469fa24 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-IT_tests_only.csv-spec @@ -151,3 +151,144 @@ row a = ["1", "2"] | enrich languages_policy on a with a_lang = language_name; a:keyword | a_lang:keyword ["1", "2"] | ["English", "French"] ; + +enrichCidr#[skip:-8.13.99, reason:enrich for cidr added in 8.14.0] +FROM sample_data +| ENRICH client_cidr_policy ON client_ip WITH env +| KEEP client_ip, env +; + +client_ip:ip | env:keyword +172.21.3.15 | [Development, Production] +172.21.3.15 | [Development, Production] +172.21.3.15 | [Development, Production] +172.21.3.15 | [Development, Production] +172.21.0.5 | Development +172.21.2.113 | [Development, QA] +172.21.2.162 | [Development, QA] +; + +enrichCidr2#[skip:-8.99.99, reason:ip_range support not added yet] +FROM sample_data +| ENRICH client_cidr_policy ON client_ip WITH env, client_cidr +| KEEP client_ip, env, client_cidr +| SORT client_ip +; + +client_ip:ip | env:keyword | client_cidr:ip_range +172.21.3.15 | [Development, Production] | 172.21.3.0/24 +172.21.3.15 | [Development, Production] | 172.21.3.0/24 +172.21.3.15 | [Development, Production] | 172.21.3.0/24 +172.21.3.15 | [Development, Production] | 172.21.3.0/24 +172.21.0.5 | Development | 172.21.0.0/16 +172.21.2.113 | [Development, QA] | 172.21.2.0/24 +172.21.2.162 | [Development, QA] | 172.21.2.0/24 +; + +enrichAgesStatsYear#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM employees +| WHERE birth_date > "1960-01-01" +| EVAL birth_year = DATE_EXTRACT("year", birth_date) +| EVAL age = 2022 - birth_year +| ENRICH ages_policy ON age WITH age_group = description +| STATS count=count(age_group) BY age_group, birth_year +| KEEP birth_year, age_group, count +| SORT birth_year DESC +; + +birth_year:long | age_group:keyword | count:long +1965 | Middle-aged | 1 +1964 | Middle-aged | 4 +1963 | Middle-aged | 7 +1962 | Senior | 6 +1961 | Senior | 8 +1960 | Senior | 8 +; + +enrichAgesStatsAgeGroup#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM employees +| WHERE birth_date IS NOT NULL +| EVAL age = 2022 - DATE_EXTRACT("year", birth_date) +| ENRICH ages_policy ON age WITH age_group = description +| STATS count=count(age_group) BY age_group +| SORT count DESC +; + +count:long | age_group:keyword +78 | Senior +12 | Middle-aged +; + 
+enrichHeightsStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM employees +| ENRICH heights_policy ON height WITH height_group = description +| STATS count=count(height_group), min=min(height), max=max(height) BY height_group +| KEEP height_group, min, max, count +| SORT min ASC +; + +height_group:k | min:double | max:double | count:long +Very Short | 1.41 | 1.48 | 9 +Short | 1.5 | 1.59 | 20 +Medium Height | 1.61 | 1.79 | 26 +Tall | 1.8 | 1.99 | 25 +Very Tall | 2.0 | 2.1 | 20 +; + +enrichDecadesStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM employees +| ENRICH decades_policy ON birth_date WITH birth_decade = decade, birth_description = description +| ENRICH decades_policy ON hire_date WITH hire_decade = decade, hire_description = description +| STATS count=count(*) BY birth_decade, hire_decade, birth_description, hire_description +| KEEP birth_decade, hire_decade, birth_description, hire_description, count +| SORT birth_decade DESC, hire_decade DESC +; + +birth_decade:long | hire_decade:l | birth_description:k | hire_description:k | count:long +null | 1990 | null | Nineties Nostalgia | 6 +null | 1980 | null | Radical Eighties | 4 +1960 | 1990 | Swinging Sixties | Nineties Nostalgia | 13 +1960 | 1980 | Swinging Sixties | Radical Eighties | 21 +1950 | 1990 | Nifty Fifties | Nineties Nostalgia | 22 +1950 | 1980 | Nifty Fifties | Radical Eighties | 34 +; + +spatialEnrichmentKeywordMatch#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM airports +| WHERE abbrev == "CPH" +| ENRICH city_names ON city WITH airport, region, city_boundary +| EVAL boundary_wkt_length = LENGTH(TO_STRING(city_boundary)) +| KEEP abbrev, city, city_location, country, location, name, airport, region, boundary_wkt_length +; + +abbrev:keyword | city:keyword | city_location:geo_point | country:keyword | location:geo_point | name:text | airport:text | region:text | boundary_wkt_length:integer +CPH | Copenhagen | POINT(12.5683 55.6761) | Denmark | POINT(12.6493508684508 55.6285017221528) | Copenhagen | Copenhagen | Københavns Kommune | 265 +; + +spatialEnrichmentGeoMatch#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +FROM airports +| WHERE abbrev == "CPH" +| ENRICH city_boundaries ON city_location WITH airport, region, city_boundary +| EVAL boundary_wkt_length = LENGTH(TO_STRING(city_boundary)) +| KEEP abbrev, city, city_location, country, location, name, airport, region, boundary_wkt_length +; + +abbrev:keyword | city:keyword | city_location:geo_point | country:keyword | location:geo_point | name:text | airport:text | region:text | boundary_wkt_length:integer +CPH | Copenhagen | POINT(12.5683 55.6761) | Denmark | POINT(12.6493508684508 55.6285017221528) | Copenhagen | Copenhagen | Københavns Kommune | 265 +; + +spatialEnrichmentGeoMatchStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +required_feature: esql.mv_warn + +FROM airports +| ENRICH city_boundaries ON city_location WITH airport, region, city_boundary +| EVAL boundary_wkt_length = LENGTH(TO_STRING(city_boundary)) +| STATS city_centroid = ST_CENTROID(city_location), count = COUNT(city_location), min_wkt = MIN(boundary_wkt_length), max_wkt = MAX(boundary_wkt_length) +; +warning:Line 3:30: evaluation of [LENGTH(TO_STRING(city_boundary))] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 3:30: java.lang.IllegalArgumentException: single-value function encountered multi-value + + +city_centroid:geo_point | count:long | min_wkt:integer | max_wkt:integer +POINT(1.396561 24.127649) | 872 | 88 | 1044 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-ages.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-ages.json new file mode 100644 index 0000000000000..4d801711c9a8c --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-ages.json @@ -0,0 +1,9 @@ +{ + "range": { + "indices": "ages", + "match_field": "age_range", + "enrich_fields": [ + "description" + ] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_boundaries.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_boundaries.json new file mode 100644 index 0000000000000..b73665d4ffe59 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_boundaries.json @@ -0,0 +1,7 @@ +{ + "geo_match": { + "indices": "airport_city_boundaries", + "match_field": "city_boundary", + "enrich_fields": ["city", "airport", "region", "city_boundary"] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_names.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_names.json new file mode 100644 index 0000000000000..9c0d3880013e7 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-city_names.json @@ -0,0 +1,7 @@ +{ + "match": { + "indices": "airport_city_boundaries", + "match_field": "city", + "enrich_fields": ["airport", "region", "city_boundary"] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-client_cidr.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-client_cidr.json new file mode 100644 index 0000000000000..a9015ce48cad1 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-client_cidr.json @@ -0,0 +1,7 @@ +{ + "range": { + "indices": "client_cidr", + "match_field": "client_cidr", + "enrich_fields": ["env", "client_cidr"] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-decades.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-decades.json new file mode 100644 index 0000000000000..c9242b57b810d --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-decades.json @@ -0,0 +1,10 @@ +{ + "range": { + "indices": "decades", + "match_field": "date_range", + "enrich_fields": [ + "decade", + "description" + ] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-heights.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-heights.json new file mode 100644 index 0000000000000..1010b7b7fbd1e --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich-policy-heights.json @@ -0,0 +1,9 @@ +{ + "range": { + "indices": "heights", + "match_field": "height_range", + "enrich_fields": [ + "description" + ] + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv new file mode 100644 index 0000000000000..5a9c3a9530cb0 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv @@ -0,0 +1,6 @@ +height_range:double_range, description:keyword +{"gte": 0\, "lt": 1.5}, Very 
Short +{"gte": 1.5\, "lt": 1.6}, Short +{"gte": 1.6\, "lt": 1.8}, Medium Height +{"gte": 1.8\, "lt": 2.0}, Tall +{"gte": 2.0\, "lt": 5.0}, Very Tall diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-ages.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-ages.json new file mode 100644 index 0000000000000..8aab95906cdf8 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-ages.json @@ -0,0 +1,10 @@ +{ + "properties": { + "age_range": { + "type": "integer_range" + }, + "description": { + "type": "keyword" + } + } + } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-airport_city_boundaries.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-airport_city_boundaries.json new file mode 100644 index 0000000000000..76b47ffb11ded --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-airport_city_boundaries.json @@ -0,0 +1,22 @@ +{ + "properties": { + "abbrev": { + "type": "keyword" + }, + "airport": { + "type": "text" + }, + "region": { + "type": "text" + }, + "city": { + "type": "keyword" + }, + "city_location": { + "type": "geo_point" + }, + "city_boundary": { + "type": "geo_shape" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-client_cidr.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-client_cidr.json new file mode 100644 index 0000000000000..7d9d87666d3a8 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-client_cidr.json @@ -0,0 +1,10 @@ +{ + "properties": { + "client_cidr": { + "type": "ip_range" + }, + "env": { + "type": "keyword" + } + } + } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-decades.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-decades.json new file mode 100644 index 0000000000000..596d3975bd664 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-decades.json @@ -0,0 +1,13 @@ +{ + "properties": { + "date_range": { + "type": "date_range" + }, + "decade": { + "type": "integer" + }, + "description": { + "type": "keyword" + } + } + } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-heights.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-heights.json new file mode 100644 index 0000000000000..8347e13971d77 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-heights.json @@ -0,0 +1,10 @@ +{ + "properties": { + "height_range": { + "type": "double_range" + }, + "description": { + "type": "keyword" + } + } + } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec index f8ba7b9cb8cbe..7209812e0569c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec @@ -54,6 +54,18 @@ abbrev:keyword | name:text | location:geo_shape | cou "VLC" | "Valencia" | POINT(-0.473474930771676 39.4914597884489) | "Spain" | "Paterna" | POINT(-0.4406 39.5028) ; +simpleLoadFromCityBoundaries#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL boundary_wkt_length = LENGTH(TO_STRING(city_boundary)) +| KEEP abbrev, region, city_location, airport, boundary_wkt_length +| LIMIT 1 +; + +abbrev:keyword | region:text | 
city_location:geo_point | airport:text | boundary_wkt_length:integer +CPH | Københavns Kommune | POINT(12.5683 55.6761) | Copenhagen | 265 +; + geo_shapeEquals#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 3d5ee7d7507ef..3540aa83638a1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -55,6 +55,7 @@ import org.elasticsearch.xpack.ql.rule.Rule; import org.elasticsearch.xpack.ql.rule.RuleExecutor; import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.EsField; import org.elasticsearch.xpack.ql.type.InvalidMappedField; @@ -63,6 +64,7 @@ import org.elasticsearch.xpack.ql.util.StringUtils; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Collection; import java.util.Comparator; @@ -79,10 +81,18 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.LIMIT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.analyzer.AnalyzerRules.resolveFunction; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; +import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; +import static org.elasticsearch.xpack.ql.type.DataTypes.FLOAT; +import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; +import static org.elasticsearch.xpack.ql.type.DataTypes.IP; import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.NESTED; public class Analyzer extends ParameterizedRuleExecutor { @@ -569,10 +579,16 @@ private LogicalPlan resolveEnrich(Enrich enrich, List childrenOutput) if (resolved.equals(ua)) { return enrich; } - if (resolved.resolved() && resolved.dataType() != KEYWORD) { - resolved = ua.withUnresolvedMessage( - "Unsupported type [" + resolved.dataType() + "] for enrich matching field [" + ua.name() + "]; only KEYWORD allowed" - ); + if (resolved.resolved() && enrich.policy() != null) { + final DataType dataType = resolved.dataType(); + String matchType = enrich.policy().getType(); + DataType[] allowed = allowedEnrichTypes(matchType); + if (Arrays.asList(allowed).contains(dataType) == false) { + String suffix = "only " + Arrays.toString(allowed) + " allowed for type [" + matchType + "]"; + resolved = ua.withUnresolvedMessage( + "Unsupported type [" + resolved.dataType() + "] for enrich matching field [" + ua.name() + "]; " + suffix + ); + } } return new Enrich( enrich.source(), @@ -587,6 +603,13 @@ private LogicalPlan resolveEnrich(Enrich enrich, List childrenOutput) } return enrich; } + + private static final DataType[] GEO_TYPES = new DataType[] { GEO_POINT, GEO_SHAPE }; + private static final 
DataType[] NON_GEO_TYPES = new DataType[] { KEYWORD, IP, LONG, INTEGER, FLOAT, DOUBLE, DATETIME }; + + private DataType[] allowedEnrichTypes(String matchType) { + return matchType.equals(GEO_MATCH_TYPE) ? GEO_TYPES : NON_GEO_TYPES; + } } private static List resolveAgainstList(UnresolvedNamePattern up, Collection attrList) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 423bd7e43bb0f..77120c757e97a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -38,7 +38,6 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OutputOperator; import org.elasticsearch.compute.operator.ProjectOperator; -import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasables; @@ -265,13 +264,16 @@ private void doLookup( DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); MappedFieldType fieldType = searchExecutionContext.getFieldType(matchField); - final SourceOperator queryOperator = switch (matchType) { - case "match", "range" -> { - QueryList queryList = QueryList.termQueryList(fieldType, searchExecutionContext, inputBlock); - yield new EnrichQuerySourceOperator(driverContext.blockFactory(), queryList, searchExecutionContext.getIndexReader()); - } + var queryList = switch (matchType) { + case "match", "range" -> QueryList.termQueryList(fieldType, searchExecutionContext, inputBlock, inputDataType); + case "geo_match" -> QueryList.geoShapeQuery(fieldType, searchExecutionContext, inputBlock, inputDataType); default -> throw new EsqlIllegalArgumentException("illegal match type " + matchType); }; + var queryOperator = new EnrichQuerySourceOperator( + driverContext.blockFactory(), + queryList, + searchExecutionContext.getIndexReader() + ); List intermediateOperators = new ArrayList<>(extractFields.size() + 2); final ElementType[] mergingTypes = new ElementType[extractFields.size()]; // load the fields diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java index 3ad6bace3dca9..3039196a75f24 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java @@ -7,8 +7,10 @@ package org.elasticsearch.xpack.esql.enrich; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; @@ -16,14 +18,25 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.core.Nullable; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.utils.GeometryValidator; +import 
org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes; +import java.net.InetAddress; import java.util.ArrayList; import java.util.List; import java.util.function.IntFunction; +import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; +import static org.elasticsearch.xpack.ql.type.DataTypes.IP; + /** * Generates a list of Lucene queries based on the input block. */ @@ -50,55 +63,167 @@ int getPositionCount() { /** * Returns a list of term queries for the given field and the input block. */ - static QueryList termQueryList(MappedFieldType field, SearchExecutionContext searchExecutionContext, Block block) { - return new QueryList(block) { - private final IntFunction blockValueReader = QueryList.blockToJavaObject(block); - - @Override - Query getQuery(int position) { - final int first = block.getFirstValueIndex(position); - final int count = block.getValueCount(position); - return switch (count) { - case 0 -> null; - case 1 -> field.termQuery(blockValueReader.apply(first), searchExecutionContext); - default -> { - final List terms = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - final Object value = blockValueReader.apply(first + i); - terms.add(value); - } - yield field.termsQuery(terms, searchExecutionContext); + static QueryList termQueryList( + MappedFieldType field, + SearchExecutionContext searchExecutionContext, + Block block, + DataType inputDataType + ) { + return new TermQueryList(field, searchExecutionContext, block, inputDataType); + } + + /** + * Returns a list of geo_shape queries for the given field and the input block. 
+ */ + static QueryList geoShapeQuery( + MappedFieldType field, + SearchExecutionContext searchExecutionContext, + Block block, + DataType inputDataType + ) { + return new GeoShapeQueryList(field, searchExecutionContext, block, inputDataType); + } + + private static class TermQueryList extends QueryList { + private final BytesRef scratch = new BytesRef(); + private final byte[] ipBytes = new byte[InetAddressPoint.BYTES]; + private final MappedFieldType field; + private final SearchExecutionContext searchExecutionContext; + private final DataType inputDataType; + private final IntFunction blockValueReader; + + private TermQueryList(MappedFieldType field, SearchExecutionContext searchExecutionContext, Block block, DataType inputDataType) { + super(block); + + this.field = field; + this.searchExecutionContext = searchExecutionContext; + this.inputDataType = inputDataType; + this.blockValueReader = blockToJavaObject(); + } + + @Override + Query getQuery(int position) { + final int first = block.getFirstValueIndex(position); + final int count = block.getValueCount(position); + return switch (count) { + case 0 -> null; + case 1 -> field.termQuery(blockValueReader.apply(first), searchExecutionContext); + default -> { + final List terms = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + final Object value = blockValueReader.apply(first + i); + terms.add(value); } - }; - } - }; + yield field.termsQuery(terms, searchExecutionContext); + } + }; + } + + private IntFunction blockToJavaObject() { + return switch (block.elementType()) { + case BOOLEAN -> { + BooleanBlock booleanBlock = (BooleanBlock) block; + yield booleanBlock::getBoolean; + } + case BYTES_REF -> { + BytesRefBlock bytesRefBlock = (BytesRefBlock) block; + if (inputDataType == IP) { + yield offset -> { + bytesRefBlock.getBytesRef(offset, scratch); + if (ipBytes.length != scratch.length) { + // Lucene only support 16-byte IP addresses, even IPv4 is encoded in 16 bytes + throw new IllegalStateException("Cannot decode IP field from bytes of length " + scratch.length); + } + System.arraycopy(scratch.bytes, scratch.offset, ipBytes, 0, scratch.length); + InetAddress ip = InetAddressPoint.decode(ipBytes); + return ip; + }; + } + yield offset -> bytesRefBlock.getBytesRef(offset, new BytesRef()); + } + case DOUBLE -> { + DoubleBlock doubleBlock = ((DoubleBlock) block); + yield doubleBlock::getDouble; + } + case INT -> { + IntBlock intBlock = (IntBlock) block; + yield intBlock::getInt; + } + case LONG -> { + LongBlock longBlock = (LongBlock) block; + if (inputDataType == DATETIME && field instanceof RangeFieldMapper.RangeFieldType rangeFieldType) { + yield offset -> rangeFieldType.dateTimeFormatter().formatMillis(longBlock.getLong(offset)); + } + yield longBlock::getLong; + } + case NULL -> offset -> null; + case DOC -> throw new EsqlIllegalArgumentException("can't read values from [doc] block"); + case UNKNOWN -> throw new EsqlIllegalArgumentException("can't read values from [" + block + "]"); + }; + } } - private static IntFunction blockToJavaObject(Block block) { - return switch (block.elementType()) { - case BOOLEAN -> { - BooleanBlock booleanBlock = (BooleanBlock) block; - yield booleanBlock::getBoolean; - } - case BYTES_REF -> { - BytesRefBlock bytesRefBlock = (BytesRefBlock) block; - yield offset -> bytesRefBlock.getBytesRef(offset, new BytesRef()); - } - case DOUBLE -> { - DoubleBlock doubleBlock = ((DoubleBlock) block); - yield doubleBlock::getDouble; - } - case INT -> { - IntBlock intBlock = (IntBlock) block; - yield 
intBlock::getInt; - } - case LONG -> { - LongBlock longBlock = (LongBlock) block; - yield longBlock::getLong; + private static class GeoShapeQueryList extends QueryList { + private final BytesRef scratch = new BytesRef(); + private final MappedFieldType field; + private final SearchExecutionContext searchExecutionContext; + private final IntFunction blockValueReader; + private final DataType inputDataType; // Currently unused, but might be needed for when input is read as doc-values + private final IntFunction shapeQuery; + + private GeoShapeQueryList( + MappedFieldType field, + SearchExecutionContext searchExecutionContext, + Block block, + DataType inputDataType + ) { + super(block); + + this.field = field; + this.searchExecutionContext = searchExecutionContext; + this.inputDataType = inputDataType; + this.blockValueReader = blockToGeometry(block); + this.shapeQuery = shapeQuery(); + } + + @Override + Query getQuery(int position) { + final int first = block.getFirstValueIndex(position); + final int count = block.getValueCount(position); + return switch (count) { + case 0 -> null; + case 1 -> shapeQuery.apply(first); + // TODO: support multiple values + default -> throw new EsqlIllegalArgumentException("can't read multiple Geometry values from a single position"); + }; + } + + private IntFunction blockToGeometry(Block block) { + return switch (block.elementType()) { + case LONG -> offset -> { + var encoded = ((LongBlock) block).getLong(offset); + return SpatialCoordinateTypes.GEO.longAsPoint(encoded); + }; + case BYTES_REF -> offset -> { + var wkb = ((BytesRefBlock) block).getBytesRef(offset, scratch); + return WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); + }; + case NULL -> offset -> null; + default -> throw new EsqlIllegalArgumentException("can't read Geometry values from [" + block.elementType() + "] block"); + }; + } + + private IntFunction shapeQuery() { + if (field instanceof GeoShapeQueryable geoShapeQueryable) { + return offset -> geoShapeQueryable.geoShapeQuery( + searchExecutionContext, + field.name(), + ShapeRelation.INTERSECTS, + blockValueReader.apply(offset) + ); } - case NULL -> offset -> null; - case DOC -> throw new EsqlIllegalArgumentException("can't read values from [doc] block"); - case UNKNOWN -> throw new EsqlIllegalArgumentException("can't read values from [" + block + "]"); - }; + // TODO: Support cartesian ShapeQueryable + throw new IllegalArgumentException("Unsupported field type for geo_match ENRICH: " + field.typeName()); + } } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index 38588090d1656..ad8cb1003eeaa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -22,6 +22,9 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; +import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.MATCH_TYPE; +import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.RANGE_TYPE; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; @@ -64,7 +67,11 @@ public static LogicalPlan analyze(String query) { } public static LogicalPlan analyze(String query, 
String mapping) { - return analyze(query, analyzer(loadMapping(mapping, "test"), TEST_VERIFIER, configuration(query))); + return analyze(query, "test", mapping); + } + + public static LogicalPlan analyze(String query, String index, String mapping) { + return analyze(query, analyzer(loadMapping(mapping, index), TEST_VERIFIER, configuration(query))); } public static LogicalPlan analyze(String query, Analyzer analyzer) { @@ -89,19 +96,42 @@ public static IndexResolution analyzerExpandedDefaultMapping() { } public static EnrichResolution defaultEnrichResolution() { - return loadEnrichPolicyResolution("languages", "language_code", "languages_idx", "mapping-languages.json"); + EnrichResolution enrichResolution = new EnrichResolution(); + loadEnrichPolicyResolution(enrichResolution, MATCH_TYPE, "languages", "language_code", "languages_idx", "mapping-languages.json"); + loadEnrichPolicyResolution(enrichResolution, RANGE_TYPE, "client_cidr", "client_cidr", "client_cidr", "mapping-client_cidr.json"); + loadEnrichPolicyResolution(enrichResolution, RANGE_TYPE, "ages_policy", "age_range", "ages", "mapping-ages.json"); + loadEnrichPolicyResolution(enrichResolution, RANGE_TYPE, "heights_policy", "height_range", "heights", "mapping-heights.json"); + loadEnrichPolicyResolution(enrichResolution, RANGE_TYPE, "decades_policy", "date_range", "decades", "mapping-decades.json"); + loadEnrichPolicyResolution( + enrichResolution, + GEO_MATCH_TYPE, + "city_boundaries", + "city_boundary", + "airport_city_boundaries", + "mapping-airport_city_boundaries.json" + ); + return enrichResolution; } - public static EnrichResolution loadEnrichPolicyResolution(String policyName, String matchField, String idxName, String mappingFile) { - IndexResolution mapping = loadMapping(mappingFile, idxName); - List enrichFields = new ArrayList<>(mapping.get().mapping().keySet()); - enrichFields.remove(matchField); - EnrichResolution enrichResolution = new EnrichResolution(); - enrichResolution.addResolvedPolicy( - policyName, + public static void loadEnrichPolicyResolution( + EnrichResolution enrich, + String policyType, + String policy, + String field, + String index, + String mapping + ) { + IndexResolution indexResolution = loadMapping(mapping, index); + List enrichFields = new ArrayList<>(indexResolution.get().mapping().keySet()); + enrichFields.remove(field); + enrich.addResolvedPolicy( + policy, Enrich.Mode.ANY, - new ResolvedEnrichPolicy(matchField, EnrichPolicy.MATCH_TYPE, enrichFields, Map.of("", idxName), mapping.get().mapping()) + new ResolvedEnrichPolicy(field, policyType, enrichFields, Map.of("", index), indexResolution.get().mapping()) ); - return enrichResolution; + } + + public static void loadEnrichPolicyResolution(EnrichResolution enrich, String policy, String field, String index, String mapping) { + loadEnrichPolicyResolution(enrich, EnrichPolicy.MATCH_TYPE, policy, field, index, mapping); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 0d406d19d3d16..975b31b967fe0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1462,13 +1462,19 @@ public void testEnrichPolicyMatchFieldName() { public void testEnrichWrongMatchFieldType() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test - | enrich 
languages on languages + | eval x = to_boolean(languages) + | enrich languages on x | keep first_name, language_name, id """)); - assertThat( - e.getMessage(), - containsString("Unsupported type [INTEGER] for enrich matching field [languages]; only KEYWORD allowed") - ); + assertThat(e.getMessage(), containsString("Unsupported type [BOOLEAN] for enrich matching field [x]; only [KEYWORD,")); + + e = expectThrows(VerificationException.class, () -> analyze(""" + FROM airports + | EVAL x = to_string(city_location) + | ENRICH city_boundaries ON x + | KEEP abbrev, airport, region + """, "airports", "mapping-airports.json")); + assertThat(e.getMessage(), containsString("Unsupported type [KEYWORD] for enrich matching field [x]; only [GEO_POINT,")); } public void testValidEnrich() { @@ -1492,6 +1498,41 @@ public void testValidEnrich() { | enrich languages on x with y = language_name | keep first_name, y """, "first_name", "y"); + + assertProjection(analyze(""" + FROM sample_data + | ENRICH client_cidr ON client_ip WITH env + | KEEP client_ip, env + """, "sample_data", "mapping-sample_data.json"), "client_ip", "env"); + + assertProjection(analyze(""" + FROM employees + | WHERE birth_date > "1960-01-01" + | EVAL birth_year = DATE_EXTRACT("year", birth_date) + | EVAL age = 2022 - birth_year + | ENRICH ages_policy ON age WITH age_group = description + | KEEP first_name, last_name, age, age_group + """, "employees", "mapping-default.json"), "first_name", "last_name", "age", "age_group"); + + assertProjection(analyze(""" + FROM employees + | ENRICH heights_policy ON height WITH height_group = description + | KEEP first_name, last_name, height, height_group + """, "employees", "mapping-default.json"), "first_name", "last_name", "height", "height_group"); + + assertProjection(analyze(""" + FROM employees + | ENRICH decades_policy ON birth_date WITH birth_decade = decade, birth_description = description + | ENRICH decades_policy ON hire_date WITH hire_decade = decade + | KEEP first_name, last_name, birth_decade, hire_decade, birth_description + """, "employees", "mapping-default.json"), "first_name", "last_name", "birth_decade", "hire_decade", "birth_description"); + + assertProjection(analyze(""" + FROM airports + | WHERE abbrev == "CPH" + | ENRICH city_boundaries ON city_location WITH airport, region + | KEEP abbrev, airport, region + """, "airports", "mapping-airports.json"), "abbrev", "airport", "region"); } public void testEnrichExcludesPolicyKey() { @@ -1744,7 +1785,10 @@ private void verifyUnsupported(String query, String errorMessage, String mapping } private void assertProjection(String query, String... names) { - var plan = analyze(query); + assertProjection(analyze(query), names); + } + + private void assertProjection(LogicalPlan plan, String... 
names) { var limit = as(plan, Limit.class); assertThat(Expressions.names(limit.output()), contains(names)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java index c06ed820b9983..7f8e1f7113e22 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java @@ -46,6 +46,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; @@ -103,7 +104,7 @@ public void testQueries() throws Exception { inputTerms = termBuilder.build(); } MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid"); - QueryList queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms); + QueryList queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms, KEYWORD); assertThat(queryList.getPositionCount(), equalTo(6)); assertThat(queryList.getQuery(0), equalTo(new TermQuery(new Term("uid", new BytesRef("b2"))))); assertThat(queryList.getQuery(1), equalTo(new TermInSetQuery("uid", new BytesRef("c1"), new BytesRef("a2")))); @@ -218,7 +219,7 @@ public void testRandomMatchQueries() throws Exception { inputTerms = builder.build(); } MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid"); - var queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms); + var queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms, KEYWORD); EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, queryList, reader); Map> actualPositions = new HashMap<>(); while (queryOperator.isFinished() == false) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 1ce383f2327ad..f2bce6951151e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -176,7 +176,8 @@ public class LogicalPlanOptimizerTests extends ESTestCase { public static void init() { parser = new EsqlParser(); logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); - enrichResolution = AnalyzerTestUtils.loadEnrichPolicyResolution("languages_idx", "id", "languages_idx", "mapping-languages.json"); + enrichResolution = new EnrichResolution(); + AnalyzerTestUtils.loadEnrichPolicyResolution(enrichResolution, "languages_idx", "id", "languages_idx", "mapping-languages.json"); // Most tests used data from the test index, so we load it here, and use it in the plan() function. 
mapping = loadMapping("mapping-basic.json"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 0c87db5e5c6db..80b9c07095ee9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -217,6 +217,22 @@ private static EnrichResolution setupEnrichResolution() { ) ) ); + enrichResolution.addResolvedPolicy( + "city_boundaries", + Enrich.Mode.ANY, + new ResolvedEnrichPolicy( + "city_boundary", + EnrichPolicy.GEO_MATCH_TYPE, + List.of("city", "airport", "region", "city_boundary"), + Map.of("", "airport_city_boundaries"), + Map.ofEntries( + Map.entry("city", new EsField("city", DataTypes.KEYWORD, Map.of(), true)), + Map.entry("airport", new EsField("airport", DataTypes.TEXT, Map.of(), false)), + Map.entry("region", new EsField("region", DataTypes.TEXT, Map.of(), false)), + Map.entry("city_boundary", new EsField("city_boundary", EsqlDataTypes.GEO_SHAPE, Map.of(), false)) + ) + ) + ); enrichResolution.addResolvedPolicy( "departments", Enrich.Mode.ANY, @@ -2744,6 +2760,75 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGroupedAggregat source(extract.child()); } + /** + * Plan: + * LimitExec[1000[INTEGER]] + * \_AggregateExec[[],[SPATIALCENTROID(city_location{f}#16) AS centroid],FINAL,null] + * \_ExchangeExec[[xVal{r}#24, xDel{r}#25, yVal{r}#26, yDel{r}#27, count{r}#28],true] + * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ + * Aggregate[[],[SPATIALCENTROID(city_location{f}#16) AS centroid]] + * \_Enrich[ANY,[63 69 74 79 5f 62 6f 75 6e 64 61 72 69 65 73][KEYWORD],city_location{f}#16,{"geo_match":{"indices":[],"match + * _field":"city_boundary","enrich_fields":["city","airport","region","city_boundary"]}},{=airport_city_boundaries + * },[airport{r}#21, region{r}#22, city_boundary{r}#23]] + * \_EsRelation[airports][abbrev{f}#9, city{f}#15, city_location{f}#16, count..]]] + * + * Optimized: + * LimitExec[1000[INTEGER]] + * \_AggregateExec[[],[SPATIALCENTROID(city_location{f}#16) AS centroid],FINAL,50] + * \_ExchangeExec[[xVal{r}#24, xDel{r}#25, yVal{r}#26, yDel{r}#27, count{r}#28],true] + * \_AggregateExec[[],[SPATIALCENTROID(city_location{f}#16) AS centroid],PARTIAL,50] + * \_EnrichExec[ANY,geo_match,city_location{f}#16,city_boundaries,city_boundary,{=airport_city_boundaries},[airport{r}#21, + * region{r}#22, city_boundary{r}#23]] + * \_FilterExec[ISNOTNULL(city_location{f}#16)] + * \_FieldExtractExec[city_location{f}#16][city_location{f}#16]> + * \_EsQueryExec[airports], query[][_doc{f}#46], limit[], sort[] estimatedRowSize[204] + * + * Note the FieldExtractExec has 'city_location' set for doc-values: FieldExtractExec[city_location{f}#16][city_location{f}#16] + */ + public void testEnrichBeforeSpatialAggregationSupportsDocValues() { + var plan = physicalPlanAirports(""" + from airports + | enrich city_boundaries ON city_location WITH airport, region, city_boundary + | stats centroid = st_centroid(city_location) + """); + + var limit = as(plan, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + // Before optimization the aggregation does not use doc-values + assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); + + var exchange = as(agg.child(), ExchangeExec.class); + var fragment = 
as(exchange.child(), FragmentExec.class); + var fAgg = as(fragment.fragment(), Aggregate.class); + var enrich = as(fAgg.child(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.ANY)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("", "airport_city_boundaries"))); + assertThat(enrich.enrichFields().size(), equalTo(3)); + as(enrich.child(), EsRelation.class); + + // Now optimize the plan and assert the aggregation uses doc-values + var optimized = optimizedPlan(plan); + limit = as(optimized, LimitExec.class); + agg = as(limit.child(), AggregateExec.class); + // Above the exchange (in coordinator) the aggregation is not using doc-values + assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); + exchange = as(agg.child(), ExchangeExec.class); + agg = as(exchange.child(), AggregateExec.class); + // below the exchange (in data node) the aggregation is using doc-values + assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, true); + var enrichExec = as(agg.child(), EnrichExec.class); + assertThat(enrichExec.mode(), equalTo(Enrich.Mode.ANY)); + assertThat(enrichExec.concreteIndices(), equalTo(Map.of("", "airport_city_boundaries"))); + assertThat(enrichExec.enrichFields().size(), equalTo(3)); + var filter = as(enrichExec.child(), FilterExec.class); + var extract = as(filter.child(), FieldExtractExec.class); + source(extract.child()); + assertTrue("Expect attributes field extract preference to be DOC_VALUES", extract.attributesToExtract().stream().allMatch(attr -> { + MappedFieldType.FieldExtractPreference extractPreference = PlannerUtils.extractPreference(extract.hasDocValuesAttribute(attr)); + return extractPreference == DOC_VALUES && attr.dataType() == GEO_POINT; + })); + } + public void testEnrichBeforeAggregation() { { var plan = physicalPlan(""" diff --git a/x-pack/plugin/ql/src/test/resources/mapping-sample_data.json b/x-pack/plugin/ql/src/test/resources/mapping-sample_data.json new file mode 100644 index 0000000000000..838a8ba09b45a --- /dev/null +++ b/x-pack/plugin/ql/src/test/resources/mapping-sample_data.json @@ -0,0 +1,16 @@ +{ + "properties": { + "@timestamp": { + "type": "date" + }, + "client_ip": { + "type": "ip" + }, + "event_duration": { + "type": "long" + }, + "message": { + "type": "keyword" + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml index 70a4e0da11b99..89e3c31bd475a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml @@ -82,7 +82,6 @@ teardown: --- "IP strings": - - do: allowed_warnings_regex: - "No limit defined, adding default limit of \\[.*\\]" @@ -104,3 +103,39 @@ teardown: - match: { values.1: [ [ "10.100.0.21", "10.101.0.107" ], [ "Production", "QA" ], [ "OPS","Engineering" ], "sending messages" ] } - match: { values.2: [ "10.101.0.107" , "QA", "Engineering", "network disconnected" ] } - match: { values.3: [ "13.101.0.114" , null, null, "authentication failed" ] } + +--- +"Invalid IP strings": + - do: + catch: /'invalid_[\d\.]+' is not an IP string literal/ + esql.query: + body: + query: 'FROM events | eval ip_str = concat("invalid_", to_string(ip)) | ENRICH networks-policy ON ip_str | sort @timestamp | KEEP ip, name, department, message' + +--- +"IP": + - skip: + version: " - 8.13.99" + reason: "IP range ENRICH 
support was added in 8.14.0" + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events | ENRICH networks-policy ON ip | sort @timestamp | KEEP ip, name, department, message' + + - match: { columns.0.name: "ip" } + - match: { columns.0.type: "ip" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "department" } + - match: { columns.2.type: "keyword" } + - match: { columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + + - length: { values: 4 } + - match: { values.0: [ "10.100.0.21", "Production", "OPS", "network connected" ] } + - match: { values.1: [ [ "10.100.0.21", "10.101.0.107" ], [ "Production", "QA" ], [ "OPS","Engineering" ], "sending messages" ] } + - match: { values.2: [ "10.101.0.107" , "QA", "Engineering", "network disconnected" ] } + - match: { values.3: [ "13.101.0.114" , null, null, "authentication failed" ] } From bc249efefc702ebdcd66ae53b88f6b2b48bb5db1 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 18 Mar 2024 16:09:49 +0100 Subject: [PATCH 241/248] [DOCS] Add contextual info about connectors to API docs (#106420) * [DOCS] Add contextual info about connectors to API docs * Grammar nit --- docs/reference/connector/apis/connector-apis.asciidoc | 9 ++++++++- .../connector/apis/create-connector-api.asciidoc | 7 ++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index 6652f159053fa..2c43395a7fba1 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -3,7 +3,14 @@ preview::[] -The connector and sync jobs API provides a convenient way to create and manage Elastic {enterprise-search-ref}/connectors.html[connectors^] and sync jobs in an internal index. +The connector and sync jobs APIs provide a convenient way to create and manage Elastic {enterprise-search-ref}/connectors.html[connectors^] and sync jobs in an internal index. + +Connectors are third-party {es} integrations which can be deployed on {ecloud} or hosted on your own infrastructure: + +* *Native connectors* are a managed service on {ecloud} +* *Connector clients* are self-managed on your infrastructure + +Find a list of all supported service types in the {enterprise-search-ref}/connectors.html[connectors documentation^]. This API provides an alternative to relying solely on {kib} UI for connector and sync job management. The API comes with a set of validations and assertions to ensure that the state representation in the internal index remains valid. diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc index 2c1c4c9ba7bc4..56e0b378aa1cb 100644 --- a/docs/reference/connector/apis/create-connector-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -6,8 +6,13 @@ preview::[] -Creates a connector. +Creates an Elastic connector. +Connectors are third-party {es} integrations which can be deployed on {ecloud} or hosted on your own infrastructure: +* *Native connectors* are a managed service on {ecloud} +* *Connector clients* are self-managed on your infrastructure + +Find a list of all supported service types in the {enterprise-search-ref}/connectors.html[connectors documentation^]. 
[source,console] -------------------------------------------------- From 7bff3b3becc6fcef12e113ca53e11f38c3276b90 Mon Sep 17 00:00:00 2001 From: Tommaso Teofili Date: Mon, 18 Mar 2024 16:13:38 +0100 Subject: [PATCH 242/248] Add modelId and modelText to KnnVectorQueryBuilder (#106068) * Add modelId and modelText to KnnVectorQueryBuilder Use QueryVectorBuilder within KnnVectorQueryBuilder to make it possible to perform knn queries also when a query vector is not immediately available. Supplying a text_embedding query_vector_builder with model_text and model_id instead of the query_vector will result in the generation of a query_vector by calling inference on the specified model_id with the supplied model_text (during query rewrite). This is consistent with the way query vectors are built from model_id / model_text in KnnSearchBuilder (DFS phase). --- docs/changelog/106068.yaml | 5 + docs/reference/query-dsl/knn-query.asciidoc | 11 +- docs/reference/rest-api/common-parms.asciidoc | 7 + .../org/elasticsearch/TransportVersions.java | 1 + .../search/vectors/KnnVectorQueryBuilder.java | 136 +++++- ...AbstractKnnVectorQueryBuilderTestCase.java | 40 ++ .../ml/integration/TextEmbeddingQueryIT.java | 389 ++++++++++++------ .../ml/search_knn_query_vector_builder.yml | 231 +++++++++++ 8 files changed, 680 insertions(+), 140 deletions(-) create mode 100644 docs/changelog/106068.yaml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml diff --git a/docs/changelog/106068.yaml b/docs/changelog/106068.yaml new file mode 100644 index 0000000000000..fbc30aa86a33e --- /dev/null +++ b/docs/changelog/106068.yaml @@ -0,0 +1,5 @@ +pr: 106068 +summary: Add `modelId` and `modelText` to `KnnVectorQueryBuilder` +area: Search +type: enhancement +issues: [] diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc index c11782f524950..56f90c7d5da01 100644 --- a/docs/reference/query-dsl/knn-query.asciidoc +++ b/docs/reference/query-dsl/knn-query.asciidoc @@ -87,10 +87,19 @@ the top `size` results. `query_vector`:: + -- -(Required, array of floats or string) Query vector. Must have the same number of dimensions +(Optional, array of floats or string) Query vector. Must have the same number of dimensions as the vector field you are searching against. Must be either an array of floats or a hex-encoded byte vector. +Either this or `query_vector_builder` must be provided. -- +`query_vector_builder`:: ++ +-- +(Optional, object) Query vector builder. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector-builder] +-- + + `num_candidates`:: + -- diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 062f832b6f79d..4d71634f38acf 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -600,6 +600,13 @@ Query vector. Must have the same number of dimensions as the vector field you are searching against. Must be either an array of floats or a hex-encoded byte vector. end::knn-query-vector[] +tag::knn-query-vector-builder[] +A configuration object indicating how to build a query_vector before executing +the request. You must provide either a `query_vector_builder` or `query_vector`, +but not both. Refer to <> to learn more. +end::knn-query-vector-builder[] + + tag::knn-similarity[] The minimum similarity required for a document to be considered a match. 
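As a concrete illustration of the `query_vector_builder` parameter documented above, here is a minimal sketch of the query-DSL form this patch enables. It mirrors the `QUERY_DSL_KNN_TEMPLATE` added to `TextEmbeddingQueryIT` further down; the index, field, and model names are placeholders, not values taken from the patch.

[source,console]
----
GET my-index/_search
{
  "query": {
    "knn": {
      "field": "embedding",
      "num_candidates": 10,
      "query_vector_builder": {
        "text_embedding": {
          "model_id": "my-text-embedding-model",
          "model_text": "the words to embed"
        }
      }
    }
  }
}
----

Because the vector is produced during query rewrite by running inference with the given `model_id` on `model_text`, the same builder works in the top-level `knn` section as well as in the `knn` query inside the query DSL, which is what the updated integration tests below exercise.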
The similarity value calculated relates to the raw <> used. Not the diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 536a5db51e8a8..9184e46b11542 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -149,6 +149,7 @@ static TransportVersion def(int id) { public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_609_00_0); public static final TransportVersion ESQL_SERIALIZE_BIG_ARRAY = def(8_610_00_0); public static final TransportVersion AUTO_SHARDING_ROLLOVER_CONDITION = def(8_611_00_0); + public static final TransportVersion KNN_QUERY_VECTOR_BUILDER = def(8_612_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 149dedd59df46..aa5daa532cf42 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ToChildBlockJoinQuery; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; @@ -39,7 +40,9 @@ import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.function.Supplier; +import static org.elasticsearch.common.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -47,7 +50,6 @@ /** * A query that performs kNN search using Lucene's {@link org.apache.lucene.search.KnnFloatVectorQuery} or * {@link org.apache.lucene.search.KnnByteVectorQuery}. 
- * */ public class KnnVectorQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "knn"; @@ -59,11 +61,19 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder PARSER = new ConstructingObjectParser<>( "knn", - args -> new KnnVectorQueryBuilder((String) args[0], (VectorData) args[1], (Integer) args[2], (Float) args[3]) + args -> new KnnVectorQueryBuilder( + (String) args[0], + (VectorData) args[1], + (QueryVectorBuilder) args[4], + null, + (Integer) args[2], + (Float) args[3] + ) ); static { @@ -76,6 +86,11 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder p.namedObject(QueryVectorBuilder.class, n, c), + QUERY_VECTOR_BUILDER_FIELD + ); PARSER.declareFieldArray( KnnVectorQueryBuilder::addFilterQueries, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), @@ -94,26 +109,59 @@ public static KnnVectorQueryBuilder fromXContent(XContentParser parser) { private Integer numCands; private final List filterQueries = new ArrayList<>(); private final Float vectorSimilarity; + private final QueryVectorBuilder queryVectorBuilder; + private final Supplier queryVectorSupplier; public KnnVectorQueryBuilder(String fieldName, float[] queryVector, Integer numCands, Float vectorSimilarity) { - this(fieldName, VectorData.fromFloats(queryVector), numCands, vectorSimilarity); + this(fieldName, VectorData.fromFloats(queryVector), null, null, numCands, vectorSimilarity); + } + + protected KnnVectorQueryBuilder(String fieldName, QueryVectorBuilder queryVectorBuilder, Integer numCands, Float vectorSimilarity) { + this(fieldName, null, queryVectorBuilder, null, numCands, vectorSimilarity); } public KnnVectorQueryBuilder(String fieldName, byte[] queryVector, Integer numCands, Float vectorSimilarity) { - this(fieldName, VectorData.fromBytes(queryVector), numCands, vectorSimilarity); + this(fieldName, VectorData.fromBytes(queryVector), null, null, numCands, vectorSimilarity); } public KnnVectorQueryBuilder(String fieldName, VectorData queryVector, Integer numCands, Float vectorSimilarity) { + this(fieldName, queryVector, null, null, numCands, vectorSimilarity); + } + + private KnnVectorQueryBuilder( + String fieldName, + VectorData queryVector, + QueryVectorBuilder queryVectorBuilder, + Supplier queryVectorSupplier, + Integer numCands, + Float vectorSimilarity + ) { if (numCands != null && numCands > NUM_CANDS_LIMIT) { throw new IllegalArgumentException("[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot exceed [" + NUM_CANDS_LIMIT + "]"); } - if (queryVector == null) { - throw new IllegalArgumentException("[" + QUERY_VECTOR_FIELD.getPreferredName() + "] must be provided"); + if (queryVector == null && queryVectorBuilder == null) { + throw new IllegalArgumentException( + format( + "either [%s] or [%s] must be provided", + QUERY_VECTOR_FIELD.getPreferredName(), + QUERY_VECTOR_BUILDER_FIELD.getPreferredName() + ) + ); + } else if (queryVector != null && queryVectorBuilder != null) { + throw new IllegalArgumentException( + format( + "only one of [%s] and [%s] must be provided", + QUERY_VECTOR_FIELD.getPreferredName(), + QUERY_VECTOR_BUILDER_FIELD.getPreferredName() + ) + ); } this.fieldName = fieldName; this.queryVector = queryVector; this.numCands = numCands; this.vectorSimilarity = vectorSimilarity; + this.queryVectorBuilder = queryVectorBuilder; + this.queryVectorSupplier = queryVectorSupplier; } public KnnVectorQueryBuilder(StreamInput in) throws IOException { @@ -144,6 +192,12 @@ public KnnVectorQueryBuilder(StreamInput in) throws IOException { } else { 
this.vectorSimilarity = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.KNN_QUERY_VECTOR_BUILDER)) { + this.queryVectorBuilder = in.readOptionalNamedWriteable(QueryVectorBuilder.class); + } else { + this.queryVectorBuilder = null; + } + this.queryVectorSupplier = null; } public String getFieldName() { @@ -168,6 +222,11 @@ public List filterQueries() { return filterQueries; } + @Nullable + public QueryVectorBuilder queryVectorBuilder() { + return queryVectorBuilder; + } + public KnnVectorQueryBuilder addFilterQuery(QueryBuilder filterQuery) { Objects.requireNonNull(filterQuery); this.filterQueries.add(filterQuery); @@ -182,6 +241,9 @@ public KnnVectorQueryBuilder addFilterQueries(List filterQueries) @Override protected void doWriteTo(StreamOutput out) throws IOException { + if (queryVectorSupplier != null) { + throw new IllegalStateException("missing a rewriteAndFetch?"); + } out.writeString(fieldName); if (out.getTransportVersion().onOrAfter(TransportVersions.KNN_QUERY_NUMCANDS_AS_OPTIONAL_PARAM)) { out.writeOptionalVInt(numCands); @@ -216,19 +278,41 @@ protected void doWriteTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalFloat(vectorSimilarity); } + if (out.getTransportVersion().before(TransportVersions.KNN_QUERY_VECTOR_BUILDER) && queryVectorBuilder != null) { + throw new IllegalArgumentException( + format( + "cannot serialize [%s] to older node of version [%s]", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + out.getTransportVersion() + ) + ); + } + if (out.getTransportVersion().onOrAfter(TransportVersions.KNN_QUERY_VECTOR_BUILDER)) { + out.writeOptionalNamedWriteable(queryVectorBuilder); + } } @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { + if (queryVectorSupplier != null) { + throw new IllegalStateException("missing a rewriteAndFetch?"); + } builder.startObject(NAME); builder.field(FIELD_FIELD.getPreferredName(), fieldName); - builder.field(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + if (queryVector != null) { + builder.field(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + } if (numCands != null) { builder.field(NUM_CANDS_FIELD.getPreferredName(), numCands); } if (vectorSimilarity != null) { builder.field(VECTOR_SIMILARITY_FIELD.getPreferredName(), vectorSimilarity); } + if (queryVectorBuilder != null) { + builder.startObject(QUERY_VECTOR_BUILDER_FIELD.getPreferredName()); + builder.field(queryVectorBuilder.getWriteableName(), queryVectorBuilder); + builder.endObject(); + } if (filterQueries.isEmpty() == false) { builder.startArray(FILTER_FIELD.getPreferredName()); for (QueryBuilder filterQuery : filterQueries) { @@ -247,6 +331,36 @@ public String getWriteableName() { @Override protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { + if (queryVectorSupplier != null) { + if (queryVectorSupplier.get() == null) { + return this; + } + return new KnnVectorQueryBuilder(fieldName, queryVectorSupplier.get(), numCands, vectorSimilarity).boost(boost) + .queryName(queryName) + .addFilterQueries(filterQueries); + } + if (queryVectorBuilder != null) { + SetOnce toSet = new SetOnce<>(); + ctx.registerAsyncAction((c, l) -> queryVectorBuilder.buildVector(c, l.delegateFailureAndWrap((ll, v) -> { + toSet.set(v); + if (v == null) { + ll.onFailure( + new IllegalArgumentException( + format( + "[%s] with name [%s] returned null query_vector", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + 
queryVectorBuilder.getWriteableName() + ) + ) + ); + return; + } + ll.onResponse(null); + }))); + return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, toSet::get, numCands, vectorSimilarity).boost( + boost + ).queryName(queryName).addFilterQueries(filterQueries); + } if (ctx.convertToInnerHitsRewriteContext() != null) { return new ExactKnnQueryBuilder(queryVector, fieldName).boost(boost).queryName(queryName); } @@ -263,7 +377,8 @@ protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { rewrittenQueries.add(rewrittenQuery); } if (changed) { - return new KnnVectorQueryBuilder(fieldName, queryVector, numCands, vectorSimilarity).boost(boost) + return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, queryVectorSupplier, numCands, vectorSimilarity) + .boost(boost) .queryName(queryName) .addFilterQueries(rewrittenQueries); } @@ -338,7 +453,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { @Override protected int doHashCode() { - return Objects.hash(fieldName, Objects.hashCode(queryVector), numCands, filterQueries, vectorSimilarity); + return Objects.hash(fieldName, Objects.hashCode(queryVector), numCands, filterQueries, vectorSimilarity, queryVectorBuilder); } @Override @@ -347,7 +462,8 @@ protected boolean doEquals(KnnVectorQueryBuilder other) { && Objects.equals(queryVector, other.queryVector) && Objects.equals(numCands, other.numCands) && Objects.equals(filterQueries, other.filterQueries) - && Objects.equals(vectorSimilarity, other.vectorSimilarity); + && Objects.equals(vectorSimilarity, other.vectorSimilarity) + && Objects.equals(queryVectorBuilder, other.queryVectorBuilder); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index 45ad9d514ba82..b760262cd1ea6 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -23,6 +24,8 @@ import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.test.AbstractBuilderTestCase; @@ -38,7 +41,9 @@ import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; abstract class AbstractKnnVectorQueryBuilderTestCase extends AbstractQueryTestCase { private static final String VECTOR_FIELD = "vector"; @@ -248,4 +253,39 @@ private void assertBWCSerialization(QueryBuilder newQuery, QueryBuilder 
bwcQuery } } } + + public void testRewriteWithQueryVectorBuilder() throws Exception { + int dims = randomInt(1024); + float[] expectedArray = new float[dims]; + for (int i = 0; i < dims; i++) { + expectedArray[i] = randomFloat(); + } + KnnVectorQueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder( + "field", + new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(expectedArray), + 5, + 1f + ); + knnVectorQueryBuilder.boost(randomFloat()); + List filters = new ArrayList<>(); + int numFilters = randomIntBetween(1, 5); + for (int i = 0; i < numFilters; i++) { + String filterFieldName = randomBoolean() ? KEYWORD_FIELD_NAME : TEXT_FIELD_NAME; + filters.add(QueryBuilders.termQuery(filterFieldName, randomAlphaOfLength(10))); + } + knnVectorQueryBuilder.addFilterQueries(filters); + + QueryRewriteContext context = new QueryRewriteContext(null, null, null); + PlainActionFuture knnFuture = new PlainActionFuture<>(); + Rewriteable.rewriteAndFetch(knnVectorQueryBuilder, context, knnFuture); + KnnVectorQueryBuilder rewritten = (KnnVectorQueryBuilder) knnFuture.get(); + + assertThat(rewritten.getFieldName(), equalTo(knnVectorQueryBuilder.getFieldName())); + assertThat(rewritten.boost(), equalTo(knnVectorQueryBuilder.boost())); + assertThat(rewritten.queryVector().asFloatVector(), equalTo(expectedArray)); + assertThat(rewritten.queryVectorBuilder(), nullValue()); + assertThat(rewritten.getVectorSimilarity(), equalTo(1f)); + assertThat(rewritten.filterQueries(), hasSize(numFilters)); + assertThat(rewritten.filterQueries(), equalTo(filters)); + } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java index 82597e16837c6..a0a8e9fcfb7a5 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java @@ -98,6 +98,158 @@ public class TextEmbeddingQueryIT extends PyTorchModelRestTestCase { RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } + private static final String TOP_LEVEL_KNN_TEMPLATE = """ + { + "knn": { + "field": "%s", + "k": 5, + "num_candidates": 10, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "%s" + } + } + } + }"""; + private static final String TOP_LEVEL_KNN_FILTER_TEMPLATE = """ + { + "knn": { + "field": "%s", + "k": 5, + "num_candidates": 10, + "filter": %s, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "%s" + } + } + } + }"""; + private static final String TOP_LEVEL_KNN_HYBRID_ALL = """ + { + "knn": { + "field": "embedding", + "k": 3, + "num_candidates": 10, + "boost": 10.0, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "my words" + } + } + }, + "query": {"match_all": {}}, + "size": 7 + }"""; + private static final String TOP_LEVEL_KNN_HYBRID_MATCH = """ + { + "knn": { + "field": "embedding", + "k": 3, + "num_candidates": 10, + "boost": 10.0, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "my words" + } + } + }, + "query": {"match": {"source_text": {"query": "apricot unrelated"}}} + }"""; + + private static final String QUERY_DSL_KNN_TEMPLATE = """ + { + "query": { + 
"knn" : { + "field": "%s", + "num_candidates": 10, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "%s" + } + } + } + } + }"""; + private static final String QUERY_DSL_KNN_FILTER_TEMPLATE = """ + { + "query": { + "knn" : { + "field": "%s", + "num_candidates": 10, + "filter": %s, + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "%s" + } + } + } + } + }"""; + private static final String QUERY_DSL_KNN_HYBRID_ALL = """ + { + "query": { + "bool": { + "should": [ + { + "match_all": {} + }, + { + "knn": { + "field": "embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "my words" + } + }, + "num_candidates": 10, + "boost": 10 + } + } + ] + } + }, + "size": 7 + }"""; + private static final String QUERY_DSL_KNN_HYBRID_MATCH = """ + { + "query": { + "bool": { + "should": [ + { + "match": { + "source_text": { + "query": "apricot unrelated", + "boost": 1 + } + } + }, + { + "knn": { + "field": "embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "%s", + "model_text": "my words" + } + }, + "num_candidates": 10, + "boost": 10 + } + } + ] + } + }, + "size": 7 + }"""; + @SuppressWarnings("unchecked") public void testTextEmbeddingQuery() throws IOException { String modelId = "text-embedding-test"; @@ -141,39 +293,59 @@ public void testTextEmbeddingQuery() throws IOException { // Test text embedding search against the indexed vectors for (int i = 0; i < 5; i++) { int randomInput = randomIntBetween(0, inputs.size() - 1); - var textEmbeddingSearchResponse = textEmbeddingSearch(indexName, inputs.get(randomInput), modelId, "embedding"); - assertOkWithErrorMessage(textEmbeddingSearchResponse); - - Map responseMap = responseAsMap(textEmbeddingSearchResponse); - List> hits = (List>) MapHelper.dig("hits.hits", responseMap); - Map topHit = hits.get(0); - String sourceText = (String) MapHelper.dig("_source.source_text", topHit); - assertEquals(inputs.get(randomInput), sourceText); + for (String template : new String[] { TOP_LEVEL_KNN_TEMPLATE, QUERY_DSL_KNN_TEMPLATE }) { + var textEmbeddingSearchResponse = textEmbeddingSearch(indexName, inputs.get(randomInput), modelId, "embedding", template); + assertOkWithErrorMessage(textEmbeddingSearchResponse); + + Map responseMap = responseAsMap(textEmbeddingSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", responseMap); + Map topHit = hits.get(0); + String sourceText = (String) MapHelper.dig("_source.source_text", topHit); + assertEquals(inputs.get(randomInput), sourceText); + } } // Test text embedding search with filters { - var textEmbeddingSearchResponse = textEmbeddingSearchWithTermsFilter(indexName, inputs.get(0), "foo", modelId, "embedding"); - assertOkWithErrorMessage(textEmbeddingSearchResponse); - - Map responseMap = responseAsMap(textEmbeddingSearchResponse); - List> hits = (List>) MapHelper.dig("hits.hits", responseMap); - assertThat(hits, hasSize(3)); - for (var hit : hits) { - String filter = (String) MapHelper.dig("_source.filter_field", hit); - assertEquals("foo", filter); + for (String template : new String[] { TOP_LEVEL_KNN_FILTER_TEMPLATE, QUERY_DSL_KNN_FILTER_TEMPLATE }) { + var textEmbeddingSearchResponse = textEmbeddingSearchWithTermsFilter( + indexName, + inputs.get(0), + "foo", + modelId, + "embedding", + template + ); + assertOkWithErrorMessage(textEmbeddingSearchResponse); + + Map responseMap = responseAsMap(textEmbeddingSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", 
responseMap); + assertThat(hits, hasSize(3)); + for (var hit : hits) { + String filter = (String) MapHelper.dig("_source.filter_field", hit); + assertEquals("foo", filter); + } } } { - var textEmbeddingSearchResponse = textEmbeddingSearchWithTermsFilter(indexName, inputs.get(2), "baz", modelId, "embedding"); - assertOkWithErrorMessage(textEmbeddingSearchResponse); - - Map responseMap = responseAsMap(textEmbeddingSearchResponse); - List> hits = (List>) MapHelper.dig("hits.hits", responseMap); - assertThat(hits, hasSize(2)); - for (var hit : hits) { - String filter = (String) MapHelper.dig("_source.filter_field", hit); - assertEquals("baz", filter); + for (String template : new String[] { TOP_LEVEL_KNN_FILTER_TEMPLATE, QUERY_DSL_KNN_FILTER_TEMPLATE }) { + var textEmbeddingSearchResponse = textEmbeddingSearchWithTermsFilter( + indexName, + inputs.get(2), + "baz", + modelId, + "embedding", + template + ); + assertOkWithErrorMessage(textEmbeddingSearchResponse); + + Map responseMap = responseAsMap(textEmbeddingSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", responseMap); + assertThat(hits, hasSize(2)); + for (var hit : hits) { + String filter = (String) MapHelper.dig("_source.filter_field", hit); + assertEquals("baz", filter); + } } } } @@ -219,76 +391,53 @@ public void testHybridSearch() throws IOException { forceMergeIndex(indexName); { - // combined query should return size documents where size > k - Request request = new Request("GET", indexName + "/_search"); - request.setJsonEntity(Strings.format(""" - { - "knn": { - "field": "embedding", - "k": 3, - "num_candidates": 10, - "boost": 10.0, - "query_vector_builder": { - "text_embedding": { - "model_id": "%s", - "model_text": "my words" - } - } - }, - "query": {"match_all": {}}, - "size": 7 - }""", modelId)); - var semanticSearchResponse = client().performRequest(request); - assertOkWithErrorMessage(semanticSearchResponse); - - Map responseMap = responseAsMap(semanticSearchResponse); - int hitCount = (Integer) MapHelper.dig("hits.total.value", responseMap); - assertEquals(7, hitCount); + for (String template : new String[] { TOP_LEVEL_KNN_HYBRID_ALL, QUERY_DSL_KNN_HYBRID_ALL }) { + // combined query should return size documents where size > k + Request request = new Request("GET", indexName + "/_search"); + request.setJsonEntity(Strings.format(template, modelId)); + var semanticSearchResponse = client().performRequest(request); + assertOkWithErrorMessage(semanticSearchResponse); + + Map responseMap = responseAsMap(semanticSearchResponse); + int hitCount = (Integer) MapHelper.dig("hits.total.value", responseMap); + assertEquals(7, hitCount); + } } { - // boost the knn score, as the query is an exact match the unboosted - // score should be close to 1.0. 
Use an unrelated query so scores are - // not combined - Request request = new Request("GET", indexName + "/_search"); - request.setJsonEntity(Strings.format(""" - { - "knn": { - "field": "embedding", - "k": 3, - "num_candidates": 10, - "boost": 10.0, - "query_vector_builder": { - "text_embedding": { - "model_id": "%s", - "model_text": "my words" - } - } - }, - "query": {"match": {"source_text": {"query": "apricot unrelated"}}} - }""", modelId)); - var semanticSearchResponse = client().performRequest(request); - assertOkWithErrorMessage(semanticSearchResponse); - - Map responseMap = responseAsMap(semanticSearchResponse); - List> hits = (List>) MapHelper.dig("hits.hits", responseMap); - boolean found = false; - for (var hit : hits) { - String source = (String) MapHelper.dig("_source.source_text", hit); - if (source.equals("my words")) { - assertThat((Double) MapHelper.dig("_score", hit), closeTo(10.0, 0.01)); - found = true; + for (String template : new String[] { TOP_LEVEL_KNN_HYBRID_MATCH, QUERY_DSL_KNN_HYBRID_MATCH }) { + // boost the knn score, as the query is an exact match the unboosted + // score should be close to 1.0. Use an unrelated query so scores are + // not combined + Request request = new Request("GET", indexName + "/_search"); + request.setJsonEntity(Strings.format(template, modelId)); + var semanticSearchResponse = client().performRequest(request); + assertOkWithErrorMessage(semanticSearchResponse); + + Map responseMap = responseAsMap(semanticSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", responseMap); + boolean found = false; + for (var hit : hits) { + String source = (String) MapHelper.dig("_source.source_text", hit); + if (source.equals("my words")) { + assertThat((Double) MapHelper.dig("_score", hit), closeTo(10.0, 0.01)); + found = true; + } } + assertTrue("should have found hit for string 'my words'", found); } - assertTrue("should have found hit for string 'my words'", found); } } public void testSearchWithMissingModel() { String modelId = "missing-model"; String indexName = modelId + "-index"; - - var e = expectThrows(ResponseException.class, () -> textEmbeddingSearch(indexName, "the machine is leaking", modelId, "embedding")); - assertThat(e.getMessage(), containsString("[missing-model] is not an inference service model or a deployed ml model")); + for (String template : new String[] { TOP_LEVEL_KNN_TEMPLATE, QUERY_DSL_KNN_TEMPLATE }) { + var e = expectThrows( + ResponseException.class, + () -> textEmbeddingSearch(indexName, "the machine is leaking", modelId, "embedding", template) + ); + assertThat(e.getMessage(), containsString("[missing-model] is not an inference service model or a deployed ml model")); + } } @SuppressWarnings("unchecked") @@ -391,37 +540,32 @@ public void testModelWithPrefixStrings() throws IOException { bulkIndexDocs(inputs, filters, embeddings, indexName); forceMergeIndex(indexName); - // the input "my words" should be prefixed with searchPrefix - var textEmbeddingSearchResponse = textEmbeddingSearch(indexName, "my words", modelId, "embedding"); - assertOkWithErrorMessage(textEmbeddingSearchResponse); - - Map responseMap = responseAsMap(textEmbeddingSearchResponse); - List> hits = (List>) MapHelper.dig("hits.hits", responseMap); - Map topHit = hits.get(0); - String sourceText = (String) MapHelper.dig("_source.source_text", topHit); - // The top hit should have the search prefix - assertEquals(searchPrefix + "my words", sourceText); - List foundEmbedding = (List) MapHelper.dig("_source.embedding", topHit); - 
assertEquals(embeddings.get(0), foundEmbedding); + for (String template : new String[] { TOP_LEVEL_KNN_TEMPLATE, QUERY_DSL_KNN_TEMPLATE }) { + // the input "my words" should be prefixed with searchPrefix + var textEmbeddingSearchResponse = textEmbeddingSearch(indexName, "my words", modelId, "embedding", template); + assertOkWithErrorMessage(textEmbeddingSearchResponse); + + Map responseMap = responseAsMap(textEmbeddingSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", responseMap); + Map topHit = hits.get(0); + String sourceText = (String) MapHelper.dig("_source.source_text", topHit); + // The top hit should have the search prefix + assertEquals(searchPrefix + "my words", sourceText); + List foundEmbedding = (List) MapHelper.dig("_source.embedding", topHit); + assertEquals(embeddings.get(0), foundEmbedding); + } } - protected Response textEmbeddingSearch(String index, String modelText, String modelId, String denseVectorFieldName) throws IOException { + protected Response textEmbeddingSearch( + String index, + String modelText, + String modelId, + String denseVectorFieldName, + String queryTemplate + ) throws IOException { Request request = new Request("GET", index + "/_search?error_trace=true"); - request.setJsonEntity(Strings.format(""" - { - "knn": { - "field": "%s", - "k": 5, - "num_candidates": 10, - "query_vector_builder": { - "text_embedding": { - "model_id": "%s", - "model_text": "%s" - } - } - } - }""", denseVectorFieldName, modelId, modelText)); + request.setJsonEntity(Strings.format(queryTemplate, denseVectorFieldName, modelId, modelText)); return client().performRequest(request); } @@ -430,7 +574,8 @@ protected Response textEmbeddingSearchWithTermsFilter( String modelText, String filter, String modelId, - String denseVectorFieldName + String denseVectorFieldName, + String queryTemplate ) throws IOException { Request request = new Request("GET", index + "/_search?error_trace=true"); @@ -438,21 +583,7 @@ protected Response textEmbeddingSearchWithTermsFilter( {"term": {"filter_field": "%s"}} """, filter); - request.setJsonEntity(Strings.format(""" - { - "knn": { - "field": "%s", - "k": 5, - "num_candidates": 10, - "filter": %s, - "query_vector_builder": { - "text_embedding": { - "model_id": "%s", - "model_text": "%s" - } - } - } - }""", denseVectorFieldName, termsFilter, modelId, modelText)); + request.setJsonEntity(Strings.format(queryTemplate, denseVectorFieldName, termsFilter, modelId, modelText)); return client().performRequest(request); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml new file mode 100644 index 0000000000000..97e5146e9af86 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/search_knn_query_vector_builder.yml @@ -0,0 +1,231 @@ +# This test uses the same text embedding model from `text_embedding_search-yml` +setup: + - skip: + features: headers + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: index-with-embedded-text + body: + mappings: + properties: + source_text: + type: keyword + embedding: + type: dense_vector + dims: 100 + index: true + similarity: cosine + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.create: + index: unrelated + body: + mappings: + properties: + source_text: + type: keyword + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_trained_model: + model_id: "text_embedding_model" + body: > + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "text_embedding": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + } + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_trained_model_vocabulary: + model_id: "text_embedding_model" + body: > + { "vocabulary": ["[PAD]", "[UNK]", "these", "are", "my", "words", "the", "washing", "machine", "is", "leaking", "octopus", "comforter", "smells"] } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_trained_model_definition_part: + model_id: "text_embedding_model" + part: 0 + body: > + { + "total_definition_length":1694, + "definition": "UEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAUAA4Ac2ltcGxlbW9kZWwvZGF0YS5wa2xGQgoAWlpaWlpaWlpaWoACY19fdG9yY2hfXwpUaW55VGV4dEVtYmVkZGluZwpxACmBfShYCAAAAHRyYWluaW5ncQGJWBYAAABfaXNfZnVsbF9iYWNrd2FyZF9ob29rcQJOdWJxAy5QSwcIsFTQsFgAAABYAAAAUEsDBBQACAgIAAAAAAAAAAAAAAAAAAAAAAAdAB0Ac2ltcGxlbW9kZWwvY29kZS9fX3RvcmNoX18ucHlGQhkAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWoWPMWvDMBCF9/yKGy1IQ7Ia0q1j2yWbMYdsnWphWWd0Em3+fS3bBEopXd99j/dd77UI3Fy43+grvUwdGePCR/XKJntS9QEAcdZRT5QoCiJcoWnXtMvW/ohS1C4sZaihY/YFcoI2e4+d7sdPHQ0OzONyf5+T46B9U8DSNWTBcixMJeRtvQwkjv2AePpld1wKAC7MOaEzUsONgnDc4sQjBUz3mbbbY2qD2usbB9rQmcWV47/gOiVIReAvUsHT8y5S7yKL/mnSIWuPQmSqLRm0DJWkWD0eUEqtjUgpx7AXow6mai5HuJzPrTp8A1BLBwiD/6yJ6gAAAKkBAABQSwMEFAAICAgAAAAAAAAAAAAAAAAAAAAAACcAQQBzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weS5kZWJ1Z19wa2xGQj0AWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWo2Qz0rDQBDGk/5RmjfwlmMCbWivBZ9gWL0IFkRCdLcmmOwmuxu0N08O3r2rCOrdx9CDgm/hWUUQMdugzUk6LCwzv++bGeak5YE1saoorNgCCwsbzFc9sm1PvivQo2zqToU8iiT1FEunfadXRcLzUocJVWN3i3ElZF3W4pDxUM9yVrPNXCeCR+lOLdp1190NwVktzoVKDF5COh+nQpbtsX+0/tjpOWYJuR8HMuJUZEEW8TJKQ8UY9eJIxZ7S0vvb3vf9yiCZLiV3Fz5v1HdHw6HvFK3JWnUElWR5ygbz8TThB4NMUJYG+axowyoWHbiHBwQbSWbHHXiEJ4QWkmOTPMMLQhvJaZOgSX49Z3a8uPq5Ia/whtBBctEkl4a8wwdCF8lVk1wb8glfCCtIbprkttntrkF00Q1+AFBLBwi4BIswOAEAAP0BAABQSwMEAAAICAAAAAAAAAAAAAAAAAAAAAAAABkAQQBzaW1wbGVtb2RlbC9jb25zdGFudHMucGtsRkI9AFpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlqAAikuUEsHCG0vCVcEAAAABAAAAFBLAwQAAAgIAAAAAAAAAAAAAAAAAAAAAAAAEwA7AHNpbXBsZW1vZGVsL3ZlcnNpb25GQjcAWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWjMKUEsHCNGeZ1UCAAAAAgAAAFBLAQIAAAAACAgAAAAAAACwVNCwWAAAAFgAAAAUAAAAAAAAAAAAAAAAAAAAAABzaW1wbGVtb2RlbC9kYXRhLnBrbFBLAQIAABQACAgIAAAAAACD/6yJ6gAAAKkBAAAdAAAAAAAAAAAAAAAAAKgAAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weVBLAQIAABQACAgIAAAAAAC4BIswOAEAAP0BAAAnAAAAAAAAAAAAAAAAAPoBAABzaW1wbGVtb2RlbC9jb2RlL19fdG9yY2hfXy5weS5kZWJ1Z19wa2xQSwECAAAAAAgIAAAAAAAAbS8JVwQAAAAEAAAAGQAAAAAAAAAAAAAAAADIAwAAc2ltcGxlbW9kZWwvY29uc3RhbnRzLnBrbFBLAQIAAAAACAgAAAAAAADRnmdVAgAAAAIAAAATAAAAAAAAAAAAAAAAAFQEAABzaW1wbGVtb2RlbC92ZXJzaW9uUEsGBiwAAAAAAAAAHgMtAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAGoBAAAAAAAA0gQAAAAAAABQSwYHAAAAADwGAAAAAAAAAQAAAFBLBQYAAAAABQAFAGoBAADSBAAAAAA=", + "total_parts": 1 + } + - do: + headers: + 
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + bulk: + index: index-with-embedded-text + refresh: true + body: | + {"index": {}} + {"source_text": "the octopus comforter smells", "embedding":[0.3925197124481201, 0.9145996570587158, 0.01372915506362915, 0.9987854957580566, 0.3240084648132324, 0.6188188195228577, 0.926924467086792, 0.12143599987030029, 0.175662100315094, 0.16076070070266724, 0.7671306133270264, 0.9518267512321472, 0.4557478427886963, 0.5410670638084412, 0.7594802975654602, 0.5035953521728516, 0.4115469455718994, 0.038427770137786865, 0.5419668555259705, 0.6362232565879822, 0.17554593086242676, 0.01821446418762207, 0.2931918501853943, 0.294437050819397, 0.6901726722717285, 0.1679999828338623, 0.7995195984840393, 0.8781598210334778, 0.18507736921310425, 0.8614458441734314, 0.690071702003479, 0.7859554886817932, 0.803643524646759, 0.0048452019691467285, 0.19700628519058228, 0.22210919857025146, 0.7043975591659546, 0.6320799589157104, 0.542057991027832, 0.8704766035079956, 0.32195907831192017, 0.7272325158119202, 0.4066658020019531, 0.89588862657547, 0.7947880029678345, 0.06543421745300293, 0.2873639464378357, 0.8773637413978577, 0.36480581760406494, 0.692948043346405, 0.19171112775802612, 0.14275449514389038, 0.17054951190948486, 0.8969640135765076, 0.39838290214538574, 0.26756417751312256, 0.5369327664375305, 0.4736328721046448, 0.21181154251098633, 0.2695402503013611, 0.8651300072669983, 0.8051849603652954, 0.7073134779930115, 0.5963589549064636, 0.09601861238479614, 0.5362404584884644, 0.23020631074905396, 0.8515381813049316, 0.5730932354927063, 0.7235705256462097, 0.08228331804275513, 0.5840849280357361, 0.6030013561248779, 0.2084050178527832, 0.7312950491905212, 0.6159517168998718, 0.6482340693473816, 0.07220339775085449, 0.5136227607727051, 0.9152160286903381, 0.8169018030166626, 0.15515869855880737, 0.7978536486625671, 0.564482569694519, 0.4757157564163208, 0.2718064785003662, 0.6910138726234436, 0.5675734877586365, 0.702862024307251, 0.19079893827438354, 0.8995556235313416, 0.4988499879837036, 0.6378234028816223, 0.2683940529823303, 0.21990180015563965, 0.8442690372467041, 0.8502047061920166, 0.9857811331748962, 0.3549607992172241, 0.7605474591255188]} + {"index": {}} + {"source_text": "the machine is leaking", "embedding":[0.09775793552398682, 0.9594467282295227, 0.7915146946907043, 0.9140479564666748, 0.5148435235023499, 0.8556410670280457, 0.6022665500640869, 0.05222177505493164, 0.9821935296058655, 0.49276530742645264, 0.23147249221801758, 0.2428399920463562, 0.3865380883216858, 0.5778483748435974, 0.5600519776344299, 0.9427472352981567, 0.48832541704177856, 0.8807493448257446, 0.32909590005874634, 0.8452557325363159, 0.811530590057373, 0.13344216346740723, 0.15256845951080322, 0.5025331974029541, 0.4288772940635681, 0.6590417623519897, 0.9282752871513367, 0.8842046856880188, 0.7873250842094421, 0.356731653213501, 0.9959152936935425, 0.07572609186172485, 0.5062583088874817, 0.36245888471603394, 0.6189196705818176, 0.7766605019569397, 0.5198523998260498, 0.7379586100578308, 0.0553441047668457, 0.5035901665687561, 0.24139636754989624, 0.10798943042755127, 0.272808313369751, 0.38171595335006714, 0.24275553226470947, 0.956981897354126, 0.8182021379470825, 0.9383817315101624, 0.06551980972290039, 0.6892690658569336, 0.7068917751312256, 0.5184322595596313, 0.6103079319000244, 0.7020677328109741, 0.7181660532951355, 0.6477184295654297, 
0.26282840967178345, 0.9316624402999878, 0.8318467140197754, 0.1487215757369995, 0.39937925338745117, 0.6842989921569824, 0.3496543765068054, 0.6008991003036499, 0.9530165791511536, 0.4209877848625183, 0.5675879716873169, 0.7883706092834473, 0.9547191858291626, 0.6292906403541565, 0.49566715955734253, 0.6907342672348022, 0.0834314227104187, 0.19785481691360474, 0.4896165728569031, 0.8460168838500977, 0.9680339097976685, 0.43386441469192505, 0.7068926095962524, 0.19123870134353638, 0.5661664009094238, 0.610595166683197, 0.23599380254745483, 0.2831611633300781, 0.7919651865959167, 0.0018386244773864746, 0.15559959411621094, 0.4622604250907898, 0.02038663625717163, 0.42241227626800537, 0.4200526475906372, 0.1223069429397583, 0.7035380005836487, 0.09902423620223999, 0.7804107666015625, 0.05339455604553223, 0.6485095024108887, 0.29347676038742065, 0.9716366529464722, 0.30257928371429443]} + {"index": {}} + {"source_text": "these are my words", "embedding":[0.7000167369842529, 0.590781033039093, 0.009879708290100098, 0.7874260544776917, 0.797156572341919, 0.1791083812713623, 0.07826781272888184, 0.25102007389068604, 0.09334254264831543, 0.3819708824157715, 0.7312374711036682, 0.02819347381591797, 0.20099765062332153, 0.7702597975730896, 0.9443559050559998, 0.35520339012145996, 0.25699591636657715, 0.5596823692321777, 0.23947590589523315, 0.47478222846984863, 0.23411548137664795, 0.9809996485710144, 0.3806597590446472, 0.5006771087646484, 0.5724453926086426, 0.21510547399520874, 0.07062828540802002, 0.9858258962631226, 0.9636645317077637, 0.36034029722213745, 0.07260054349899292, 0.06882566213607788, 0.18354403972625732, 0.06756395101547241, 0.5749042630195618, 0.05275309085845947, 0.1865217685699463, 0.5852730870246887, 0.1086682677268982, 0.10090464353561401, 0.32582908868789673, 0.5494027733802795, 0.873362123966217, 0.02236837148666382, 0.37973177433013916, 0.5556552410125732, 0.5083678364753723, 0.8081125020980835, 0.09164196252822876, 0.2207810878753662, 0.8086426258087158, 0.271828293800354, 0.5981417298316956, 0.7745779156684875, 0.40872830152511597, 0.6035888195037842, 0.5598325133323669, 0.19086670875549316, 0.02406853437423706, 0.8299782872200012, 0.4994274377822876, 0.0300295352935791, 0.47190529108047485, 0.8889331817626953, 0.34195321798324585, 0.9380808472633362, 0.4418332576751709, 0.5789303779602051, 0.0526617169380188, 0.7349719405174255, 0.44571834802627563, 0.6602563261985779, 0.3819742202758789, 0.16881734132766724, 0.45588219165802, 0.028081774711608887, 0.6681976914405823, 0.8183007836341858, 0.7887755632400513, 0.4506028890609741, 0.8040162324905396, 0.431918203830719, 0.7408918738365173, 0.39756304025650024, 0.7438145875930786, 0.6120601892471313, 0.5724676251411438, 0.08701330423355103, 0.18344634771347046, 0.7226220369338989, 0.3648560643196106, 0.9813777208328247, 0.2615315318107605, 0.9847549796104431, 0.32967478036880493, 0.47099196910858154, 0.3591546416282654, 0.4132147431373596, 0.48631107807159424, 0.04420149326324463]} + {"index": {}} + {"source_text": "washing machine", "embedding":[0.7044712901115417, 0.12284207344055176, 0.5008929967880249, 0.04643195867538452, 0.3666788339614868, 0.26660799980163574, 0.24114298820495605, 0.0761682391166687, 0.5294214487075806, 0.16935181617736816, 0.6257967948913574, 0.2804388999938965, 0.6417903900146484, 0.169958233833313, 0.4216839075088501, 0.6773303747177124, 0.9472144842147827, 0.21874648332595825, 0.5095921754837036, 0.839306116104126, 0.6176233291625977, 0.5847064852714539, 0.6748610734939575, 0.3264034390449524, 
0.4112023115158081, 0.13818275928497314, 0.08356589078903198, 0.4147903323173523, 0.5626787543296814, 0.7167286276817322, 0.6314535737037659, 0.23092854022979736, 0.34547603130340576, 0.7425565719604492, 0.2837678790092468, 0.47037917375564575, 0.1555209755897522, 0.5618507266044617, 0.2076261043548584, 0.3026384711265564, 0.04561811685562134, 0.1691250205039978, 0.2504339814186096, 0.5350574851036072, 0.26857447624206543, 0.23607933521270752, 0.16938960552215576, 0.23708534240722656, 0.026302993297576904, 0.16901731491088867, 0.2847784757614136, 0.944273829460144, 0.28171658515930176, 0.9864799380302429, 0.6811433434486389, 0.9383156895637512, 0.5682582259178162, 0.14361613988876343, 0.7900274395942688, 0.27808505296707153, 0.05677521228790283, 0.08594226837158203, 0.6450491547584534, 0.06500720977783203, 0.36045730113983154, 0.1987738013267517, 0.07287931442260742, 0.5315744280815125, 0.04742676019668579, 0.7842378616333008, 0.0881078839302063, 0.7612627744674683, 0.2528950572013855, 0.27305954694747925, 0.03027820587158203, 0.4686838984489441, 0.13311690092086792, 0.048372089862823486, 0.808062732219696, 0.44010263681411743, 0.5726178288459778, 0.15828031301498413, 0.4597446322441101, 0.6375324130058289, 0.8452948927879333, 0.9763500690460205, 0.5094607472419739, 0.3535742163658142, 0.664739191532135, 0.40749913454055786, 0.8537857532501221, 0.5830079913139343, 0.7949922680854797, 0.6309236288070679, 0.07258343696594238, 0.1224660873413086, 0.24250483512878418, 0.36189037561416626, 0.5156043171882629, 0.1819135546684265]} + {"index": {}} + {"source_text": "washing machine smells", "embedding":[0.7249823808670044, 0.3981819152832031, 0.4572623372077942, 0.7442894577980042, 0.15898281335830688, 0.6481881737709045, 0.1513708233833313, 0.8945682644844055, 0.7708938121795654, 0.5494217276573181, 0.48253726959228516, 0.39402270317077637, 0.6369197368621826, 0.7152248024940491, 0.6326345205307007, 0.7362181544303894, 0.350342333316803, 0.16101288795471191, 0.4180338382720947, 0.04114532470703125, 0.002633512020111084, 0.20396709442138672, 0.8963556885719299, 0.1552276611328125, 0.7476853728294373, 0.9651047587394714, 0.7527561187744141, 0.7041972279548645, 0.12461084127426147, 0.6282403469085693, 0.9631509184837341, 0.16590750217437744, 0.4101366400718689, 0.31320667266845703, 0.13579899072647095, 0.2895740270614624, 0.9905323386192322, 0.02118372917175293, 0.637545645236969, 0.5133231282234192, 0.679695188999176, 0.04641437530517578, 0.21913814544677734, 0.16534924507141113, 0.02987360954284668, 0.14805591106414795, 0.16874665021896362, 0.9378783702850342, 0.8607399463653564, 0.7287217974662781, 0.5402306318283081, 0.9973209500312805, 0.26169413328170776, 0.3835873603820801, 0.1874808669090271, 0.8038567304611206, 0.18557673692703247, 0.8631893992424011, 0.7676172256469727, 0.3599127531051636, 0.48698097467422485, 0.926689088344574, 0.6542723774909973, 0.49722349643707275, 0.7027173638343811, 0.13385021686553955, 0.9873734712600708, 0.17187494039535522, 0.7995050549507141, 0.5259199142456055, 0.33804380893707275, 0.21665722131729126, 0.952264130115509, 0.8337767720222473, 0.879487156867981, 0.5553549528121948, 0.6160674095153809, 0.1315295696258545, 0.8010737895965576, 0.834412693977356, 0.20340144634246826, 0.8993185758590698, 0.6493895649909973, 0.9454924464225769, 0.38529330492019653, 0.6891772150993347, 0.5530646443367004, 0.18555349111557007, 0.8361382484436035, 0.11815804243087769, 0.38942235708236694, 0.945141613483429, 0.6417409181594849, 0.39776402711868286, 0.5133314728736877, 
0.5431299805641174, 0.2615429759025574, 0.8987119793891907, 0.023733675479888916, 0.4941052794456482]} + {"index": {}} + {"source_text": "my words", "embedding":[0.19087255001068115, 0.5498749017715454, 0.9536173939704895, 0.25011056661605835, 0.37642204761505127, 0.18271470069885254, 0.670674741268158, 0.5553990006446838, 0.3306507468223572, 0.3368762731552124, 0.053364574909210205, 0.047215282917022705, 0.4221981167793274, 0.7591024041175842, 0.998794436454773, 0.6113318204879761, 0.8178470730781555, 0.8554672598838806, 0.40100908279418945, 0.6486459374427795, 0.804382860660553, 0.6775466799736023, 0.2916865944862366, 0.7019925117492676, 0.9812073707580566, 0.4414554834365845, 0.08203905820846558, 0.9167835116386414, 0.3082762360572815, 0.5454868674278259, 0.6665160655975342, 0.06828844547271729, 0.36014634370803833, 0.01810687780380249, 0.2640475630760193, 0.1856365203857422, 0.4734996557235718, 0.8153479695320129, 0.9614933133125305, 0.4851576089859009, 0.003343045711517334, 0.17352384328842163, 0.26423048973083496, 0.24217921495437622, 0.5694647431373596, 0.8538861274719238, 0.06464511156082153, 0.038984060287475586, 0.7695011496543884, 0.008188009262084961, 0.3858819007873535, 0.7950196862220764, 0.7225212454795837, 0.3982154130935669, 0.4996080994606018, 0.28709208965301514, 0.6753579378128052, 0.6779837608337402, 0.4815831184387207, 0.27917128801345825, 0.8400004506111145, 0.9022405743598938, 0.8253144025802612, 0.6251398324966431, 0.25444501638412476, 0.7694959044456482, 0.006821691989898682, 0.7958594560623169, 0.9144708514213562, 0.8688076138496399, 0.9641174077987671, 0.44437146186828613, 0.06135892868041992, 0.2638128399848938, 0.05436718463897705, 0.9926314353942871, 0.8661795854568481, 0.9176243543624878, 0.5521496534347534, 0.6017677783966064, 0.22096896171569824, 0.7030748128890991, 0.16923701763153076, 0.8178754448890686, 0.47008246183395386, 0.28875309228897095, 0.14314061403274536, 0.3431167006492615, 0.9301973581314087, 0.5416158437728882, 0.563427209854126, 0.7897542119026184, 0.2761036157608032, 0.16855067014694214, 0.42684781551361084, 0.7562968730926514, 0.2551668882369995, 0.7754542827606201, 0.218039870262146, 0.7080662846565247]} + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + indices.forcemerge: + index: index-with-embedded-text + max_num_segments: 1 + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + ml.start_trained_model_deployment: + model_id: text_embedding_model + wait_for: started + +--- +"Test vector search with query_vector_builder": + - skip: + version: " - 8.13.99" + reason: "introduced after 8.13" + - do: + search: + index: index-with-embedded-text + body: + query: + knn: + field: embedding + num_candidates: 3 + query_vector_builder: + text_embedding: + model_id: text_embedding_model + model_text: "the octopus comforter smells" + - match: { hits.total.value: 3 } + +--- +"nested kNN search with inner_hits size": + - skip: + version: " - 8.13.99" + reason: "introduced after 8.13" + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + indices.create: + index: test + body: + settings: + index: + number_of_shards: 1 + mappings: + properties: + name: + type: keyword + nested: + type: nested + properties: + paragraph_id: + type: keyword + vector: + type: dense_vector + dims: 100 + index: true + similarity: l2_norm + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + index: + index: test + id: "1" + body: + name: washing_machine_1.jpg + nested: + - paragraph_id: 0 + vector: [0.7044712901115417, 0.12284207344055176, 0.5008929967880249, 0.04643195867538452, 0.3666788339614868, 0.26660799980163574, 0.24114298820495605, 0.0761682391166687, 0.5294214487075806, 0.16935181617736816, 0.6257967948913574, 0.2804388999938965, 0.6417903900146484, 0.169958233833313, 0.4216839075088501, 0.6773303747177124, 0.9472144842147827, 0.21874648332595825, 0.5095921754837036, 0.839306116104126, 0.6176233291625977, 0.5847064852714539, 0.6748610734939575, 0.3264034390449524, 0.4112023115158081, 0.13818275928497314, 0.08356589078903198, 0.4147903323173523, 0.5626787543296814, 0.7167286276817322, 0.6314535737037659, 0.23092854022979736, 0.34547603130340576, 0.7425565719604492, 0.2837678790092468, 0.47037917375564575, 0.1555209755897522, 0.5618507266044617, 0.2076261043548584, 0.3026384711265564, 0.04561811685562134, 0.1691250205039978, 0.2504339814186096, 0.5350574851036072, 0.26857447624206543, 0.23607933521270752, 0.16938960552215576, 0.23708534240722656, 0.026302993297576904, 0.16901731491088867, 0.2847784757614136, 0.944273829460144, 0.28171658515930176, 0.9864799380302429, 0.6811433434486389, 0.9383156895637512, 0.5682582259178162, 0.14361613988876343, 0.7900274395942688, 0.27808505296707153, 0.05677521228790283, 0.08594226837158203, 0.6450491547584534, 0.06500720977783203, 0.36045730113983154, 0.1987738013267517, 0.07287931442260742, 0.5315744280815125, 0.04742676019668579, 0.7842378616333008, 0.0881078839302063, 0.7612627744674683, 0.2528950572013855, 0.27305954694747925, 0.03027820587158203, 0.4686838984489441, 0.13311690092086792, 0.048372089862823486, 0.808062732219696, 0.44010263681411743, 0.5726178288459778, 0.15828031301498413, 0.4597446322441101, 0.6375324130058289, 0.8452948927879333, 0.9763500690460205, 0.5094607472419739, 0.3535742163658142, 0.664739191532135, 0.40749913454055786, 0.8537857532501221, 0.5830079913139343, 0.7949922680854797, 0.6309236288070679, 0.07258343696594238, 0.1224660873413086, 0.24250483512878418, 0.36189037561416626, 0.5156043171882629, 0.1819135546684265] + - paragraph_id: 1 + vector: [0.7249823808670044, 0.3981819152832031, 0.4572623372077942, 0.7442894577980042, 0.15898281335830688, 0.6481881737709045, 0.1513708233833313, 0.8945682644844055, 0.7708938121795654, 0.5494217276573181, 0.48253726959228516, 0.39402270317077637, 0.6369197368621826, 0.7152248024940491, 0.6326345205307007, 0.7362181544303894, 0.350342333316803, 0.16101288795471191, 0.4180338382720947, 0.04114532470703125, 0.002633512020111084, 0.20396709442138672, 0.8963556885719299, 0.1552276611328125, 0.7476853728294373, 0.9651047587394714, 0.7527561187744141, 0.7041972279548645, 0.12461084127426147, 0.6282403469085693, 0.9631509184837341, 0.16590750217437744, 0.4101366400718689, 0.31320667266845703, 0.13579899072647095, 0.2895740270614624, 0.9905323386192322, 0.02118372917175293, 0.637545645236969, 0.5133231282234192, 0.679695188999176, 
0.04641437530517578, 0.21913814544677734, 0.16534924507141113, 0.02987360954284668, 0.14805591106414795, 0.16874665021896362, 0.9378783702850342, 0.8607399463653564, 0.7287217974662781, 0.5402306318283081, 0.9973209500312805, 0.26169413328170776, 0.3835873603820801, 0.1874808669090271, 0.8038567304611206, 0.18557673692703247, 0.8631893992424011, 0.7676172256469727, 0.3599127531051636, 0.48698097467422485, 0.926689088344574, 0.6542723774909973, 0.49722349643707275, 0.7027173638343811, 0.13385021686553955, 0.9873734712600708, 0.17187494039535522, 0.7995050549507141, 0.5259199142456055, 0.33804380893707275, 0.21665722131729126, 0.952264130115509, 0.8337767720222473, 0.879487156867981, 0.5553549528121948, 0.6160674095153809, 0.1315295696258545, 0.8010737895965576, 0.834412693977356, 0.20340144634246826, 0.8993185758590698, 0.6493895649909973, 0.9454924464225769, 0.38529330492019653, 0.6891772150993347, 0.5530646443367004, 0.18555349111557007, 0.8361382484436035, 0.11815804243087769, 0.38942235708236694, 0.945141613483429, 0.6417409181594849, 0.39776402711868286, 0.5133314728736877, 0.5431299805641174, 0.2615429759025574, 0.8987119793891907, 0.023733675479888916, 0.4941052794456482] + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + index: + index: test + id: "2" + body: + name: words.jpg + nested: + - paragraph_id: 0 + vector: [0.7000167369842529, 0.590781033039093, 0.009879708290100098, 0.7874260544776917, 0.797156572341919, 0.1791083812713623, 0.07826781272888184, 0.25102007389068604, 0.09334254264831543, 0.3819708824157715, 0.7312374711036682, 0.02819347381591797, 0.20099765062332153, 0.7702597975730896, 0.9443559050559998, 0.35520339012145996, 0.25699591636657715, 0.5596823692321777, 0.23947590589523315, 0.47478222846984863, 0.23411548137664795, 0.9809996485710144, 0.3806597590446472, 0.5006771087646484, 0.5724453926086426, 0.21510547399520874, 0.07062828540802002, 0.9858258962631226, 0.9636645317077637, 0.36034029722213745, 0.07260054349899292, 0.06882566213607788, 0.18354403972625732, 0.06756395101547241, 0.5749042630195618, 0.05275309085845947, 0.1865217685699463, 0.5852730870246887, 0.1086682677268982, 0.10090464353561401, 0.32582908868789673, 0.5494027733802795, 0.873362123966217, 0.02236837148666382, 0.37973177433013916, 0.5556552410125732, 0.5083678364753723, 0.8081125020980835, 0.09164196252822876, 0.2207810878753662, 0.8086426258087158, 0.271828293800354, 0.5981417298316956, 0.7745779156684875, 0.40872830152511597, 0.6035888195037842, 0.5598325133323669, 0.19086670875549316, 0.02406853437423706, 0.8299782872200012, 0.4994274377822876, 0.0300295352935791, 0.47190529108047485, 0.8889331817626953, 0.34195321798324585, 0.9380808472633362, 0.4418332576751709, 0.5789303779602051, 0.0526617169380188, 0.7349719405174255, 0.44571834802627563, 0.6602563261985779, 0.3819742202758789, 0.16881734132766724, 0.45588219165802, 0.028081774711608887, 0.6681976914405823, 0.8183007836341858, 0.7887755632400513, 0.4506028890609741, 0.8040162324905396, 0.431918203830719, 0.7408918738365173, 0.39756304025650024, 0.7438145875930786, 0.6120601892471313, 0.5724676251411438, 0.08701330423355103, 0.18344634771347046, 0.7226220369338989, 0.3648560643196106, 0.9813777208328247, 0.2615315318107605, 0.9847549796104431, 0.32967478036880493, 0.47099196910858154, 0.3591546416282654, 0.4132147431373596, 0.48631107807159424, 0.04420149326324463] + - paragraph_id: 1 + vector: 
[0.19087255001068115, 0.5498749017715454, 0.9536173939704895, 0.25011056661605835, 0.37642204761505127, 0.18271470069885254, 0.670674741268158, 0.5553990006446838, 0.3306507468223572, 0.3368762731552124, 0.053364574909210205, 0.047215282917022705, 0.4221981167793274, 0.7591024041175842, 0.998794436454773, 0.6113318204879761, 0.8178470730781555, 0.8554672598838806, 0.40100908279418945, 0.6486459374427795, 0.804382860660553, 0.6775466799736023, 0.2916865944862366, 0.7019925117492676, 0.9812073707580566, 0.4414554834365845, 0.08203905820846558, 0.9167835116386414, 0.3082762360572815, 0.5454868674278259, 0.6665160655975342, 0.06828844547271729, 0.36014634370803833, 0.01810687780380249, 0.2640475630760193, 0.1856365203857422, 0.4734996557235718, 0.8153479695320129, 0.9614933133125305, 0.4851576089859009, 0.003343045711517334, 0.17352384328842163, 0.26423048973083496, 0.24217921495437622, 0.5694647431373596, 0.8538861274719238, 0.06464511156082153, 0.038984060287475586, 0.7695011496543884, 0.008188009262084961, 0.3858819007873535, 0.7950196862220764, 0.7225212454795837, 0.3982154130935669, 0.4996080994606018, 0.28709208965301514, 0.6753579378128052, 0.6779837608337402, 0.4815831184387207, 0.27917128801345825, 0.8400004506111145, 0.9022405743598938, 0.8253144025802612, 0.6251398324966431, 0.25444501638412476, 0.7694959044456482, 0.006821691989898682, 0.7958594560623169, 0.9144708514213562, 0.8688076138496399, 0.9641174077987671, 0.44437146186828613, 0.06135892868041992, 0.2638128399848938, 0.05436718463897705, 0.9926314353942871, 0.8661795854568481, 0.9176243543624878, 0.5521496534347534, 0.6017677783966064, 0.22096896171569824, 0.7030748128890991, 0.16923701763153076, 0.8178754448890686, 0.47008246183395386, 0.28875309228897095, 0.14314061403274536, 0.3431167006492615, 0.9301973581314087, 0.5416158437728882, 0.563427209854126, 0.7897542119026184, 0.2761036157608032, 0.16855067014694214, 0.42684781551361084, 0.7562968730926514, 0.2551668882369995, 0.7754542827606201, 0.218039870262146, 0.7080662846565247] + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + index: + index: test + id: "3" + body: + name: washing_machine_2.jpg + nested: + - paragraph_id: 0 + vector: [0.09775793552398682, 0.9594467282295227, 0.7915146946907043, 0.9140479564666748, 0.5148435235023499, 0.8556410670280457, 0.6022665500640869, 0.05222177505493164, 0.9821935296058655, 0.49276530742645264, 0.23147249221801758, 0.2428399920463562, 0.3865380883216858, 0.5778483748435974, 0.5600519776344299, 0.9427472352981567, 0.48832541704177856, 0.8807493448257446, 0.32909590005874634, 0.8452557325363159, 0.811530590057373, 0.13344216346740723, 0.15256845951080322, 0.5025331974029541, 0.4288772940635681, 0.6590417623519897, 0.9282752871513367, 0.8842046856880188, 0.7873250842094421, 0.356731653213501, 0.9959152936935425, 0.07572609186172485, 0.5062583088874817, 0.36245888471603394, 0.6189196705818176, 0.7766605019569397, 0.5198523998260498, 0.7379586100578308, 0.0553441047668457, 0.5035901665687561, 0.24139636754989624, 0.10798943042755127, 0.272808313369751, 0.38171595335006714, 0.24275553226470947, 0.956981897354126, 0.8182021379470825, 0.9383817315101624, 0.06551980972290039, 0.6892690658569336, 0.7068917751312256, 0.5184322595596313, 0.6103079319000244, 0.7020677328109741, 0.7181660532951355, 0.6477184295654297, 0.26282840967178345, 0.9316624402999878, 0.8318467140197754, 0.1487215757369995, 0.39937925338745117, 0.6842989921569824, 0.3496543765068054, 0.6008991003036499, 0.9530165791511536, 0.4209877848625183, 0.5675879716873169, 0.7883706092834473, 0.9547191858291626, 0.6292906403541565, 0.49566715955734253, 0.6907342672348022, 0.0834314227104187, 0.19785481691360474, 0.4896165728569031, 0.8460168838500977, 0.9680339097976685, 0.43386441469192505, 0.7068926095962524, 0.19123870134353638, 0.5661664009094238, 0.610595166683197, 0.23599380254745483, 0.2831611633300781, 0.7919651865959167, 0.0018386244773864746, 0.15559959411621094, 0.4622604250907898, 0.02038663625717163, 0.42241227626800537, 0.4200526475906372, 0.1223069429397583, 0.7035380005836487, 0.09902423620223999, 0.7804107666015625, 0.05339455604553223, 0.6485095024108887, 0.29347676038742065, 0.9716366529464722, 0.30257928371429443] + - paragraph_id: 1 + vector: [0.09775794552398682, 0.9594467582295227, 0.7915146945707043, 0.9140479664666748, 0.5145735235023499, 0.8562410670280457, 0.6022665500640869, 0.05222177505493164, 0.9821935296058655, 0.49276530742645264, 0.23147249221801758, 0.2428399920463562, 0.3865380883216858, 0.5778483748435974, 0.5600519776344299, 0.9427472352981567, 0.48832541704177856, 0.8807493448257446, 0.32909590005874634, 0.8452557325363159, 0.811530590057373, 0.13344216346740723, 0.15256845951080322, 0.5025331974029541, 0.4288772940635681, 0.6590417623519897, 0.9282752871513367, 0.8842046856880188, 0.7873250842094421, 0.356731653213501, 0.9959152936935425, 0.07572609186172485, 0.5062583088874817, 0.36245888471603394, 0.6189196705818176, 0.7766605019569397, 0.5198523998260498, 0.7379586100578308, 0.0553441047668457, 0.5035901665687561, 0.24139636754989624, 0.10798943042755127, 0.272808313369751, 0.38171595335006714, 0.24275553226470947, 0.956981897354126, 0.8182021379470825, 0.9383817315101624, 0.06551980972290039, 0.6892690658569336, 0.7068917751312256, 0.5184322595596313, 0.6103079319000244, 0.7020677328109741, 0.7181660532951355, 0.6477184295654297, 0.26282840967178345, 0.9316624402999878, 0.8318467140197754, 0.1487215757369995, 0.39937925338745117, 0.6842989921569824, 0.3496543765068054, 0.6008991003036499, 0.9530165791511536, 
0.4209877848625183, 0.5675879716873169, 0.7883706092834473, 0.9547191858291626, 0.6292906403541565, 0.49566715955734253, 0.6907342672348022, 0.0834314227104187, 0.19785481691360474, 0.4896165728569031, 0.8460168838500977, 0.9680339097976685, 0.43386441469192505, 0.7068926095962524, 0.19123870134353638, 0.5661664009094238, 0.610595166683197, 0.23599380254745483, 0.2831611633300781, 0.7919651865959167, 0.0018386244773864746, 0.15559959411621094, 0.4622604250907898, 0.02038663625717163, 0.42241227626800537, 0.4200526475906372, 0.1223069429397583, 0.7035380005836487, 0.09902423620223999, 0.7804107666015625, 0.05339455604553223, 0.6485095024108887, 0.29347676038742065, 0.9716366529464722, 0.30257928371429443] + - paragraph_id: 2 + vector: [0.02346789346586842, 0.9275438642474423, 0.8726527523752967, 0.8754723854784285, 0.5128415773834781, 0.6483428957814985, 0.5198325825398418, 0.08612652674671427, 0.9651472417848493, 0.49276530742645264, 0.23147249221801758, 0.2428399920463562, 0.3865380883216858, 0.5778483748435974, 0.5600519776344299, 0.9427472352981567, 0.48832541704177856, 0.8807493448257446, 0.32909590005874634, 0.8452557325363159, 0.811530590057373, 0.13344216346740723, 0.15256845951080322, 0.5025331974029541, 0.4288772940635681, 0.6590417623519897, 0.9282752871513367, 0.8842046856880188, 0.7873250842094421, 0.356731653213501, 0.9959152936935425, 0.07572609186172485, 0.5062583088874817, 0.36245888471603394, 0.6189196705818176, 0.7766605019569397, 0.5198523998260498, 0.7379586100578308, 0.0553441047668457, 0.5035901665687561, 0.24139636754989624, 0.10798943042755127, 0.272808313369751, 0.38171595335006714, 0.24275553226470947, 0.956981897354126, 0.8182021379470825, 0.9383817315101624, 0.06551980972290039, 0.6892690658569336, 0.7068917751312256, 0.5184322595596313, 0.6103079319000244, 0.7020677328109741, 0.7181660532951355, 0.6477184295654297, 0.26282840967178345, 0.9316624402999878, 0.8318467140197754, 0.1487215757369995, 0.39937925338745117, 0.6842989921569824, 0.3496543765068054, 0.5894827345652058, 0.9015289457312641, 0.4977765853434526, 0.4531428523891345, 0.7654123564328341, 0.9873262437874512, 0.6093289487865741, 0.47316352723723781, 0.6351248142951784, 0.0185187914287474, 0.15382925892538523, 0.4795235879578928, 0.8412674355725237, 0.9951738957328592, 0.43386441469192505, 0.6478325728954291, 0.18653673256825923, 0.5719558932789324, 0.587741646751721, 0.20589564274301583, 0.2934815782359231, 0.74165167517957143, 0.0008218558958385265, 0.17592786427483915, 0.4427758518572378, 0.02038663625717163, 0.46159669016581038, 0.3586178072852782, 0.1671659088162495, 0.6491566190571951, 0.10745517851716554, 0.7961801851729441, 0.02146156157517156, 0.6419451771719757, 0.31581518951898949, 0.8947174174177408, 0.28841478085186174] + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + indices.refresh: { } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + search: + index: test + size: 5 + body: + fields: [ "name" ] + query: + nested: + path: nested + query: + knn: + field: nested.vector + query_vector_builder: + text_embedding: + model_id: text_embedding_model + model_text: "the machine smells" + num_candidates: 7 + inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } + + - match: {hits.total.value: 3} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } From b9062d86e17bf899ed5f9446a0a7a42f18bb344f Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 18 Mar 2024 16:22:51 +0100 Subject: [PATCH 243/248] Ensure TestShardRouting respect seed when generating data (#103423) While working on https://github.com/elastic/elasticsearch/pull/103375 I noticed that test generate different shards with a fixed seed. This change ensures seed is respected when generating test ShardRoutings to make it easier to reproduce tests that add ShardRouting instances to sets or rely on their equality. --- .../rest/action/cat/RestShardsAction.java | 4 +- .../cluster/routing/TestShardRouting.java | 64 +++++++++++-------- 2 files changed, 39 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index a57d45e07fd15..8f26814def98f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -326,7 +326,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(shard.unassignedInfo().getReason()); Instant unassignedTime = Instant.ofEpochMilli(shard.unassignedInfo().getUnassignedTimeInMillis()); table.addCell(UnassignedInfo.DATE_TIME_FORMATTER.format(unassignedTime)); - table.addCell(TimeValue.timeValueMillis(System.currentTimeMillis() - shard.unassignedInfo().getUnassignedTimeInMillis())); + table.addCell( + TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - shard.unassignedInfo().getUnassignedTimeInMillis())) + ); table.addCell(shard.unassignedInfo().getDetails()); } else { table.addCell(null); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index b032be87a642b..373e9e285f51f 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -8,21 +8,27 @@ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; -import java.util.Collections; +import java.util.Set; -import static org.apache.lucene.tests.util.LuceneTestCase.random; -import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.cluster.routing.AllocationId.newInitializing; +import static 
org.elasticsearch.cluster.routing.AllocationId.newRelocation; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIdentifier; import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomLongBetween; +import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong; +import static org.elasticsearch.test.ESTestCase.randomUUID; +import static org.elasticsearch.test.ESTestCase.safeSleep; import static org.junit.Assert.assertNotEquals; /** @@ -33,6 +39,8 @@ */ public class TestShardRouting { + private static final Logger logger = LogManager.getLogger(TestShardRouting.class); + public static Builder shardRoutingBuilder(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state) { return shardRoutingBuilder(new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId), currentNodeId, primary, state); } @@ -62,26 +70,11 @@ public Builder(ShardId shardId, String currentNodeId, boolean primary, ShardRout this.state = state; } - public Builder withCurrentNodeId(String currentNodeId) { - this.currentNodeId = currentNodeId; - return this; - } - public Builder withRelocatingNodeId(String relocatingNodeId) { this.relocatingNodeId = relocatingNodeId; return this; } - public Builder withPrimary(boolean primary) { - this.primary = primary; - return this; - } - - public Builder withState(ShardRoutingState state) { - this.state = state; - return this; - } - public Builder withRecoverySource(RecoverySource recoverySource) { this.recoverySource = recoverySource; return this; @@ -201,8 +194,8 @@ public static RecoverySource buildRecoverySource(boolean primary, ShardRoutingSt public static AllocationId buildAllocationId(ShardRoutingState state) { return switch (state) { case UNASSIGNED -> null; - case INITIALIZING, STARTED -> AllocationId.newInitializing(); - case RELOCATING -> AllocationId.newRelocation(AllocationId.newInitializing()); + case INITIALIZING, STARTED -> newInitializing(randomUUID()); + case RELOCATING -> newRelocation(newInitializing(randomUUID())); }; } @@ -228,23 +221,38 @@ public static UnassignedInfo buildUnassignedInfo(String message) { if (randomBoolean()) { delayed = true; } - lastAllocatedNodeId = randomAlphaOfLength(10); + lastAllocatedNodeId = randomIdentifier(); } int failedAllocations = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ? 
1 : 0; + + long unassignedTimeMillis = randomNonNegativeLong(); + long unassignedTimeNanos = randomLongBetween(0L, 1_000_000_000); + ensureInPast(unassignedTimeNanos); + return new UnassignedInfo( reason, message, null, failedAllocations, - System.nanoTime(), - System.currentTimeMillis(), + unassignedTimeNanos, + unassignedTimeMillis, delayed, UnassignedInfo.AllocationStatus.NO_ATTEMPT, - Collections.emptySet(), + Set.of(), lastAllocatedNodeId ); } + /** + * This ensures that deterministically selected nano time is actually in past to avoid unassigned info code constraints + */ + private static void ensureInPast(long nanoTime) { + while (System.nanoTime() < nanoTime) { + logger.info("Waiting to ensure selected nano-time [{}] is in past", nanoTime); + safeSleep(1000); + } + } + public static RecoverySource buildRecoverySource() { return randomFrom( RecoverySource.EmptyStoreRecoverySource.INSTANCE, @@ -252,10 +260,10 @@ public static RecoverySource buildRecoverySource() { RecoverySource.PeerRecoverySource.INSTANCE, RecoverySource.LocalShardsRecoverySource.INSTANCE, new RecoverySource.SnapshotRecoverySource( - UUIDs.randomBase64UUID(), - new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), + randomUUID(), + new Snapshot("repo", new SnapshotId(randomIdentifier(), randomUUID())), IndexVersion.current(), - new IndexId("some_index", UUIDs.randomBase64UUID(random())) + new IndexId("some_index", randomUUID()) ) ); } From bead96b2fff42362d06eab2a1a39e964ccb0cbdc Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 18 Mar 2024 16:29:26 +0100 Subject: [PATCH 244/248] Add 3rd party tests for reading blobs with larger lengths (#106239) This change adds a 3rd party test to validate that Azure, GCS and S3 allow requesting bytes ranges that exceed the real length of a blob. 
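(The distinction the new tests draw: a byte range that merely overshoots the end of the blob from a valid starting position is served truncated to the real blob length, as exercised by the "Random Larger Range" case in testReadFromPositionWithLength below, while a range whose starting position itself lies beyond the blob length is rejected by S3 with HTTP 416, Requested Range Not Satisfied. As a rough illustration of the latter, here is a minimal sketch using the AWS SDK v1 classes already imported by S3RepositoryThirdPartyTests; the bucket and key names and the helper method are placeholders for illustration only, not part of the patch.)

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;

import java.io.IOException;

class RangeReadSketch {
    // Requests a byte range that starts past the end of the object; S3 cannot satisfy it.
    static void readPastEndOfObject(AmazonS3 s3, String bucket, String key, long objectLength) throws IOException {
        GetObjectRequest request = new GetObjectRequest(bucket, key)
            // The first byte position is already >= objectLength, so the range cannot be satisfied.
            .withRange(objectLength + 1, objectLength + 100);
        try (S3Object ignored = s3.getObject(request)) {
            throw new AssertionError("expected S3 to reject the out-of-range read");
        } catch (AmazonS3Exception e) {
            // Same status code the new S3 test checks via RestStatus.REQUESTED_RANGE_NOT_SATISFIED.
            assert e.getStatusCode() == 416;
        }
    }
}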
--- .../s3/S3RepositoryThirdPartyTests.java | 21 +++++++ .../AbstractThirdPartyRepositoryTestCase.java | 55 ++++++++++++++++++- 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index b8fea485c6276..1b4ab7de0c2ff 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -7,6 +7,8 @@ */ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; @@ -31,6 +33,7 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.fixtures.minio.MinioTestContainer; import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; @@ -50,6 +53,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -222,4 +226,21 @@ List listMultipartUploads() { } } + public void testReadFromPositionLargerThanBlobLength() { + final var blobName = randomIdentifier(); + final var blobBytes = randomBytesReference(randomIntBetween(100, 2_000)); + + final var repository = getRepository(); + executeOnBlobStore(repository, blobStore -> { + blobStore.writeBlob(randomPurpose(), blobName, blobBytes, true); + return null; + }); + + long position = randomLongBetween(blobBytes.length(), Long.MAX_VALUE - 1L); + long length = randomLongBetween(1L, Long.MAX_VALUE - position); + var exception = expectThrows(AmazonClientException.class, () -> readBlob(repository, blobName, position, length)); + + assertThat(exception, instanceOf(AmazonS3Exception.class)); + assertThat(((AmazonS3Exception) exception).getStatusCode(), equalTo(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus())); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 3d4dea430a9b5..7cdeaeedfdeaf 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; 
import org.elasticsearch.common.blobstore.support.BlobMetadata; @@ -19,6 +20,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Streams; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; @@ -121,7 +123,7 @@ public void testCreateSnapshot() { assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, snapshotName).get().isAcknowledged()); } - public void testListChildren() throws Exception { + public void testListChildren() { final BlobStoreRepository repo = getRepository(); final PlainActionFuture future = new PlainActionFuture<>(); final Executor genericExec = repo.threadPool().generic(); @@ -246,6 +248,57 @@ public void testIndexLatest() throws Exception { assertTrue(blobContents.add(readIndexLatest(repository))); } + public void testReadFromPositionWithLength() { + final var blobName = randomIdentifier(); + final var blobBytes = randomBytesReference(randomIntBetween(100, 2_000)); + + final var repository = getRepository(); + executeOnBlobStore(repository, blobStore -> { + blobStore.writeBlob(randomPurpose(), blobName, blobBytes, true); + return null; + }); + + { + assertThat("Exact Range", readBlob(repository, blobName, 0L, blobBytes.length()), equalTo(blobBytes)); + } + { + int position = randomIntBetween(0, blobBytes.length() - 1); + int length = randomIntBetween(1, blobBytes.length() - position); + assertThat( + "Random Range: " + position + '-' + (position + length), + readBlob(repository, blobName, position, length), + equalTo(blobBytes.slice(position, length)) + ); + } + { + int position = randomIntBetween(0, blobBytes.length() - 1); + long length = randomLongBetween(1L, Long.MAX_VALUE - position - 1L); + assertThat( + "Random Larger Range: " + position + '-' + (position + length), + readBlob(repository, blobName, position, length), + equalTo(blobBytes.slice(position, Math.toIntExact(Math.min(length, blobBytes.length() - position)))) + ); + } + } + + protected static T executeOnBlobStore(BlobStoreRepository repository, CheckedFunction fn) { + final var future = new PlainActionFuture(); + repository.threadPool().generic().execute(ActionRunnable.supply(future, () -> { + var blobContainer = repository.blobStore().blobContainer(repository.basePath()); + return fn.apply(blobContainer); + })); + return future.actionGet(); + } + + protected static BytesReference readBlob(BlobStoreRepository repository, String blobName, long position, long length) { + return executeOnBlobStore(repository, blobContainer -> { + try (var input = blobContainer.readBlob(randomPurpose(), blobName, position, length); var output = new BytesStreamOutput()) { + Streams.copy(input, output); + return output.bytes(); + } + }); + } + private static BytesReference readIndexLatest(BlobStoreRepository repository) throws IOException { try (var baos = new BytesStreamOutput()) { Streams.copy( From d66c7d4bc86b32b7d17c5eacbdc4e8d9f88c13a0 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 18 Mar 2024 09:32:31 -0700 Subject: [PATCH 245/248] Resume driver when failing to fetch pages (#106392) I investigated a heap attack test failure and found that an ESQL request was stuck. This occurred in the following: 1. The ExchangeSource on the coordinator was blocked on reading because there were no available pages. 2. 
Meanwhile, the ExchangeSink on the data node had pages ready for fetching. 3. When an exchange request tried to fetch pages, it failed due to a CircuitBreakingException. Despite the failure, no cancellation was triggered because the status of the ExchangeSource on the coordinator remained unchanged. To fix this issue, this PR introduces two changes: Resumes the ExchangeSourceOperator and Driver on the coordinator, eventually allowing the coordinator to trigger cancellation of the request when failing to fetch pages. Ensures that an exchange sink on the data nodes fails when a data node request is cancelled. This callback was inadvertently omitted when introducing the node-level reduction in Run empty reduction node level on data nodes #106204. I plan to spend some time to harden the exchange and compute service. Closes #106262 --- docs/changelog/106392.yaml | 6 ++ .../xpack/esql/heap_attack/HeapAttackIT.java | 2 - .../operator/exchange/ExchangeService.java | 4 +- .../exchange/ExchangeSinkHandler.java | 8 ++ .../exchange/ExchangeSourceHandler.java | 1 + .../xpack/esql/action/EsqlActionTaskIT.java | 93 +++++++++++++++++++ .../xpack/esql/plugin/ComputeService.java | 1 + 7 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/106392.yaml diff --git a/docs/changelog/106392.yaml b/docs/changelog/106392.yaml new file mode 100644 index 0000000000000..ff1a0284ee5db --- /dev/null +++ b/docs/changelog/106392.yaml @@ -0,0 +1,6 @@ +pr: 106392 +summary: Resume driver when failing to fetch pages +area: ES|QL +type: bug +issues: + - 106262 diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index ffa817ed09677..8c87ef5977114 100644 --- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -10,7 +10,6 @@ import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -58,7 +57,6 @@ * Tests that run ESQL queries that have, in the past, used so much memory they * crash Elasticsearch. 
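// A minimal, self-contained sketch (plain java.util.concurrent, not the Elasticsearch exchange API)
// of the first change described in the commit message above: when fetching pages fails, a consumer
// blocked waiting for pages must be woken up so it can observe the failure and allow the request to
// be cancelled, instead of staying blocked forever. All names below are illustrative.
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;

class ExchangeBufferSketch {
    private final Queue<String> pages = new ArrayDeque<>();
    private CompletableFuture<Void> pendingRead;
    private Exception failure;

    // The consumer calls this before reading; the returned future completes as soon as there is
    // something to observe (a buffered page or a recorded failure).
    synchronized CompletableFuture<Void> waitForReading() {
        if (pages.isEmpty() == false || failure != null) {
            return CompletableFuture.completedFuture(null);
        }
        if (pendingRead == null) {
            pendingRead = new CompletableFuture<>();
        }
        return pendingRead;
    }

    synchronized void addPage(String page) {
        pages.add(page);
        resumeWaiter();
    }

    // Mirrors the fix: record the failure and resume the blocked reader, much like the
    // buffer.waitForReading().onResponse(null) call added to ExchangeSourceHandler below.
    synchronized void onFetchFailure(Exception e) {
        if (failure == null) {
            failure = e;
        }
        resumeWaiter();
    }

    synchronized Exception failure() {
        return failure;
    }

    private void resumeWaiter() {
        if (pendingRead != null) {
            pendingRead.complete(null);
            pendingRead = null;
        }
    }
}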
*/ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106262") public class HeapAttackIT extends ESRestTestCase { @ClassRule diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index efb646daec0e5..a8afce1a3b223 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -112,8 +113,9 @@ public ExchangeSinkHandler getSinkHandler(String exchangeId) { /** * Removes the exchange sink handler associated with the given exchange id. + * We will abort the sink handler if the given failure is not null. */ - public void finishSinkHandler(String exchangeId, Exception failure) { + public void finishSinkHandler(String exchangeId, @Nullable Exception failure) { final ExchangeSinkHandler sinkHandler = sinks.remove(exchangeId); if (sinkHandler != null) { if (failure != null) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index 945fdff50d31c..ab155d6ee8479 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -184,4 +184,12 @@ private void onChanged() { long lastUpdatedTimeInMillis() { return lastUpdatedInMillis.get(); } + + /** + * Returns the number of pages available in the buffer. + * This method should be used for testing only. 
+ */ + public int bufferSize() { + return buffer.size(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 7492fa8c19385..f1698ea401d28 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -203,6 +203,7 @@ void onSinkFailed(Exception originEx) { } return first; }); + buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 5d022cd25cdab..3728eb624aaa0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -12,35 +12,51 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.DriverTaskRunner; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.junit.Before; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyIterable; import static 
org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -309,4 +325,81 @@ private void assertCancelled(ActionFuture response) throws Ex ) ); } + + /** + * Ensure that when some exchange requests fail, we cancel the ESQL request, and complete all + * exchange sinks with the failure, despite having outstanding pages in the buffer. + */ + public void testCancelRequestWhenFailingFetchingPages() throws Exception { + String coordinator = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + String dataNode = internalCluster().startDataOnlyNode(); + // block, then fail exchange requests when we have outstanding pages + var transportService = (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode); + CountDownLatch fetchingStarted = new CountDownLatch(1); + CountDownLatch allowedFetching = new CountDownLatch(1); + transportService.addRequestHandlingBehavior(ExchangeService.EXCHANGE_ACTION_NAME, (handler, request, channel, task) -> { + AbstractRunnable runnable = new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + channel.sendResponse(e); + } + + @Override + protected void doRun() throws Exception { + fetchingStarted.countDown(); + assertTrue(allowedFetching.await(1, TimeUnit.MINUTES)); + onFailure(new IOException("failed to fetch pages")); + } + }; + transportService.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(runnable); + }); + try { + scriptPermits.release(numberOfDocs()); // do not block Lucene operators + Client client = client(coordinator); + EsqlQueryRequest request = new EsqlQueryRequest(); + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.include._name", dataNode).build()) + .get(); + ensureYellowAndNoInitializingShards("test"); + request.query("FROM test | LIMIT 10"); + request.pragmas(randomPragmas()); + PlainActionFuture future = new PlainActionFuture<>(); + client.execute(EsqlQueryAction.INSTANCE, request, future); + try { + List foundTasks = new ArrayList<>(); + assertBusy(() -> { + List tasks = client().admin() + .cluster() + .prepareListTasks() + .setActions(EsqlQueryAction.NAME) + .setDetailed(true) + .get() + .getTasks(); + assertThat(tasks, hasSize(1)); + foundTasks.addAll(tasks); + }); + String sessionId = foundTasks.get(0).taskId().toString(); + ExchangeService exchangeService = internalCluster().getInstance(ExchangeService.class, dataNode); + assertTrue(fetchingStarted.await(1, TimeUnit.MINUTES)); + ExchangeSinkHandler exchangeSink = exchangeService.getSinkHandler(sessionId); + if (randomBoolean()) { + // do not fail exchange requests when we have some pages + assertBusy(() -> assertThat(exchangeSink.bufferSize(), greaterThan(0))); + } + } finally { + allowedFetching.countDown(); + } + Exception failure = expectThrows(Exception.class, () -> future.actionGet().close()); + assertThat(failure.getMessage(), containsString("failed to fetch pages")); + } finally { + transportService.clearAllRules(); + } + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), MockTransportService.TestPlugin.class); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index ba3d8564e1334..90cbc018b77dc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -690,6 +690,7 @@ private void runComputeOnDataNode( dataNodeRequestExecutor.start(); // run the node-level reduction var externalSink = exchangeService.getSinkHandler(externalId); + task.addListener(() -> exchangeService.finishSinkHandler(externalId, new TaskCancelledException(task.getReasonCancelled()))); var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); ActionListener reductionListener = cancelOnFailure(task, cancelled, refs.acquire()); From 91a6df65021dd29e8baf3cd4a319ccc401a506a7 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 18 Mar 2024 09:34:45 -0700 Subject: [PATCH 246/248] Fix testProfile (#106393) The testProfile failed because the queries hit a different set of target nodes. This occurs with the introduction of node-level reduction. This PR resolves the issue by running the test without replicas, ensuring that queries consistently hit the same set of target nodes. Closes #106273 --- .../esql/action/CrossClustersQueryIT.java | 32 +++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index ca93f8d090996..ac2abf21a8f8c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -9,8 +9,12 @@ import org.elasticsearch.Build; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.compute.lucene.DataPartitioning; @@ -121,12 +125,36 @@ public void testMetadataIndex() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/106273") + void waitForNoInitializingShards(Client client, TimeValue timeout, String... 
indices) { + ClusterHealthResponse resp = client.admin() + .cluster() + .prepareHealth(indices) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .setTimeout(timeout) + .get(); + assertFalse(Strings.toString(resp, true, true), resp.isTimedOut()); + } + public void testProfile() { assumeTrue("pragmas only enabled on snapshot builds", Build.current().isSnapshot()); - final int localOnlyProfiles; // uses shard partitioning as segments can be merged during these queries var pragmas = new QueryPragmas(Settings.builder().put(QueryPragmas.DATA_PARTITIONING.getKey(), DataPartitioning.SHARD).build()); + // Use single replicas for the target indices, to make sure we hit the same set of target nodes + client(LOCAL_CLUSTER).admin() + .indices() + .prepareUpdateSettings("logs-1") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) + .get(); + waitForNoInitializingShards(client(LOCAL_CLUSTER), TimeValue.timeValueSeconds(30), "logs-1"); + client(REMOTE_CLUSTER).admin() + .indices() + .prepareUpdateSettings("logs-2") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) + .get(); + waitForNoInitializingShards(client(REMOTE_CLUSTER), TimeValue.timeValueSeconds(30), "logs-2"); + final int localOnlyProfiles; { EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM logs* | stats sum(v)"); From 3ca808b3cf94e7f0526cea6aebd556b7c900c084 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Tue, 19 Mar 2024 11:55:04 +0100 Subject: [PATCH 247/248] semantic_text - extract Index Metadata inference information to separate class (#106328) --- .../cluster/ClusterStateDiffIT.java | 24 +-- .../BulkShardRequestInferenceProvider.java | 39 +++- .../metadata/FieldInferenceMetadata.java | 190 ++++++++++++++++++ .../cluster/metadata/IndexMetadata.java | 95 +++------ .../metadata/MetadataCreateIndexService.java | 4 +- .../metadata/MetadataMappingService.java | 2 +- .../index/mapper/FieldTypeLookup.java | 17 +- .../index/mapper/InferenceModelFieldType.java | 2 +- .../index/mapper/MappingLookup.java | 4 +- .../action/bulk/BulkOperationTests.java | 61 +++--- .../cluster/metadata/IndexMetadataTests.java | 42 ++-- .../index/mapper/FieldTypeLookupTests.java | 16 +- .../index/mapper/MappingLookupTests.java | 15 +- .../mapper/MockInferenceModelFieldType.java | 2 +- .../mapper/SemanticTextFieldMapper.java | 2 +- .../SemanticTextClusterMetadataTests.java | 12 +- 16 files changed, 347 insertions(+), 180 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/FieldInferenceMetadata.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 3a1f6e20bb288..fbb3016b925da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -54,7 +54,6 @@ import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -62,6 +61,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; import static 
org.elasticsearch.cluster.metadata.AliasMetadata.newAliasMetadataBuilder; +import static org.elasticsearch.cluster.metadata.IndexMetadataTests.randomFieldInferenceMetadata; import static org.elasticsearch.cluster.routing.RandomShardRoutingMutator.randomChange; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; import static org.elasticsearch.cluster.routing.UnassignedInfoTests.randomUnassignedInfo; @@ -587,33 +587,13 @@ public IndexMetadata randomChange(IndexMetadata part) { builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)); break; case 3: - builder.fieldsForModels(randomFieldsForModels()); + builder.fieldInferenceMetadata(randomFieldInferenceMetadata(true)); break; default: throw new IllegalArgumentException("Shouldn't be here"); } return builder.build(); } - - /** - * Generates a random fieldsForModels map - */ - private Map> randomFieldsForModels() { - if (randomBoolean()) { - return null; - } - - Map> fieldsForModels = new HashMap<>(); - for (int i = 0; i < randomIntBetween(0, 5); i++) { - Set fields = new HashSet<>(); - for (int j = 0; j < randomIntBetween(1, 4); j++) { - fields.add(randomAlphaOfLengthBetween(4, 10)); - } - fieldsForModels.put(randomAlphaOfLengthBetween(4, 10), fields); - } - - return fieldsForModels; - } }); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequestInferenceProvider.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequestInferenceProvider.java index 4b7a67e9ca0e3..e80530f75cf4b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequestInferenceProvider.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequestInferenceProvider.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.FieldInferenceMetadata; import org.elasticsearch.common.TriConsumer; import org.elasticsearch.core.Releasable; import org.elasticsearch.index.shard.ShardId; @@ -75,11 +76,13 @@ public static void getInstance( Set shardIds, ActionListener listener ) { - Set inferenceIds = new HashSet<>(); - shardIds.stream().map(ShardId::getIndex).collect(Collectors.toSet()).stream().forEach(index -> { - var fieldsForModels = clusterState.metadata().index(index).getFieldsForModels(); - inferenceIds.addAll(fieldsForModels.keySet()); - }); + Set inferenceIds = shardIds.stream() + .map(ShardId::getIndex) + .collect(Collectors.toSet()) + .stream() + .map(index -> clusterState.metadata().index(index).getFieldInferenceMetadata().getFieldInferenceOptions().values()) + .flatMap(o -> o.stream().map(FieldInferenceMetadata.FieldInferenceOptions::inferenceId)) + .collect(Collectors.toSet()); final Map inferenceProviderMap = new ConcurrentHashMap<>(); Runnable onModelLoadingComplete = () -> listener.onResponse( new BulkShardRequestInferenceProvider(clusterState, inferenceProviderMap) @@ -134,11 +137,11 @@ public void processBulkShardRequest( BiConsumer onBulkItemFailure ) { - Map> fieldsForModels = clusterState.metadata() - .index(bulkShardRequest.shardId().getIndex()) - .getFieldsForModels(); + Map> fieldsForInferenceIds = getFieldsForInferenceIds( + clusterState.metadata().index(bulkShardRequest.shardId().getIndex()).getFieldInferenceMetadata().getFieldInferenceOptions() + ); // No inference fields? 
Terminate early - if (fieldsForModels.isEmpty()) { + if (fieldsForInferenceIds.isEmpty()) { listener.onResponse(bulkShardRequest); return; } @@ -176,7 +179,7 @@ public void processBulkShardRequest( if (bulkItemRequest != null) { performInferenceOnBulkItemRequest( bulkItemRequest, - fieldsForModels, + fieldsForInferenceIds, i, onBulkItemFailureWithIndex, bulkItemReqRef.acquire() @@ -186,6 +189,22 @@ public void processBulkShardRequest( } } + private static Map> getFieldsForInferenceIds( + Map fieldInferenceMap + ) { + Map> fieldsForInferenceIdsMap = new HashMap<>(); + for (Map.Entry entry : fieldInferenceMap.entrySet()) { + String fieldName = entry.getKey(); + String inferenceId = entry.getValue().inferenceId(); + + // Get or create the set associated with the inferenceId + Set fields = fieldsForInferenceIdsMap.computeIfAbsent(inferenceId, k -> new HashSet<>()); + fields.add(fieldName); + } + + return fieldsForInferenceIdsMap; + } + @SuppressWarnings("unchecked") private void performInferenceOnBulkItemRequest( BulkItemRequest bulkItemRequest, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/FieldInferenceMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/FieldInferenceMetadata.java new file mode 100644 index 0000000000000..349706c139127 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/FieldInferenceMetadata.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Contains field inference information. This is necessary to add to cluster state as inference can be calculated in the coordinator + * node, which not necessarily has mapping information. 
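// A hypothetical usage sketch (test-style Java, not part of this patch, assuming the usual
// java.util imports) for the class defined just below: inference options are keyed by field name,
// and a consumer such as BulkShardRequestInferenceProvider above groups the fields back by
// inference id before calling the inference services. Field names and the inference id are made up.
Map<String, FieldInferenceMetadata.FieldInferenceOptions> options = Map.of(
    "title_embedding", new FieldInferenceMetadata.FieldInferenceOptions("my-inference-endpoint", Set.of("title")),
    "body_embedding", new FieldInferenceMetadata.FieldInferenceOptions("my-inference-endpoint", Set.of("body"))
);
FieldInferenceMetadata metadata = new FieldInferenceMetadata(options);

Map<String, Set<String>> fieldsByInferenceId = new HashMap<>();
for (var entry : metadata.getFieldInferenceOptions().entrySet()) {
    fieldsByInferenceId.computeIfAbsent(entry.getValue().inferenceId(), k -> new HashSet<>()).add(entry.getKey());
}
// fieldsByInferenceId is now {my-inference-endpoint=[title_embedding, body_embedding]}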
+ */ +public class FieldInferenceMetadata implements Diffable, ToXContentFragment { + + private final ImmutableOpenMap fieldInferenceOptions; + + public static final FieldInferenceMetadata EMPTY = new FieldInferenceMetadata(ImmutableOpenMap.of()); + + public FieldInferenceMetadata(MappingLookup mappingLookup) { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + mappingLookup.getInferenceIdsForFields().entrySet().forEach(entry -> { + builder.put(entry.getKey(), new FieldInferenceOptions(entry.getValue(), mappingLookup.sourcePaths(entry.getKey()))); + }); + fieldInferenceOptions = builder.build(); + } + + public FieldInferenceMetadata(StreamInput in) throws IOException { + fieldInferenceOptions = in.readImmutableOpenMap(StreamInput::readString, FieldInferenceOptions::new); + } + + public FieldInferenceMetadata(Map fieldsToInferenceMap) { + fieldInferenceOptions = ImmutableOpenMap.builder(fieldsToInferenceMap).build(); + } + + public ImmutableOpenMap getFieldInferenceOptions() { + return fieldInferenceOptions; + } + + public boolean isEmpty() { + return fieldInferenceOptions.isEmpty(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(fieldInferenceOptions, (o, v) -> v.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.map(fieldInferenceOptions); + return builder; + } + + public static FieldInferenceMetadata fromXContent(XContentParser parser) throws IOException { + return new FieldInferenceMetadata(parser.map(HashMap::new, FieldInferenceOptions::fromXContent)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FieldInferenceMetadata that = (FieldInferenceMetadata) o; + return Objects.equals(fieldInferenceOptions, that.fieldInferenceOptions); + } + + @Override + public int hashCode() { + return Objects.hash(fieldInferenceOptions); + } + + @Override + public Diff diff(FieldInferenceMetadata previousState) { + if (previousState == null) { + previousState = EMPTY; + } + return new FieldInferenceMetadataDiff(previousState, this); + } + + static class FieldInferenceMetadataDiff implements Diff { + + public static final FieldInferenceMetadataDiff EMPTY = new FieldInferenceMetadataDiff( + FieldInferenceMetadata.EMPTY, + FieldInferenceMetadata.EMPTY + ); + + private final Diff> fieldInferenceMapDiff; + + private static final DiffableUtils.DiffableValueReader FIELD_INFERENCE_DIFF_VALUE_READER = + new DiffableUtils.DiffableValueReader<>(FieldInferenceOptions::new, FieldInferenceMetadataDiff::readDiffFrom); + + FieldInferenceMetadataDiff(FieldInferenceMetadata before, FieldInferenceMetadata after) { + fieldInferenceMapDiff = DiffableUtils.diff( + before.fieldInferenceOptions, + after.fieldInferenceOptions, + DiffableUtils.getStringKeySerializer(), + FIELD_INFERENCE_DIFF_VALUE_READER + ); + } + + FieldInferenceMetadataDiff(StreamInput in) throws IOException { + fieldInferenceMapDiff = DiffableUtils.readImmutableOpenMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + FIELD_INFERENCE_DIFF_VALUE_READER + ); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(FieldInferenceOptions::new, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + fieldInferenceMapDiff.writeTo(out); + } + + @Override + public FieldInferenceMetadata apply(FieldInferenceMetadata part) { + return 
new FieldInferenceMetadata(fieldInferenceMapDiff.apply(part.fieldInferenceOptions)); + } + } + + public record FieldInferenceOptions(String inferenceId, Set sourceFields) + implements + SimpleDiffable, + ToXContentFragment { + + public static final ParseField INFERENCE_ID_FIELD = new ParseField("inference_id"); + public static final ParseField SOURCE_FIELDS_FIELD = new ParseField("source_fields"); + + FieldInferenceOptions(StreamInput in) throws IOException { + this(in.readString(), in.readCollectionAsImmutableSet(StreamInput::readString)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(inferenceId); + out.writeStringCollection(sourceFields); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + builder.field(SOURCE_FIELDS_FIELD.getPreferredName(), sourceFields); + builder.endObject(); + return builder; + } + + public static FieldInferenceOptions fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "field_inference_parser", + false, + (args, unused) -> new FieldInferenceOptions((String) args[0], new HashSet<>((List) args[1])) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INFERENCE_ID_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), SOURCE_FIELDS_FIELD); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 81406f0a74ce5..89c925427cf88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -78,7 +78,6 @@ import java.util.OptionalLong; import java.util.Set; import java.util.function.Function; -import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.Metadata.CONTEXT_MODE_PARAM; import static org.elasticsearch.cluster.metadata.Metadata.DEDUPLICATED_MAPPINGS_PARAM; @@ -541,7 +540,7 @@ public Iterator> settings() { public static final String KEY_SHARD_SIZE_FORECAST = "shard_size_forecast"; - public static final String KEY_FIELDS_FOR_MODELS = "fields_for_models"; + public static final String KEY_FIELD_INFERENCE = "field_inference"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; @@ -632,8 +631,7 @@ public Iterator> settings() { private final Double writeLoadForecast; @Nullable private final Long shardSizeInBytesForecast; - // Key: model ID, Value: Fields that use model - private final ImmutableOpenMap> fieldsForModels; + private final FieldInferenceMetadata fieldInferenceMetadata; private IndexMetadata( final Index index, @@ -680,7 +678,7 @@ private IndexMetadata( @Nullable final IndexMetadataStats stats, @Nullable final Double writeLoadForecast, @Nullable Long shardSizeInBytesForecast, - final ImmutableOpenMap> fieldsForModels + @Nullable FieldInferenceMetadata fieldInferenceMetadata ) { this.index = index; this.version = version; @@ -736,7 +734,7 @@ private IndexMetadata( this.writeLoadForecast = writeLoadForecast; this.shardSizeInBytesForecast = shardSizeInBytesForecast; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of 
" + numberOfShards; - this.fieldsForModels = Objects.requireNonNull(fieldsForModels); + this.fieldInferenceMetadata = Objects.requireNonNullElse(fieldInferenceMetadata, FieldInferenceMetadata.EMPTY); } IndexMetadata withMappingMetadata(MappingMetadata mapping) { @@ -788,7 +786,7 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) { this.stats, this.writeLoadForecast, this.shardSizeInBytesForecast, - this.fieldsForModels + this.fieldInferenceMetadata ); } @@ -847,7 +845,7 @@ public IndexMetadata withInSyncAllocationIds(int shardId, Set inSyncSet) this.stats, this.writeLoadForecast, this.shardSizeInBytesForecast, - this.fieldsForModels + this.fieldInferenceMetadata ); } @@ -904,7 +902,7 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) { this.stats, this.writeLoadForecast, this.shardSizeInBytesForecast, - this.fieldsForModels + this.fieldInferenceMetadata ); } @@ -961,7 +959,7 @@ public IndexMetadata withTimestampRange(IndexLongFieldRange timestampRange) { this.stats, this.writeLoadForecast, this.shardSizeInBytesForecast, - this.fieldsForModels + this.fieldInferenceMetadata ); } @@ -1014,7 +1012,7 @@ public IndexMetadata withIncrementedVersion() { this.stats, this.writeLoadForecast, this.shardSizeInBytesForecast, - this.fieldsForModels + this.fieldInferenceMetadata ); } @@ -1218,8 +1216,8 @@ public OptionalLong getForecastedShardSizeInBytes() { return shardSizeInBytesForecast == null ? OptionalLong.empty() : OptionalLong.of(shardSizeInBytesForecast); } - public Map> getFieldsForModels() { - return fieldsForModels; + public FieldInferenceMetadata getFieldInferenceMetadata() { + return fieldInferenceMetadata; } public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; @@ -1419,7 +1417,7 @@ public boolean equals(Object o) { if (rolloverInfos.equals(that.rolloverInfos) == false) { return false; } - if (fieldsForModels.equals(that.fieldsForModels) == false) { + if (fieldInferenceMetadata.equals(that.fieldInferenceMetadata) == false) { return false; } if (isSystem != that.isSystem) { @@ -1442,7 +1440,7 @@ public int hashCode() { result = 31 * result + Arrays.hashCode(primaryTerms); result = 31 * result + inSyncAllocationIds.hashCode(); result = 31 * result + rolloverInfos.hashCode(); - result = 31 * result + fieldsForModels.hashCode(); + result = 31 * result + fieldInferenceMetadata.hashCode(); result = 31 * result + Boolean.hashCode(isSystem); return result; } @@ -1498,7 +1496,7 @@ private static class IndexMetadataDiff implements Diff { private final IndexMetadataStats stats; private final Double indexWriteLoadForecast; private final Long shardSizeInBytesForecast; - private final Diff>> fieldsForModels; + private final Diff fieldInferenceMetadata; IndexMetadataDiff(IndexMetadata before, IndexMetadata after) { index = after.index.getName(); @@ -1535,12 +1533,7 @@ private static class IndexMetadataDiff implements Diff { stats = after.stats; indexWriteLoadForecast = after.writeLoadForecast; shardSizeInBytesForecast = after.shardSizeInBytesForecast; - fieldsForModels = DiffableUtils.diff( - before.fieldsForModels, - after.fieldsForModels, - DiffableUtils.getStringKeySerializer(), - DiffableUtils.StringSetValueSerializer.getInstance() - ); + fieldInferenceMetadata = after.fieldInferenceMetadata.diff(before.fieldInferenceMetadata); } private static final DiffableUtils.DiffableValueReader ALIAS_METADATA_DIFF_VALUE_READER = @@ -1601,13 +1594,9 @@ private static class IndexMetadataDiff implements Diff { shardSizeInBytesForecast = null; } if 
(in.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { - fieldsForModels = DiffableUtils.readJdkMapDiff( - in, - DiffableUtils.getStringKeySerializer(), - DiffableUtils.StringSetValueSerializer.getInstance() - ); + fieldInferenceMetadata = in.readOptionalWriteable(FieldInferenceMetadata.FieldInferenceMetadataDiff::new); } else { - fieldsForModels = DiffableUtils.emptyDiff(); + fieldInferenceMetadata = FieldInferenceMetadata.FieldInferenceMetadataDiff.EMPTY; } } @@ -1645,7 +1634,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalLong(shardSizeInBytesForecast); } if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { - fieldsForModels.writeTo(out); + out.writeOptionalWriteable(fieldInferenceMetadata); } } @@ -1676,7 +1665,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.stats(stats); builder.indexWriteLoadForecast(indexWriteLoadForecast); builder.shardSizeInBytesForecast(shardSizeInBytesForecast); - builder.fieldsForModels(fieldsForModels.apply(part.fieldsForModels)); + builder.fieldInferenceMetadata(fieldInferenceMetadata.apply(part.fieldInferenceMetadata)); return builder.build(true); } } @@ -1745,9 +1734,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function i.readCollectionAsImmutableSet(StreamInput::readString)) - ); + builder.fieldInferenceMetadata(new FieldInferenceMetadata(in)); } return builder.build(true); } @@ -1796,7 +1783,7 @@ public void writeTo(StreamOutput out, boolean mappingsAsHash) throws IOException out.writeOptionalLong(shardSizeInBytesForecast); } if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { - out.writeMap(fieldsForModels, StreamOutput::writeStringCollection); + fieldInferenceMetadata.writeTo(out); } } @@ -1847,7 +1834,7 @@ public static class Builder { private IndexMetadataStats stats = null; private Double indexWriteLoadForecast = null; private Long shardSizeInBytesForecast = null; - private final ImmutableOpenMap.Builder> fieldsForModels; + private FieldInferenceMetadata fieldInferenceMetadata = FieldInferenceMetadata.EMPTY; public Builder(String index) { this.index = index; @@ -1855,7 +1842,6 @@ public Builder(String index) { this.customMetadata = ImmutableOpenMap.builder(); this.inSyncAllocationIds = new HashMap<>(); this.rolloverInfos = ImmutableOpenMap.builder(); - this.fieldsForModels = ImmutableOpenMap.builder(); this.isSystem = false; } @@ -1880,7 +1866,7 @@ public Builder(IndexMetadata indexMetadata) { this.stats = indexMetadata.stats; this.indexWriteLoadForecast = indexMetadata.writeLoadForecast; this.shardSizeInBytesForecast = indexMetadata.shardSizeInBytesForecast; - this.fieldsForModels = ImmutableOpenMap.builder(indexMetadata.fieldsForModels); + this.fieldInferenceMetadata = indexMetadata.fieldInferenceMetadata; } public Builder index(String index) { @@ -2110,8 +2096,8 @@ public Builder shardSizeInBytesForecast(Long shardSizeInBytesForecast) { return this; } - public Builder fieldsForModels(Map> fieldsForModels) { - processFieldsForModels(this.fieldsForModels, fieldsForModels); + public Builder fieldInferenceMetadata(FieldInferenceMetadata fieldInferenceMetadata) { + this.fieldInferenceMetadata = Objects.requireNonNullElse(fieldInferenceMetadata, FieldInferenceMetadata.EMPTY); return this; } @@ -2310,7 +2296,7 @@ IndexMetadata build(boolean repair) { stats, indexWriteLoadForecast, shardSizeInBytesForecast, - fieldsForModels.build() + fieldInferenceMetadata ); } @@ -2436,8 +2422,8 @@ 
public static void toXContent(IndexMetadata indexMetadata, XContentBuilder build builder.field(KEY_SHARD_SIZE_FORECAST, indexMetadata.shardSizeInBytesForecast); } - if (indexMetadata.fieldsForModels.isEmpty() == false) { - builder.field(KEY_FIELDS_FOR_MODELS, indexMetadata.fieldsForModels); + if (indexMetadata.fieldInferenceMetadata.isEmpty() == false) { + builder.field(KEY_FIELD_INFERENCE, indexMetadata.fieldInferenceMetadata); } builder.endObject(); @@ -2517,18 +2503,8 @@ public static IndexMetadata fromXContent(XContentParser parser, Map> fieldsForModels = parser.map(HashMap::new, XContentParser::list) - .entrySet() - .stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - v -> v.getValue().stream().map(Object::toString).collect(Collectors.toUnmodifiableSet()) - ) - ); - builder.fieldsForModels(fieldsForModels); + case KEY_FIELD_INFERENCE: + builder.fieldInferenceMetadata(FieldInferenceMetadata.fromXContent(parser)); break; default: // assume it's custom index metadata @@ -2726,17 +2702,6 @@ private static void handleLegacyMapping(Builder builder, Map map builder.putMapping(new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, mapping)); } } - - private static void processFieldsForModels( - ImmutableOpenMap.Builder> builder, - Map> fieldsForModels - ) { - builder.clear(); - if (fieldsForModels != null) { - // Ensure that all field sets contained in the processed map are immutable - fieldsForModels.forEach((k, v) -> builder.put(k, Set.copyOf(v))); - } - } } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index d8fe0b0c19e52..96ca7a15edc30 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1267,8 +1267,8 @@ static IndexMetadata buildIndexMetadata( if (mapper != null) { MappingMetadata mappingMd = new MappingMetadata(mapper); mappingsMetadata.put(mapper.type(), mappingMd); - - indexMetadataBuilder.fieldsForModels(mapper.mappers().getFieldsForModels()); + FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata(mapper.mappers()); + indexMetadataBuilder.fieldInferenceMetadata(fieldInferenceMetadata); } for (MappingMetadata mappingMd : mappingsMetadata.values()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index d913a6465482d..0e31592991369 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -204,7 +204,7 @@ private static ClusterState applyRequest( DocumentMapper mapper = mapperService.documentMapper(); if (mapper != null) { indexMetadataBuilder.putMapping(new MappingMetadata(mapper)); - indexMetadataBuilder.fieldsForModels(mapper.mappers().getFieldsForModels()); + indexMetadataBuilder.fieldInferenceMetadata(new FieldInferenceMetadata(mapper.mappers())); } if (updatedMapping) { indexMetadataBuilder.mappingVersion(1 + indexMetadataBuilder.mappingVersion()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 372b1412df724..0741cfa682b74 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -39,7 +39,7 @@ final class FieldTypeLookup { /** * A map from inference model ID to all fields that use the model to generate embeddings. */ - private final Map> fieldsForModels; + private final Map inferenceIdsForFields; private final int maxParentPathDots; @@ -53,7 +53,7 @@ final class FieldTypeLookup { final Map fullSubfieldNameToParentPath = new HashMap<>(); final Map dynamicFieldTypes = new HashMap<>(); final Map> fieldToCopiedFields = new HashMap<>(); - final Map> fieldsForModels = new HashMap<>(); + final Map inferenceIdsForFields = new HashMap<>(); for (FieldMapper fieldMapper : fieldMappers) { String fieldName = fieldMapper.name(); MappedFieldType fieldType = fieldMapper.fieldType(); @@ -72,11 +72,7 @@ final class FieldTypeLookup { fieldToCopiedFields.get(targetField).add(fieldName); } if (fieldType instanceof InferenceModelFieldType inferenceModelFieldType) { - String inferenceModel = inferenceModelFieldType.getInferenceModel(); - if (inferenceModel != null) { - Set fields = fieldsForModels.computeIfAbsent(inferenceModel, v -> new HashSet<>()); - fields.add(fieldName); - } + inferenceIdsForFields.put(fieldName, inferenceModelFieldType.getInferenceId()); } } @@ -110,8 +106,7 @@ final class FieldTypeLookup { // make values into more compact immutable sets to save memory fieldToCopiedFields.entrySet().forEach(e -> e.setValue(Set.copyOf(e.getValue()))); this.fieldToCopiedFields = Map.copyOf(fieldToCopiedFields); - fieldsForModels.entrySet().forEach(e -> e.setValue(Set.copyOf(e.getValue()))); - this.fieldsForModels = Map.copyOf(fieldsForModels); + this.inferenceIdsForFields = Map.copyOf(inferenceIdsForFields); } public static int dotCount(String path) { @@ -220,8 +215,8 @@ Set sourcePaths(String field) { return fieldToCopiedFields.containsKey(resolvedField) ? 
fieldToCopiedFields.get(resolvedField) : Set.of(resolvedField); } - Map> getFieldsForModels() { - return fieldsForModels; + Map getInferenceIdsForFields() { + return inferenceIdsForFields; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/InferenceModelFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/InferenceModelFieldType.java index 490d7f36219cf..6e12a204ed7d0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/InferenceModelFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/InferenceModelFieldType.java @@ -17,5 +17,5 @@ public interface InferenceModelFieldType { * * @return model id used by the field type */ - String getInferenceModel(); + String getInferenceId(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index cf2212110a210..c2bd95115f27e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -524,7 +524,7 @@ public void validateDoesNotShadow(String name) { } } - public Map> getFieldsForModels() { - return fieldTypeLookup.getFieldsForModels(); + public Map getInferenceIdsForFields() { + return fieldTypeLookup.getInferenceIdsForFields(); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index 2ce7b161d3dd1..c3887f506b891 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.FieldInferenceMetadata; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -92,7 +93,7 @@ public class BulkOperationTests extends ESTestCase { public void testNoInference() { - Map> fieldsForModels = Map.of(); + FieldInferenceMetadata fieldInferenceMetadata = FieldInferenceMetadata.EMPTY; ModelRegistry modelRegistry = createModelRegistry( Map.of(INFERENCE_SERVICE_1_ID, SERVICE_1_ID, INFERENCE_SERVICE_2_ID, SERVICE_2_ID) ); @@ -116,7 +117,7 @@ public void testNoInference() { ActionListener bulkOperationListener = mock(ActionListener.class); BulkShardRequest bulkShardRequest = runBulkOperation( originalSource, - fieldsForModels, + fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, true, @@ -158,7 +159,7 @@ private static Model mockModel(String inferenceServiceId) { public void testFailedBulkShardRequest() { - Map> fieldsForModels = Map.of(); + FieldInferenceMetadata fieldInferenceMetadata = FieldInferenceMetadata.EMPTY; ModelRegistry modelRegistry = createModelRegistry(Map.of()); InferenceServiceRegistry inferenceServiceRegistry = createInferenceServiceRegistry(Map.of()); @@ -176,7 +177,7 @@ public void testFailedBulkShardRequest() { runBulkOperation( originalSource, - fieldsForModels, + fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, bulkOperationListener, @@ -206,11 +207,15 @@ public void testFailedBulkShardRequest() { @SuppressWarnings("unchecked") public void testInference() { - Map> fieldsForModels = Map.of( - 
INFERENCE_SERVICE_1_ID, - Set.of(FIRST_INFERENCE_FIELD_SERVICE_1, SECOND_INFERENCE_FIELD_SERVICE_1), - INFERENCE_SERVICE_2_ID, - Set.of(INFERENCE_FIELD_SERVICE_2) + FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata( + Map.of( + FIRST_INFERENCE_FIELD_SERVICE_1, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of()), + SECOND_INFERENCE_FIELD_SERVICE_1, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of()), + INFERENCE_FIELD_SERVICE_2, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_2_ID, Set.of()) + ) ); ModelRegistry modelRegistry = createModelRegistry( @@ -244,7 +249,7 @@ public void testInference() { ActionListener bulkOperationListener = mock(ActionListener.class); BulkShardRequest bulkShardRequest = runBulkOperation( originalSource, - fieldsForModels, + fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, true, @@ -279,7 +284,9 @@ public void testInference() { public void testFailedInference() { - Map> fieldsForModels = Map.of(INFERENCE_SERVICE_1_ID, Set.of(FIRST_INFERENCE_FIELD_SERVICE_1)); + FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata( + Map.of(FIRST_INFERENCE_FIELD_SERVICE_1, new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of())) + ); ModelRegistry modelRegistry = createModelRegistry(Map.of(INFERENCE_SERVICE_1_ID, SERVICE_1_ID)); @@ -298,7 +305,7 @@ public void testFailedInference() { ArgumentCaptor bulkResponseCaptor = ArgumentCaptor.forClass(BulkResponse.class); @SuppressWarnings("unchecked") ActionListener bulkOperationListener = mock(ActionListener.class); - runBulkOperation(originalSource, fieldsForModels, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); + runBulkOperation(originalSource, fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); verify(bulkOperationListener).onResponse(bulkResponseCaptor.capture()); BulkResponse bulkResponse = bulkResponseCaptor.getValue(); @@ -313,7 +320,9 @@ public void testFailedInference() { public void testInferenceFailsForIncorrectRootObject() { - Map> fieldsForModels = Map.of(INFERENCE_SERVICE_1_ID, Set.of(FIRST_INFERENCE_FIELD_SERVICE_1)); + FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata( + Map.of(FIRST_INFERENCE_FIELD_SERVICE_1, new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of())) + ); ModelRegistry modelRegistry = createModelRegistry(Map.of(INFERENCE_SERVICE_1_ID, SERVICE_1_ID)); @@ -331,7 +340,7 @@ public void testInferenceFailsForIncorrectRootObject() { ArgumentCaptor bulkResponseCaptor = ArgumentCaptor.forClass(BulkResponse.class); @SuppressWarnings("unchecked") ActionListener bulkOperationListener = mock(ActionListener.class); - runBulkOperation(originalSource, fieldsForModels, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); + runBulkOperation(originalSource, fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); verify(bulkOperationListener).onResponse(bulkResponseCaptor.capture()); BulkResponse bulkResponse = bulkResponseCaptor.getValue(); @@ -343,11 +352,15 @@ public void testInferenceFailsForIncorrectRootObject() { public void testInferenceIdNotFound() { - Map> fieldsForModels = Map.of( - INFERENCE_SERVICE_1_ID, - Set.of(FIRST_INFERENCE_FIELD_SERVICE_1, SECOND_INFERENCE_FIELD_SERVICE_1), - INFERENCE_SERVICE_2_ID, - Set.of(INFERENCE_FIELD_SERVICE_2) + 
FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata( + Map.of( + FIRST_INFERENCE_FIELD_SERVICE_1, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of()), + SECOND_INFERENCE_FIELD_SERVICE_1, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_1_ID, Set.of()), + INFERENCE_FIELD_SERVICE_2, + new FieldInferenceMetadata.FieldInferenceOptions(INFERENCE_SERVICE_2_ID, Set.of()) + ) ); ModelRegistry modelRegistry = createModelRegistry(Map.of(INFERENCE_SERVICE_1_ID, SERVICE_1_ID)); @@ -368,7 +381,7 @@ public void testInferenceIdNotFound() { ActionListener bulkOperationListener = mock(ActionListener.class); doAnswer(invocation -> null).when(bulkOperationListener).onResponse(bulkResponseCaptor.capture()); - runBulkOperation(originalSource, fieldsForModels, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); + runBulkOperation(originalSource, fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, false, bulkOperationListener); verify(bulkOperationListener).onResponse(bulkResponseCaptor.capture()); BulkResponse bulkResponse = bulkResponseCaptor.getValue(); @@ -444,7 +457,7 @@ public String toString() { private static BulkShardRequest runBulkOperation( Map docSource, - Map> fieldsForModels, + FieldInferenceMetadata fieldInferenceMetadata, ModelRegistry modelRegistry, InferenceServiceRegistry inferenceServiceRegistry, boolean expectTransportShardBulkActionToExecute, @@ -452,7 +465,7 @@ private static BulkShardRequest runBulkOperation( ) { return runBulkOperation( docSource, - fieldsForModels, + fieldInferenceMetadata, modelRegistry, inferenceServiceRegistry, bulkOperationListener, @@ -463,7 +476,7 @@ private static BulkShardRequest runBulkOperation( private static BulkShardRequest runBulkOperation( Map docSource, - Map> fieldsForModels, + FieldInferenceMetadata fieldInferenceMetadata, ModelRegistry modelRegistry, InferenceServiceRegistry inferenceServiceRegistry, ActionListener bulkOperationListener, @@ -472,7 +485,7 @@ private static BulkShardRequest runBulkOperation( ) { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build(); IndexMetadata indexMetadata = IndexMetadata.builder(INDEX_NAME) - .fieldsForModels(fieldsForModels) + .fieldInferenceMetadata(fieldInferenceMetadata) .settings(settings) .numberOfShards(1) .numberOfReplicas(0) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index b2354a4356595..b32873df71365 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; @@ -41,7 +42,6 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -84,7 +84,7 @@ public void testIndexMetadataSerialization() throws IOException { IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; Double indexWriteLoadForecast = randomBoolean() ? 
randomDoubleBetween(0.0, 128, true) : null; Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; - Map> fieldsForModels = randomFieldsForModels(true); + FieldInferenceMetadata fieldInferenceMetadata = randomFieldInferenceMetadata(true); IndexMetadata metadata = IndexMetadata.builder("foo") .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) @@ -110,7 +110,7 @@ public void testIndexMetadataSerialization() throws IOException { .stats(indexStats) .indexWriteLoadForecast(indexWriteLoadForecast) .shardSizeInBytesForecast(shardSizeInBytesForecast) - .fieldsForModels(fieldsForModels) + .fieldInferenceMetadata(fieldInferenceMetadata) .build(); assertEquals(system, metadata.isSystem()); @@ -145,7 +145,7 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getStats(), fromXContentMeta.getStats()); assertEquals(metadata.getForecastedWriteLoad(), fromXContentMeta.getForecastedWriteLoad()); assertEquals(metadata.getForecastedShardSizeInBytes(), fromXContentMeta.getForecastedShardSizeInBytes()); - assertEquals(metadata.getFieldsForModels(), fromXContentMeta.getFieldsForModels()); + assertEquals(metadata.getFieldInferenceMetadata(), fromXContentMeta.getFieldInferenceMetadata()); final BytesStreamOutput out = new BytesStreamOutput(); metadata.writeTo(out); @@ -169,7 +169,7 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getStats(), deserialized.getStats()); assertEquals(metadata.getForecastedWriteLoad(), deserialized.getForecastedWriteLoad()); assertEquals(metadata.getForecastedShardSizeInBytes(), deserialized.getForecastedShardSizeInBytes()); - assertEquals(metadata.getFieldsForModels(), deserialized.getFieldsForModels()); + assertEquals(metadata.getFieldInferenceMetadata(), deserialized.getFieldInferenceMetadata()); } } @@ -553,35 +553,35 @@ public void testPartialIndexReceivesDataFrozenTierPreference() { } } - public void testFieldsForModels() { + public void testFieldInferenceMetadata() { Settings.Builder settings = indexSettings(IndexVersion.current(), randomIntBetween(1, 8), 0); IndexMetadata idxMeta1 = IndexMetadata.builder("test").settings(settings).build(); - assertThat(idxMeta1.getFieldsForModels(), equalTo(Map.of())); + assertSame(idxMeta1.getFieldInferenceMetadata(), FieldInferenceMetadata.EMPTY); - Map> fieldsForModels = randomFieldsForModels(false); - IndexMetadata idxMeta2 = IndexMetadata.builder(idxMeta1).fieldsForModels(fieldsForModels).build(); - assertThat(idxMeta2.getFieldsForModels(), equalTo(fieldsForModels)); + FieldInferenceMetadata fieldInferenceMetadata = randomFieldInferenceMetadata(false); + IndexMetadata idxMeta2 = IndexMetadata.builder(idxMeta1).fieldInferenceMetadata(fieldInferenceMetadata).build(); + assertThat(idxMeta2.getFieldInferenceMetadata(), equalTo(fieldInferenceMetadata)); } private static Settings indexSettingsWithDataTier(String dataTier) { return indexSettings(IndexVersion.current(), 1, 0).put(DataTier.TIER_PREFERENCE, dataTier).build(); } - private static Map> randomFieldsForModels(boolean allowNull) { - if (allowNull && randomBoolean()) { + public static FieldInferenceMetadata randomFieldInferenceMetadata(boolean allowNull) { + if (randomBoolean() && allowNull) { return null; } - Map> fieldsForModels = new HashMap<>(); - for (int i = 0; i < randomIntBetween(0, 5); i++) { - Set fields = new HashSet<>(); - for (int j = 0; j < randomIntBetween(1, 4); j++) { - fields.add(randomAlphaOfLengthBetween(4, 10)); - } - 
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
index 27663edde945c..932eac3e60d27 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
@@ -37,7 +37,7 @@ public void testEmpty() {
         assertNotNull(names);
         assertThat(names, hasSize(0));
-        Map> fieldsForModels = lookup.getFieldsForModels();
+        Map fieldsForModels = lookup.getInferenceIdsForFields();
         assertNotNull(fieldsForModels);
         assertTrue(fieldsForModels.isEmpty());
     }
@@ -48,7 +48,7 @@ public void testAddNewField() {
         assertNull(lookup.get("bar"));
         assertEquals(f.fieldType(), lookup.get("foo"));
-        Map> fieldsForModels = lookup.getFieldsForModels();
+        Map fieldsForModels = lookup.getInferenceIdsForFields();
         assertNotNull(fieldsForModels);
         assertTrue(fieldsForModels.isEmpty());
     }
@@ -440,11 +440,13 @@ public void testInferenceModelFieldType() {
         assertEquals(f2.fieldType(), lookup.get("foo2"));
         assertEquals(f3.fieldType(), lookup.get("foo3"));
-        Map> fieldsForModels = lookup.getFieldsForModels();
-        assertNotNull(fieldsForModels);
-        assertEquals(2, fieldsForModels.size());
-        assertEquals(Set.of("foo1", "foo2"), fieldsForModels.get("bar1"));
-        assertEquals(Set.of("foo3"), fieldsForModels.get("bar2"));
+        Map inferenceIdsForFields = lookup.getInferenceIdsForFields();
+        assertNotNull(inferenceIdsForFields);
+        assertEquals(3, inferenceIdsForFields.size());
+        assertEquals("bar1", inferenceIdsForFields.get("foo1"));
+        assertEquals("bar1", inferenceIdsForFields.get("foo2"));
+        assertEquals("bar2", inferenceIdsForFields.get("foo3"));
     }
     private static FlattenedFieldMapper createFlattenedMapper(String fieldName) {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
index f512f5d352a43..bb337d0c61c93 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
@@ -26,7 +26,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.stream.Collectors;
 import static java.util.Collections.emptyList;
@@ -122,8 +121,8 @@ public void testEmptyMappingLookup() {
         assertEquals(0, mappingLookup.getMapping().getMetadataMappersMap().size());
         assertFalse(mappingLookup.fieldMappers().iterator().hasNext());
         assertEquals(0, mappingLookup.getMatchingFieldNames("*").size());
-        assertNotNull(mappingLookup.getFieldsForModels());
-        assertTrue(mappingLookup.getFieldsForModels().isEmpty());
+        assertNotNull(mappingLookup.getInferenceIdsForFields());
+        assertTrue(mappingLookup.getInferenceIdsForFields().isEmpty());
     }
     public void testValidateDoesNotShadow() {
@@ -191,7 +190,7 @@ public MetricType getMetricType() {
         );
     }
-    public void testFieldsForModels() {
+    public void testInferenceIdsForFields() {
         MockInferenceModelFieldType fieldType = new MockInferenceModelFieldType("test_field_name", "test_model_id");
         MappingLookup mappingLookup = createMappingLookup(
             Collections.singletonList(new MockFieldMapper(fieldType)),
@@ -201,10 +200,10 @@
         assertEquals(1, size(mappingLookup.fieldMappers()));
         assertEquals(fieldType, mappingLookup.getFieldType("test_field_name"));
-        Map> fieldsForModels = mappingLookup.getFieldsForModels();
-        assertNotNull(fieldsForModels);
-        assertEquals(1, fieldsForModels.size());
-        assertEquals(Collections.singleton("test_field_name"), fieldsForModels.get("test_model_id"));
+        Map inferenceIdsForFields = mappingLookup.getInferenceIdsForFields();
+        assertNotNull(inferenceIdsForFields);
+        assertEquals(1, inferenceIdsForFields.size());
+        assertEquals("test_model_id", inferenceIdsForFields.get("test_field_name"));
     }
     private void assertAnalyzes(Analyzer analyzer, String field, String output) throws IOException {
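The FieldTypeLookup and MappingLookup changes replace the model-to-fields view (a map keyed by model id holding sets of field names) with a field-to-inference-id view (one entry per field). If a caller still wants the old grouping, it can be derived from the new map; a minimal sketch, assuming only java.util and java.util.stream.Collectors and reusing the example field/model names from the test above:

    Map<String, String> inferenceIdsForFields = Map.of(
        "foo1", "bar1",
        "foo2", "bar1",
        "foo3", "bar2"
    );

    // Recover the old orientation (inference id -> set of fields) when needed.
    Map<String, Set<String>> fieldsForModels = inferenceIdsForFields.entrySet()
        .stream()
        .collect(Collectors.groupingBy(Map.Entry::getValue, Collectors.mapping(Map.Entry::getKey, Collectors.toSet())));

    // fieldsForModels now maps "bar1" -> {"foo1", "foo2"} and "bar2" -> {"foo3"}.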
diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java
index 854749d6308db..0d21134b5d9a9 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java
@@ -39,7 +39,7 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format)
     }
     @Override
-    public String getInferenceModel() {
+    public String getInferenceId() {
         return modelId;
     }
 }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java
index 027b85a9a9f45..d9e18728615ba 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java
@@ -108,7 +108,7 @@ public String typeName() {
     }
     @Override
-    public String getInferenceModel() {
+    public String getInferenceId() {
         return modelId;
     }
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java
index 69fa64ffa6d1c..a7d3fcce26116 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java
@@ -20,8 +20,6 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
 public class SemanticTextClusterMetadataTests extends ESSingleNodeTestCase {
@@ -35,7 +33,10 @@ public void testCreateIndexWithSemanticTextField() {
             "test",
             client().admin().indices().prepareCreate("test").setMapping("field", "type=semantic_text,model_id=test_model")
         );
-        assertEquals(Map.of("test_model", Set.of("field")), indexService.getMetadata().getFieldsForModels());
+        assertEquals(
+            indexService.getMetadata().getFieldInferenceMetadata().getFieldInferenceOptions().get("field").inferenceId(),
+            "test_model"
+        );
     }
     public void testAddSemanticTextField() throws Exception {
@@ -52,7 +53,10 @@ public void testAddSemanticTextField() throws Exception {
             putMappingExecutor,
             singleTask(request)
         );
-        assertEquals(Map.of("test_model", Set.of("field")), resultingState.metadata().index("test").getFieldsForModels());
+        assertEquals(
+            resultingState.metadata().index("test").getFieldInferenceMetadata().getFieldInferenceOptions().get("field").inferenceId(),
+            "test_model"
+        );
     }
     private static List singleTask(PutMappingClusterStateUpdateRequest request) {

From c5de0da930dbc329f2000fae7475fe1d90b82488 Mon Sep 17 00:00:00 2001
From: carlosdelest
Date: Tue, 19 Mar 2024 14:22:05 +0100
Subject: [PATCH 248/248] Merge from feature branch

---
 .../action/bulk/BulkOperation.java            |  4 +-
 .../action/bulk/BulkShardRequest.java         | 18 ++--
 .../ShardBulkInferenceActionFilter.java       | 70 +++++++-------
 .../ShardBulkInferenceActionFilterTests.java  | 96 ++++++++++---------
 4 files changed, 94 insertions(+), 94 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
index b7a6387045e3d..452a9ec90443a 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java
@@ -209,8 +209,8 @@ private void executeBulkRequestsByShard(Map> requ
                 requests.toArray(new BulkItemRequest[0])
             );
             var indexMetadata = clusterState.getMetadata().index(shardId.getIndexName());
-            if (indexMetadata != null && indexMetadata.getFieldsForModels().isEmpty() == false) {
-                bulkShardRequest.setFieldInferenceMetadata(indexMetadata.getFieldsForModels());
+            if (indexMetadata != null && indexMetadata.getFieldInferenceMetadata().isEmpty() == false) {
+                bulkShardRequest.setFieldInferenceMetadata(indexMetadata.getFieldInferenceMetadata());
             }
             bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
             bulkShardRequest.timeout(bulkRequest.timeout());
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index 1b5494c6a68f5..39fa791a3e27d 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -15,6 +15,7 @@
 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.cluster.metadata.FieldInferenceMetadata;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.set.Sets;
@@ -22,7 +23,6 @@
 import org.elasticsearch.transport.RawIndexingDataTransportRequest;
 import java.io.IOException;
-import java.util.Map;
 import java.util.Set;
 public final class BulkShardRequest extends ReplicatedWriteRequest
-    private transient Map> fieldsInferenceMetadata = null;
+    private transient FieldInferenceMetadata fieldsInferenceMetadataMap = null;
     public BulkShardRequest(StreamInput in) throws IOException {
         super(in);
@@ -51,24 +51,24 @@ public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRe
      * Public for test
      * Set the transient metadata indicating that this request requires running inference before proceeding.
      */
-    public void setFieldInferenceMetadata(Map> fieldsInferenceMetadata) {
-        this.fieldsInferenceMetadata = fieldsInferenceMetadata;
+    public void setFieldInferenceMetadata(FieldInferenceMetadata fieldsInferenceMetadata) {
+        this.fieldsInferenceMetadataMap = fieldsInferenceMetadata;
     }
     /**
      * Consumes the inference metadata to execute inference on the bulk items just once.
      */
-    public Map> consumeFieldInferenceMetadata() {
-        var ret = fieldsInferenceMetadata;
-        fieldsInferenceMetadata = null;
+    public FieldInferenceMetadata consumeFieldInferenceMetadata() {
+        FieldInferenceMetadata ret = fieldsInferenceMetadataMap;
+        fieldsInferenceMetadataMap = null;
         return ret;
     }
     /**
      * Public for test
      */
-    public Map> getFieldsInferenceMetadata() {
-        return fieldsInferenceMetadata;
+    public FieldInferenceMetadata getFieldsInferenceMetadataMap() {
+        return fieldsInferenceMetadataMap;
     }
     public long totalSizeInBytes() {
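The renamed accessors keep the same contract: the metadata rides on the shard request only in memory (the field is transient and never written to the stream), and consumeFieldInferenceMetadata hands it out exactly once so the filter cannot run inference twice for the same request. A small usage sketch of that contract, using only constructors and methods shown in this patch (the shard id and the empty item array are arbitrary):

    BulkShardRequest request = new BulkShardRequest(
        new ShardId("my-index", "_na_", 0),
        WriteRequest.RefreshPolicy.NONE,
        new BulkItemRequest[0]
    );
    request.setFieldInferenceMetadata(FieldInferenceMetadata.EMPTY);

    FieldInferenceMetadata first = request.consumeFieldInferenceMetadata();   // returns the metadata
    FieldInferenceMetadata second = request.consumeFieldInferenceMetadata();  // null: already consumed
    assert first != null && second == null;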
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
index e679d3c970abf..984a20419b2c8 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.action.support.ActionFilterChain;
 import org.elasticsearch.action.support.RefCountingRunnable;
 import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.cluster.metadata.FieldInferenceMetadata;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.core.Nullable;
@@ -44,7 +45,6 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.stream.Collectors;
 /**
@@ -81,7 +81,7 @@ public void app
             case TransportShardBulkAction.ACTION_NAME:
                 BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
                 var fieldInferenceMetadata = bulkShardRequest.consumeFieldInferenceMetadata();
-                if (fieldInferenceMetadata != null && fieldInferenceMetadata.size() > 0) {
+                if (fieldInferenceMetadata != null && fieldInferenceMetadata.isEmpty() == false) {
                     Runnable onInferenceCompletion = () -> chain.proceed(task, action, request, listener);
                     processBulkShardRequest(fieldInferenceMetadata, bulkShardRequest, onInferenceCompletion);
                 } else {
@@ -96,7 +96,7 @@
     }
     private void processBulkShardRequest(
-        Map> fieldInferenceMetadata,
+        FieldInferenceMetadata fieldInferenceMetadata,
         BulkShardRequest bulkShardRequest,
         Runnable onCompletion
     ) {
@@ -112,13 +112,13 @@ private record FieldInferenceResponse(String field, Model model, ChunkedInferenc
     private record FieldInferenceResponseAccumulator(int id, List responses, List failures) {}
     private class AsyncBulkShardInferenceAction implements Runnable {
-        private final Map> fieldInferenceMetadata;
+        private final FieldInferenceMetadata fieldInferenceMetadata;
         private final BulkShardRequest bulkShardRequest;
         private final Runnable onCompletion;
         private final AtomicArray inferenceResults;
         private AsyncBulkShardInferenceAction(
-            Map> fieldInferenceMetadata,
+            FieldInferenceMetadata fieldInferenceMetadata,
             BulkShardRequest bulkShardRequest,
             Runnable onCompletion
         ) {
@@ -289,39 +289,35 @@ private Map> createFieldInferenceRequests(Bu
                     continue;
                 }
                 final Map docMap = indexRequest.sourceAsMap();
-                for (var entry : fieldInferenceMetadata.entrySet()) {
-                    String inferenceId = entry.getKey();
-                    for (var field : entry.getValue()) {
-                        var value = XContentMapValues.extractValue(field, docMap);
-                        if (value == null) {
-                            continue;
-                        }
-                        if (inferenceResults.get(item.id()) == null) {
-                            inferenceResults.set(
+                for (var entry : fieldInferenceMetadata.getFieldInferenceOptions().entrySet()) {
+                    String field = entry.getKey();
+                    String inferenceId = entry.getValue().inferenceId();
+                    var value = XContentMapValues.extractValue(field, docMap);
+                    if (value == null) {
+                        continue;
+                    }
+                    if (inferenceResults.get(item.id()) == null) {
+                        inferenceResults.set(
+                            item.id(),
+                            new FieldInferenceResponseAccumulator(
                                 item.id(),
-                                new FieldInferenceResponseAccumulator(
-                                    item.id(),
-                                    Collections.synchronizedList(new ArrayList<>()),
-                                    Collections.synchronizedList(new ArrayList<>())
-                                )
-                            );
-                        }
-                        if (value instanceof String valueStr) {
-                            List fieldRequests = fieldRequestsMap.computeIfAbsent(
-                                inferenceId,
-                                k -> new ArrayList<>()
-                            );
-                            fieldRequests.add(new FieldInferenceRequest(item.id(), field, valueStr));
-                        } else {
-                            inferenceResults.get(item.id()).failures.add(
-                                new ElasticsearchStatusException(
-                                    "Invalid format for field [{}], expected [String] got [{}]",
-                                    RestStatus.BAD_REQUEST,
-                                    field,
-                                    value.getClass().getSimpleName()
-                                )
-                            );
-                        }
+                                Collections.synchronizedList(new ArrayList<>()),
+                                Collections.synchronizedList(new ArrayList<>())
+                            )
+                        );
+                    }
+                    if (value instanceof String valueStr) {
+                        List fieldRequests = fieldRequestsMap.computeIfAbsent(inferenceId, k -> new ArrayList<>());
+                        fieldRequests.add(new FieldInferenceRequest(item.id(), field, valueStr));
+                    } else {
+                        inferenceResults.get(item.id()).failures.add(
+                            new ElasticsearchStatusException(
+                                "Invalid format for field [{}], expected [String] got [{}]",
+                                RestStatus.BAD_REQUEST,
+                                field,
+                                value.getClass().getSimpleName()
+                            )
+                        );
                     }
                 }
             }
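The reshaped loop in createFieldInferenceRequests now walks one entry per field and then regroups the work by inference id, so that each inference endpoint still receives a single batched call per bulk request. Stripped of the bulk-item bookkeeping, the grouping idiom is just computeIfAbsent over a map of pending requests; a standalone sketch, where the class, record and variable names are illustrative rather than the filter's own types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative only: the per-endpoint batching idiom used by createFieldInferenceRequests.
    class InferenceBatcher {
        record PendingInference(int itemId, String field, String text) {}

        private final Map<String, List<PendingInference>> requestsByInferenceId = new HashMap<>();

        // Called once per (bulk item, inference field) pair whose value is a String.
        void enqueue(String inferenceId, int itemId, String field, String text) {
            requestsByInferenceId.computeIfAbsent(inferenceId, k -> new ArrayList<>())
                .add(new PendingInference(itemId, field, text));
        }

        // One batched call per inference endpoint.
        Map<String, List<PendingInference>> batches() {
            return requestsByInferenceId;
        }
    }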
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
index 7f3ffbe596543..4a1825303b5a7 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.ActionFilterChain;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.cluster.metadata.FieldInferenceMetadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.shard.ShardId;
@@ -40,7 +41,6 @@
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -79,7 +79,7 @@ public void testFilterNoop() throws Exception {
         CountDownLatch chainExecuted = new CountDownLatch(1);
         ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
             try {
-                assertNull(((BulkShardRequest) request).getFieldsInferenceMetadata());
+                assertNull(((BulkShardRequest) request).getFieldsInferenceMetadataMap());
             } finally {
                 chainExecuted.countDown();
             }
         };
@@ -91,7 +91,9 @@ public void testFilterNoop() throws Exception {
             WriteRequest.RefreshPolicy.NONE,
             new BulkItemRequest[0]
         );
-        request.setFieldInferenceMetadata(Map.of("foo", Set.of("bar")));
+        request.setFieldInferenceMetadata(
+            new FieldInferenceMetadata(Map.of("foo", new FieldInferenceMetadata.FieldInferenceOptions("bar", Set.of())))
+        );
         filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
         awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
     }
@@ -104,7 +106,7 @@ public void testInferenceNotFound() throws Exception {
         ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
             try {
                 BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
-                assertNull(bulkShardRequest.getFieldsInferenceMetadata());
+                assertNull(bulkShardRequest.getFieldsInferenceMetadataMap());
                 for (BulkItemRequest item : bulkShardRequest.items()) {
                     assertNotNull(item.getPrimaryResponse());
                     assertTrue(item.getPrimaryResponse().isFailed());
@@ -118,11 +120,15 @@ public void testInferenceNotFound() throws Exception {
         ActionListener actionListener = mock(ActionListener.class);
         Task task = mock(Task.class);
-        Map> inferenceFields = Map.of(
-            model.getInferenceEntityId(),
-            Set.of("field1"),
-            "inference_0",
-            Set.of("field2", "field3")
+        FieldInferenceMetadata inferenceFields = new FieldInferenceMetadata(
+            Map.of(
+                "field1",
+                new FieldInferenceMetadata.FieldInferenceOptions(model.getInferenceEntityId(), Set.of()),
+                "field2",
+                new FieldInferenceMetadata.FieldInferenceOptions("inference_0", Set.of()),
+                "field3",
+                new FieldInferenceMetadata.FieldInferenceOptions("inference_0", Set.of())
+            )
         );
         BulkItemRequest[] items = new BulkItemRequest[10];
         for (int i = 0; i < items.length; i++) {
@@ -144,19 +150,19 @@ public void testManyRandomDocs() throws Exception {
         }
         int numInferenceFields = randomIntBetween(1, 5);
-        Map> inferenceFields = new HashMap<>();
+        Map inferenceFieldsMap = new HashMap<>();
         for (int i = 0; i < numInferenceFields; i++) {
-            String inferenceId = randomFrom(inferenceModelMap.keySet());
             String field = randomAlphaOfLengthBetween(5, 10);
-            var res = inferenceFields.computeIfAbsent(inferenceId, k -> new HashSet<>());
-            res.add(field);
+            String inferenceId = randomFrom(inferenceModelMap.keySet());
+            inferenceFieldsMap.put(field, new FieldInferenceMetadata.FieldInferenceOptions(inferenceId, Set.of()));
         }
+        FieldInferenceMetadata fieldInferenceMetadata = new FieldInferenceMetadata(inferenceFieldsMap);
         int numRequests = randomIntBetween(100, 1000);
         BulkItemRequest[] originalRequests = new BulkItemRequest[numRequests];
         BulkItemRequest[] modifiedRequests = new BulkItemRequest[numRequests];
         for (int id = 0; id < numRequests; id++) {
-            BulkItemRequest[] res = randomBulkItemRequest(id, inferenceModelMap, inferenceFields);
+            BulkItemRequest[] res = randomBulkItemRequest(id, inferenceModelMap, fieldInferenceMetadata);
             originalRequests[id] = res[0];
             modifiedRequests[id] = res[1];
         }
@@ -167,7 +173,7 @@ public void testManyRandomDocs() throws Exception {
             try {
                 assertThat(request, instanceOf(BulkShardRequest.class));
                 BulkShardRequest bulkShardRequest = (BulkShardRequest) request;
-                assertNull(bulkShardRequest.getFieldsInferenceMetadata());
+                assertNull(bulkShardRequest.getFieldsInferenceMetadataMap());
                 BulkItemRequest[] items = bulkShardRequest.items();
                 assertThat(items.length, equalTo(originalRequests.length));
                 for (int id = 0; id < items.length; id++) {
@@ -186,7 +192,7 @@ public void testManyRandomDocs() throws Exception {
         ActionListener actionListener = mock(ActionListener.class);
         Task task = mock(Task.class);
         BulkShardRequest original = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, originalRequests);
-        original.setFieldInferenceMetadata(inferenceFields);
+        original.setFieldInferenceMetadata(fieldInferenceMetadata);
         filter.apply(task, TransportShardBulkAction.ACTION_NAME, original, actionListener, actionFilterChain);
         awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);
     }
@@ -257,42 +263,40 @@ private static ShardBulkInferenceActionFilter createFilter(ThreadPool threadPool
     private static BulkItemRequest[] randomBulkItemRequest(
         int id,
         Map modelMap,
-        Map> inferenceFieldMap
+        FieldInferenceMetadata fieldInferenceMetadata
     ) {
         Map docMap = new LinkedHashMap<>();
         Map inferenceResultsMap = new LinkedHashMap<>();
-        for (var entry : inferenceFieldMap.entrySet()) {
-            String inferenceId = entry.getKey();
-            var model = modelMap.get(inferenceId);
-            for (var field : entry.getValue()) {
-                String text = randomAlphaOfLengthBetween(10, 100);
-                docMap.put(field, text);
-                if (model == null) {
-                    // ignore results, the doc should fail with a resource not found exception
-                    continue;
-                }
-                int numChunks = randomIntBetween(1, 5);
-                List chunks = new ArrayList<>();
-                for (int i = 0; i < numChunks; i++) {
-                    chunks.add(randomAlphaOfLengthBetween(5, 10));
-                }
-                TaskType taskType = model.getTaskType();
-                final ChunkedInferenceServiceResults results;
-                switch (taskType) {
-                    case TEXT_EMBEDDING:
-                        results = randomTextEmbeddings(chunks);
-                        break;
+        for (var entry : fieldInferenceMetadata.getFieldInferenceOptions().entrySet()) {
+            String field = entry.getKey();
+            var model = modelMap.get(entry.getValue().inferenceId());
+            String text = randomAlphaOfLengthBetween(10, 100);
+            docMap.put(field, text);
+            if (model == null) {
+                // ignore results, the doc should fail with a resource not found exception
+                continue;
+            }
+            int numChunks = randomIntBetween(1, 5);
+            List chunks = new ArrayList<>();
+            for (int i = 0; i < numChunks; i++) {
+                chunks.add(randomAlphaOfLengthBetween(5, 10));
+            }
+            TaskType taskType = model.getTaskType();
+            final ChunkedInferenceServiceResults results;
+            switch (taskType) {
+                case TEXT_EMBEDDING:
+                    results = randomTextEmbeddings(chunks);
+                    break;
-                    case SPARSE_EMBEDDING:
-                        results = randomSparseEmbeddings(chunks);
-                        break;
+                case SPARSE_EMBEDDING:
+                    results = randomSparseEmbeddings(chunks);
+                    break;
-                    default:
-                        throw new AssertionError("Unknown task type " + taskType.name());
-                }
-                model.putResult(text, results);
-                InferenceResultFieldMapper.applyFieldInference(inferenceResultsMap, field, model, results);
+                default:
+                    throw new AssertionError("Unknown task type " + taskType.name());
             }
+            model.putResult(text, results);
+            InferenceResultFieldMapper.applyFieldInference(inferenceResultsMap, field, model, results);
         }
         Map expectedDocMap = new LinkedHashMap<>(docMap);
         expectedDocMap.put(InferenceResultFieldMapper.NAME, inferenceResultsMap);
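The filter tests in this file all share the same harness shape: a mocked ActionFilterChain that performs the assertions, a CountDownLatch to make the asynchronous filter observable, and awaitLatch to bound the wait. Reduced to its skeleton (assertions elided, and relying on a filter and request built the way the tests above build them), the pattern is roughly:

    CountDownLatch chainExecuted = new CountDownLatch(1);
    ActionFilterChain actionFilterChain = (task, action, request, listener) -> {
        try {
            // assertions on the (possibly rewritten) BulkShardRequest go here
        } finally {
            chainExecuted.countDown();
        }
    };
    ActionListener actionListener = mock(ActionListener.class);
    Task task = mock(Task.class);

    filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain);
    awaitLatch(chainExecuted, 10, TimeUnit.SECONDS);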