diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java index d3f210f774782..74cea5d5f1549 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java @@ -10,7 +10,6 @@ package org.elasticsearch.benchmark.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -28,7 +27,6 @@ import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.Script; @@ -56,13 +54,7 @@ public static MapperService create(String mappings) { MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); MapperService mapperService = new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/docs/changelog/115814.yaml b/docs/changelog/115814.yaml new file mode 100644 index 0000000000000..34f1213272d6f --- /dev/null +++ b/docs/changelog/115814.yaml @@ -0,0 +1,6 @@ +pr: 115814 +summary: "[ES|QL] Implicit casting string literal to intervals" +area: ES|QL +type: enhancement +issues: + - 115352 diff --git a/docs/reference/esql/functions/kibana/definition/length.json b/docs/reference/esql/functions/kibana/definition/length.json index 9ea340ebf7420..bc26acde744f5 100644 --- a/docs/reference/esql/functions/kibana/definition/length.json +++ b/docs/reference/esql/functions/kibana/definition/length.json @@ -31,7 +31,7 @@ } ], "examples" : [ - "FROM airports\n| KEEP city\n| EVAL fn_length = LENGTH(first_name)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/length.md b/docs/reference/esql/functions/kibana/docs/length.md index ce7726d092bae..aed76ee14cedb 100644 --- a/docs/reference/esql/functions/kibana/docs/length.md +++ b/docs/reference/esql/functions/kibana/docs/length.md @@ -7,7 +7,8 @@ Returns the character length of a string. ``` FROM airports +| WHERE country == "India" | KEEP city -| EVAL fn_length = LENGTH(first_name) +| EVAL fn_length = LENGTH(city) ``` Note: All strings are in UTF-8, so a single character can use multiple bytes. 
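The changelog entry above adds implicit casting of string literals to intervals. As a minimal, hedged sketch of the behavior described in the docs change below (assuming the standard `employees` docs index and its `hire_date` field; `DATE_TRUNC` is a scalar function taking a `date_period`, which the table below marks as supporting implicit casting):

[source,esql]
----
FROM employees
| EVAL month = DATE_TRUNC("1 month", hire_date)
| KEEP emp_no, month
| LIMIT 1
----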
diff --git a/docs/reference/esql/implicit-casting.asciidoc b/docs/reference/esql/implicit-casting.asciidoc index f0c0aa3d82063..ffb6d3fc35acb 100644 --- a/docs/reference/esql/implicit-casting.asciidoc +++ b/docs/reference/esql/implicit-casting.asciidoc @@ -5,7 +5,7 @@ Implicit casting ++++ -Often users will input `datetime`, `ip`, `version`, or geospatial objects as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. +Often users will input `date`, `ip`, `version`, `date_period`, or `time_duration` as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. Without implicit casting users must explicitly code these `to_X` functions in their queries, when string literals don't match the target data types they are assigned or compared to. Here is an example of using `to_datetime` to explicitly perform a data type conversion. @@ -18,7 +18,7 @@ FROM employees | LIMIT 1 ---- -Implicit casting improves usability, by automatically converting string literals to the target data type. This is most useful when the target data type is `datetime`, `ip`, `version` or a geo spatial. It is natural to specify these as a string in queries. +Implicit casting improves usability by automatically converting string literals to the target data type. This is most useful when the target data type is `date`, `ip`, `version`, `date_period`, or `time_duration`. It is natural to specify these as a string in queries. The first query can be coded without calling the `to_datetime` function, as follows: @@ -38,16 +38,28 @@ The following table details which {esql} operations support implicit casting for [%header.monospaced.styled,format=dsv,separator=|] |=== -||ScalarFunction|BinaryComparison|ArithmeticOperation|InListPredicate|AggregateFunction -|DATETIME|Y|Y|Y|Y|N -|DOUBLE|Y|N|N|N|N -|LONG|Y|N|N|N|N -|INTEGER|Y|N|N|N|N -|IP|Y|Y|Y|Y|N -|VERSION|Y|Y|Y|Y|N -|GEO_POINT|Y|N|N|N|N -|GEO_SHAPE|Y|N|N|N|N -|CARTESIAN_POINT|Y|N|N|N|N -|CARTESIAN_SHAPE|Y|N|N|N|N -|BOOLEAN|Y|Y|Y|Y|N +||ScalarFunction*|Operator*|<>|<> +|DATE|Y|Y|Y|N +|IP|Y|Y|Y|N +|VERSION|Y|Y|Y|N +|BOOLEAN|Y|Y|Y|N +|DATE_PERIOD/TIME_DURATION|Y|N|Y|N |=== + +ScalarFunction* includes: + +<> + +<> + +<> + + +Operator* includes: + +<> + +<> + +<> + diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java index 61940be247861..e049d4cd372e6 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java @@ -112,7 +112,7 @@ public void testThrottleResponsesAreCountedInMetrics() throws IOException { blobContainer.blobExists(purpose, blobName); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES).expectMetrics() .withRequests(numThrottles + 1) .withThrottles(numThrottles) .withExceptions(numThrottles) @@ -137,7 +137,7 @@ public void
testRangeNotSatisfiedAreCountedInMetrics() throws IOException { assertThrows(RequestedRangeNotSatisfiedException.class, () -> blobContainer.readBlob(purpose, blobName)); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB).expectMetrics() .withRequests(1) .withThrottles(0) .withExceptions(1) @@ -170,7 +170,7 @@ public void testErrorResponsesAreCountedInMetrics() throws IOException { blobContainer.blobExists(purpose, blobName); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES).expectMetrics() .withRequests(numErrors + 1) .withThrottles(throttles.get()) .withExceptions(numErrors) @@ -191,7 +191,7 @@ public void testRequestFailuresAreCountedInMetrics() { assertThrows(IOException.class, () -> blobContainer.listBlobs(purpose)); // Correct metrics are recorded - metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.LIST_BLOBS, repository).expectMetrics() + metricsAsserter(dataNodeName, purpose, AzureBlobStore.Operation.LIST_BLOBS).expectMetrics() .withRequests(4) .withThrottles(0) .withExceptions(4) @@ -322,20 +322,14 @@ private void clearMetrics(String discoveryNode) { .forEach(TestTelemetryPlugin::resetMeter); } - private MetricsAsserter metricsAsserter( - String dataNodeName, - OperationPurpose operationPurpose, - AzureBlobStore.Operation operation, - String repository - ) { - return new MetricsAsserter(dataNodeName, operationPurpose, operation, repository); + private MetricsAsserter metricsAsserter(String dataNodeName, OperationPurpose operationPurpose, AzureBlobStore.Operation operation) { + return new MetricsAsserter(dataNodeName, operationPurpose, operation); } private class MetricsAsserter { private final String dataNodeName; private final OperationPurpose purpose; private final AzureBlobStore.Operation operation; - private final String repository; enum Result { Success, @@ -361,11 +355,10 @@ List<Measurement> getMeasurements(TestTelemetryPlugin testTelemetryPlugin, Strin abstract List<Measurement> getMeasurements(TestTelemetryPlugin testTelemetryPlugin, String name); } - private MetricsAsserter(String dataNodeName, OperationPurpose purpose, AzureBlobStore.Operation operation, String repository) { + private MetricsAsserter(String dataNodeName, OperationPurpose purpose, AzureBlobStore.Operation operation) { this.dataNodeName = dataNodeName; this.purpose = purpose; this.operation = operation; - this.repository = repository; } private class Expectations { @@ -458,7 +451,6 @@ private void assertMatchingMetricRecorded(MetricType metricType, String metricNa .filter( m -> m.attributes().get("operation").equals(operation.getKey()) && m.attributes().get("purpose").equals(purpose.getKey()) - && m.attributes().get("repo_name").equals(repository) && m.attributes().get("repo_type").equals("azure") ) .findFirst() @@ -470,8 +462,6 @@ private void assertMatchingMetricRecorded(MetricType metricType, String metricNa + operation.getKey() + " and purpose=" + purpose.getKey() - + " and repo_name=" - + repository + " in " + measurements ) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java
b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index bd21f208faac4..ab3f3ee4f3728 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -402,10 +402,7 @@ public void testMetrics() throws Exception { ) ); metrics.forEach(metric -> { - assertThat( - metric.attributes(), - allOf(hasEntry("repo_type", AzureRepository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) - ); + assertThat(metric.attributes(), allOf(hasEntry("repo_type", AzureRepository.TYPE), hasKey("operation"), hasKey("purpose"))); final AzureBlobStore.Operation operation = AzureBlobStore.Operation.fromKey((String) metric.attributes().get("operation")); final AzureBlobStore.StatsKey statsKey = new AzureBlobStore.StatsKey( operation, diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 346a458a65f85..59dfa6b9aace2 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin */ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' +apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { description 'The S3 repository plugin adds S3 repositories' @@ -48,6 +49,10 @@ dependencies { yamlRestTestImplementation project(':test:fixtures:minio-fixture') internalClusterTestImplementation project(':test:fixtures:minio-fixture') + javaRestTestImplementation project(":test:framework") + javaRestTestImplementation project(':test:fixtures:s3-fixture') + javaRestTestImplementation project(':modules:repository-s3') + yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index bb8a452e21771..d9480abf21687 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -300,10 +300,7 @@ public void testMetrics() throws Exception { ) ); metrics.forEach(metric -> { - assertThat( - metric.attributes(), - allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) - ); + assertThat(metric.attributes(), allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("operation"), hasKey("purpose"))); final S3BlobStore.Operation operation = S3BlobStore.Operation.parse((String) metric.attributes().get("operation")); final S3BlobStore.StatsKey statsKey = new S3BlobStore.StatsKey( operation, diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java new file mode 100644 index 0000000000000..2de85f657664a --- /dev/null +++ 
b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestIT.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixture; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.MutableSettingsProvider; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; + +public class RepositoryS3RestIT extends ESRestTestCase { + + private static final String BUCKET = "RepositoryS3JavaRestTest-bucket"; + private static final String BASE_PATH = "RepositoryS3JavaRestTest-base-path"; + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, "ignored"); + + private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore(keystoreSettings) + .setting("s3.client.default.endpoint", s3Fixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testReloadCredentialsFromKeystore() throws IOException { + // Register repository (?verify=false because we don't have access to the blob store yet) + final var repositoryName = randomIdentifier(); + registerRepository( + repositoryName, + S3Repository.TYPE, + false, + Settings.builder().put("bucket", BUCKET).put("base_path", BASE_PATH).build() + ); + final var verifyRequest = new Request("POST", "/_snapshot/" + repositoryName + "/_verify"); + + // Set up initial credentials + final var accessKey1 = randomIdentifier(); + s3Fixture.setAccessKey(accessKey1); + keystoreSettings.put("s3.client.default.access_key", accessKey1); + keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + + // Check access using initial credentials + assertOK(client().performRequest(verifyRequest)); + + // Rotate credentials in blob store + final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); + s3Fixture.setAccessKey(accessKey2); + + // Ensure that initial credentials are now invalid + final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest)); + assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500)); 
assertThat( + accessDeniedException2.getMessage(), + allOf(containsString("Bad access key"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied")) + ); + + // Set up refreshed credentials + keystoreSettings.put("s3.client.default.access_key", accessKey2); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + + // Check access using refreshed credentials + assertOK(client().performRequest(verifyRequest)); + } + +} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index da357dc09ab95..7407522651e55 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -327,8 +327,6 @@ private Map<String, String> metricAttributes(String action) { return Map.of( "repo_type", S3Repository.TYPE, - "repo_name", - blobStore.getRepositoryMetadata().name(), "operation", Operation.GET_OBJECT.getKey(), "purpose", diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index b292dc5872994..ac49cffc1e0da 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -1106,7 +1106,7 @@ private List<Measurement> getRetryHistogramMeasurements() { } private Map<String, String> metricAttributes(String action) { - return Map.of("repo_type", "s3", "repo_name", "repository", "operation", "GetObject", "purpose", "Indices", "action", action); + return Map.of("repo_type", "s3", "operation", "GetObject", "purpose", "Indices", "action", action); } /** diff --git a/muted-tests.yml b/muted-tests.yml index ff115bf4be0f1..2f242b01390be 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -150,12 +150,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111774 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111777 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 @@ -241,6 +235,15 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116752 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests issue: https://github.com/elastic/elasticsearch/issues/116182 +- class: org.elasticsearch.http.netty4.Netty4IncrementalRequestHandlingIT + method: testServerCloseConnectionMidStream + issue: https://github.com/elastic/elasticsearch/issues/116774 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=snapshot/20_operator_privileges_disabled/Operator only settings can be set and restored by non-operator user when operator privileges is disabled} + issue: 
https://github.com/elastic/elasticsearch/issues/116775 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} + issue: https://github.com/elastic/elasticsearch/issues/116777 # Examples: # diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index fcca3f9a4700c..daadf936ae841 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -81,6 +81,7 @@ import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -90,6 +91,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -1277,12 +1279,16 @@ private void checkSnapshot(String snapshotName, int count, String tookOnVersion, assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); // the format can change depending on the ES node version running & this test code running + // and if there's an in-progress release that hasn't been published yet, + // which could affect the top range of the index release version + String firstReleaseVersion = tookOnIndexVersion.toReleaseVersion().split("-")[0]; assertThat( - XContentMapValues.extractValue("snapshots.version", snapResponse), + (Iterable<String>) XContentMapValues.extractValue("snapshots.version", snapResponse), anyOf( - equalTo(List.of(tookOnVersion)), - equalTo(List.of(tookOnIndexVersion.toString())), - equalTo(List.of(tookOnIndexVersion.toReleaseVersion())) + contains(tookOnVersion), + contains(tookOnIndexVersion.toString()), + contains(firstReleaseVersion), + contains(startsWith(firstReleaseVersion + "-")) ) ); diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java index 9c97cb8fe7e85..7ea58ee326a79 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -10,7 +10,7 @@ package org.elasticsearch.common; import java.nio.ByteBuffer; -import java.util.Base64; +import java.util.function.Supplier; /** * Generates a base64-encoded, k-ordered UUID string optimized for compression and efficient indexing. @@ -28,18 +28,25 @@ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. 
*/ public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { - private static final Base64.Encoder BASE_64_NO_PADDING = Base64.getEncoder().withoutPadding(); + + public TimeBasedKOrderedUUIDGenerator( + final Supplier<Long> timestampSupplier, + final Supplier<Integer> sequenceIdSupplier, + final Supplier<byte[]> macAddressSupplier + ) { + super(timestampSupplier, sequenceIdSupplier, macAddressSupplier); + } @Override public String getBase64UUID() { - final int sequenceId = this.sequenceNumber.incrementAndGet() & 0x00FF_FFFF; + final int sequenceId = sequenceNumber.incrementAndGet() & 0x00FF_FFFF; // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. // Uses AtomicLong to guarantee that timestamp increases even if the system clock moves backward. // If the sequenceId overflows (reaches 0 within the same millisecond), the timestamp is incremented // to ensure strict ordering. long timestamp = this.lastTimestamp.accumulateAndGet( - currentTimeMillis(), + timestampSupplier.get(), sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max ); @@ -68,6 +75,6 @@ public String getBase64UUID() { assert buffer.position() == uuidBytes.length; - return BASE_64_NO_PADDING.encodeToString(uuidBytes); + return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(uuidBytes); } } diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 2ed979ae66ffa..9da878fd4af64 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -11,6 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; /** * These are essentially flake ids but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. We also reorder * For more information about flake ids, check out * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ */ - class TimeBasedUUIDGenerator implements UUIDGenerator { // We only use bottom 3 bytes for the sequence number. 
Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - protected final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + protected final AtomicInteger sequenceNumber; + protected final AtomicLong lastTimestamp; - // Used to ensure clock moves forward: - protected final AtomicLong lastTimestamp = new AtomicLong(0); + protected final Supplier<Long> timestampSupplier; private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); @@ -35,18 +35,26 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { assert SECURE_MUNGED_ADDRESS.length == 6; } - // protected for testing - protected long currentTimeMillis() { - return System.currentTimeMillis(); + static final int SIZE_IN_BYTES = 15; + private final byte[] macAddress; + + TimeBasedUUIDGenerator( + final Supplier<Long> timestampSupplier, + final Supplier<Integer> sequenceIdSupplier, + final Supplier<byte[]> macAddressSupplier + ) { + this.timestampSupplier = timestampSupplier; + // NOTE: getting the mac address every time using the supplier is expensive, hence we cache it. + this.macAddress = macAddressSupplier.get(); + this.sequenceNumber = new AtomicInteger(sequenceIdSupplier.get()); + // Used to ensure clock moves forward: + this.lastTimestamp = new AtomicLong(0); } - // protected for testing protected byte[] macAddress() { - return SECURE_MUNGED_ADDRESS; + return macAddress; } - static final int SIZE_IN_BYTES = 15; - @Override public String getBase64UUID() { final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; @@ -55,7 +63,7 @@ public String getBase64UUID() { // still vulnerable if we are shut down, clock goes backwards, and we restart... for this we // randomize the sequenceNumber on init to decrease chance of collision: long timestamp = this.lastTimestamp.accumulateAndGet( - currentTimeMillis(), + timestampSupplier.get(), // Always force the clock to increment whenever sequence number is 0, in case we have a long // time-slip backwards: sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index 0f73b8172c10f..ebcb375bc01bc 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -12,13 +12,29 @@ import org.elasticsearch.common.settings.SecureString; import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +/** + * Utility class for generating various types of UUIDs. 
+ */ public class UUIDs { + private static final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + public static final Supplier<Long> DEFAULT_TIMESTAMP_SUPPLIER = System::currentTimeMillis; + public static final Supplier<Integer> DEFAULT_SEQUENCE_ID_SUPPLIER = sequenceNumber::incrementAndGet; + public static final Supplier<byte[]> DEFAULT_MAC_ADDRESS_SUPPLIER = MacAddressProvider::getSecureMungedAddress; + private static final UUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); - private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - - private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator(); - private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); + private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); /** * The length of a UUID string generated by {@link #base64UUID}. diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 5277999271984..59607fadc0dd9 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -341,5 +341,13 @@ public interface Listener { * @param accountable the bitsets ram representation */ void onRemoval(ShardId shardId, Accountable accountable); + + Listener NOOP = new Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) {} + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) {} + }; } } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java index 2cd6e2b11ef7a..3a210199065b7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -127,16 +127,7 @@ public static Map<String, Object> createAttributesMap( OperationPurpose purpose, String operation ) { - return Map.of( - "repo_type", - repositoryMetadata.type(), - "repo_name", - repositoryMetadata.name(), - "operation", - operation, - "purpose", - purpose.getKey() - ); + return Map.of("repo_type", repositoryMetadata.type(), "operation", operation, "purpose", purpose.getKey()); } } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index cc5e96327b241..f55e3740aaa8f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -1062,13 +1062,6 @@ public static boolean assertCurrentThreadPool(String... 
permittedThreadPoolNames return true; } - public static boolean assertTestThreadPool() { - final var threadName = Thread.currentThread().getName(); - final var executorName = EsExecutors.executorName(threadName); - assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") : threadName + " is not a test thread"; - return true; - } - public static boolean assertInSystemContext(ThreadPool threadPool) { final var threadName = Thread.currentThread().getName(); assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") || threadPool.getThreadContext().isSystemContext() diff --git a/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java new file mode 100644 index 0000000000000..964683a1972ba --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java @@ -0,0 +1,270 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common; + +import org.elasticsearch.test.ESTestCase; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Base64; +import java.util.HashSet; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.IntStream; + +public class TimeBasedUUIDGeneratorTests extends ESTestCase { + + public void testTimeBasedUUIDGeneration() { + assertUUIDFormat(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testTimeBasedUUIDUniqueness() { + assertUUIDUniqueness(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testTimeBasedUUIDSequenceOverflow() { + // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond. + // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow. + // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once, + // ensuring uniqueness even if the sequence ID wraps around. + assertEquals( + 1000, + generateUUIDs( + createGenerator(() -> Instant.now().toEpochMilli(), () -> 0x00FF_FFFF - 10, new TestRandomMacAddressSupplier()), + 1000 + ).size() + ); + } + + public void testTimeBasedUUIDClockReset() { + // Simulate a clock that resets itself after reaching a threshold. + final Supplier<Long> unreliableClock = new TestClockResetTimestampSupplier( + Instant.now(), + 1, + 50, + ChronoUnit.MILLIS, + Instant.now().plus(100, ChronoUnit.MILLIS) + ); + final UUIDGenerator generator = createGenerator(unreliableClock, () -> 0, new TestRandomMacAddressSupplier()); + + final Set<String> beforeReset = generateUUIDs(generator, 5_000); + final Set<String> afterReset = generateUUIDs(generator, 5_000); + + // Ensure all UUIDs are unique, even after the clock resets. 
+ assertEquals(5_000, beforeReset.size()); + assertEquals(5_000, afterReset.size()); + beforeReset.addAll(afterReset); + assertEquals(10_000, beforeReset.size()); + } + + public void testKOrderedUUIDGeneration() { + assertUUIDFormat(createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testKOrderedUUIDUniqueness() { + assertUUIDUniqueness( + createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), + 100_000 + ); + } + + public void testKOrderedUUIDSequenceOverflow() { + final UUIDGenerator generator = createKOrderedGenerator( + () -> Instant.now().toEpochMilli(), + () -> 0x00FF_FFFF - 10, + new TestRandomMacAddressSupplier() + ); + final Set<String> uuids = generateUUIDs(generator, 1000); + + // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond. + // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow. + // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once, + // ensuring uniqueness even if the sequence ID wraps around. + assertEquals(1000, uuids.size()); + } + + public void testUUIDEncodingDecoding() { + testUUIDEncodingDecodingHelper( + Instant.parse("2024-11-13T10:12:43Z").toEpochMilli(), + 12345, + new TestRandomMacAddressSupplier().get() + ); + } + + public void testUUIDEncodingDecodingWithRandomValues() { + testUUIDEncodingDecodingHelper( + randomInstantBetween(Instant.now().minus(1, ChronoUnit.DAYS), Instant.now()).toEpochMilli(), + randomIntBetween(0, 0x00FF_FFFF), + new TestRandomMacAddressSupplier().get() + ); + } + + private void testUUIDEncodingDecodingHelper(final long timestamp, final int sequenceId, final byte[] macAddress) { + final TestTimeBasedKOrderedUUIDDecoder decoder = new TestTimeBasedKOrderedUUIDDecoder( + createKOrderedGenerator(() -> timestamp, () -> sequenceId, () -> macAddress).getBase64UUID() + ); + + // The sequence ID is incremented by 1 when generating the UUID. + assertEquals("Sequence ID does not match", sequenceId + 1, decoder.decodeSequenceId()); + // Truncate the timestamp to milliseconds to match the UUID generation granularity. 
+ assertEquals( + "Timestamp does not match", + Instant.ofEpochMilli(timestamp).truncatedTo(ChronoUnit.MILLIS), + Instant.ofEpochMilli(decoder.decodeTimestamp()).truncatedTo(ChronoUnit.MILLIS) + ); + assertArrayEquals("MAC address does not match", macAddress, decoder.decodeMacAddress()); + } + + private void assertUUIDUniqueness(final UUIDGenerator generator, final int count) { + assertEquals(count, generateUUIDs(generator, count).size()); + } + + private Set<String> generateUUIDs(final UUIDGenerator generator, final int count) { + return IntStream.range(0, count).mapToObj(i -> generator.getBase64UUID()).collect(HashSet::new, Set::add, Set::addAll); + } + + private void assertUUIDFormat(final UUIDGenerator generator, final int count) { + IntStream.range(0, count).forEach(i -> { + final String uuid = generator.getBase64UUID(); + assertNotNull(uuid); + assertEquals(20, uuid.length()); + assertFalse(uuid.contains("+")); + assertFalse(uuid.contains("/")); + assertFalse(uuid.contains("=")); + }); + } + + private UUIDGenerator createGenerator( + final Supplier<Long> timestampSupplier, + final Supplier<Integer> sequenceIdSupplier, + final Supplier<byte[]> macAddressSupplier + ) { + return new TimeBasedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier); + } + + private UUIDGenerator createKOrderedGenerator( + final Supplier<Long> timestampSupplier, + final Supplier<Integer> sequenceIdSupplier, + final Supplier<byte[]> macAddressSupplier + ) { + return new TimeBasedKOrderedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier); + } + + private static class TestRandomMacAddressSupplier implements Supplier<byte[]> { + private final byte[] macAddress = new byte[] { randomByte(), randomByte(), randomByte(), randomByte(), randomByte(), randomByte() }; + + @Override + public byte[] get() { + return macAddress; + } + } + + /** + * A {@link Supplier} implementation that simulates a clock that can move forward or backward in time. + * This supplier provides timestamps in milliseconds since the epoch, adjusting based on a given delta + * until a reset threshold is reached. After crossing the threshold, the timestamp moves backwards by a reset delta. + */ + private static class TestClockResetTimestampSupplier implements Supplier<Long> { + private Instant currentTime; + private final long delta; + private final long resetDelta; + private final ChronoUnit unit; + private final Instant resetThreshold; + + /** + * Constructs a new {@link TestClockResetTimestampSupplier}. + * + * @param startTime The initial starting time. + * @param delta The amount of time to add to the current time in each forward step. + * @param resetDelta The amount of time to subtract once the reset threshold is reached. + * @param unit The unit of time for both delta and resetDelta. + * @param resetThreshold The threshold after which the time is reset backwards. + */ + TestClockResetTimestampSupplier( + final Instant startTime, + final long delta, + final long resetDelta, + final ChronoUnit unit, + final Instant resetThreshold + ) { + this.currentTime = startTime; + this.delta = delta; + this.resetDelta = resetDelta; + this.unit = unit; + this.resetThreshold = resetThreshold; + } + + /** + * Provides the next timestamp in milliseconds since the epoch. + * If the current time is before the reset threshold, it advances the time by the delta. + * Otherwise, it subtracts the reset delta. + * + * @return The current time in milliseconds since the epoch. 
+ */ + @Override + public Long get() { + if (currentTime.isBefore(resetThreshold)) { + currentTime = currentTime.plus(delta, unit); + } else { + currentTime = currentTime.minus(resetDelta, unit); + } + return currentTime.toEpochMilli(); + } + } + + /** + * A utility class to decode the K-ordered UUID extracting the original timestamp, MAC address and sequence ID. + */ + private static class TestTimeBasedKOrderedUUIDDecoder { + + private final byte[] decodedBytes; + + /** + * Constructs a new {@link TestTimeBasedKOrderedUUIDDecoder} using a base64-encoded UUID string. + * + * @param base64UUID The base64-encoded UUID string to decode. + */ + TestTimeBasedKOrderedUUIDDecoder(final String base64UUID) { + this.decodedBytes = Base64.getUrlDecoder().decode(base64UUID); + } + + /** + * Decodes the timestamp from the UUID using the following bytes: + * 0 (most significant), 1, 2, 3, 11, 13 (least significant). + * + * @return The decoded timestamp in milliseconds. + */ + public long decodeTimestamp() { + return ((long) (decodedBytes[0] & 0xFF) << 40) | ((long) (decodedBytes[1] & 0xFF) << 32) | ((long) (decodedBytes[2] & 0xFF) + << 24) | ((long) (decodedBytes[3] & 0xFF) << 16) | ((long) (decodedBytes[11] & 0xFF) << 8) | (decodedBytes[13] & 0xFF); + } + + /** + * Decodes the MAC address from the UUID using bytes 4 to 9. + * + * @return The decoded MAC address as a byte array. + */ + public byte[] decodeMacAddress() { + byte[] macAddress = new byte[6]; + System.arraycopy(decodedBytes, 4, macAddress, 0, 6); + return macAddress; + } + + /** + * Decodes the sequence ID from the UUID using bytes: + * 10 (most significant), 12 (middle), 14 (least significant). + * + * @return The decoded sequence ID. + */ + public int decodeSequenceId() { + return ((decodedBytes[10] & 0xFF) << 16) | ((decodedBytes[12] & 0xFF) << 8) | (decodedBytes[14] & 0xFF); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 9fbeaf1c6c081..71c705f5df511 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -27,26 +27,37 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import java.util.Base64; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.function.Supplier; public class UUIDTests extends ESTestCase { - static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator(); + static final Base64.Decoder BASE_64_URL_DECODER = Base64.getUrlDecoder(); + static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator(); - static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator(); + static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); public void testRandomUUID() { - verifyUUIDSet(100000, randomUUIDGen); + verifyUUIDSet(100000, randomUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testTimeUUID() { - verifyUUIDSet(100000, timeUUIDGen); + verifyUUIDSet(100000, timeUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testKOrderedUUID() { - verifyUUIDSet(100000, kOrderedUUIDGen); + verifyUUIDSet(100000, 
kOrderedUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testThreadedRandomUUID() { @@ -143,6 +154,7 @@ public void testUUIDThreaded(UUIDGenerator uuidSource) { globalSet.addAll(runner.uuidSet); } assertEquals(count * uuids, globalSet.size()); + globalSet.forEach(this::verifyUUIDIsUrlSafe); } private static double testCompression(final UUIDGenerator generator, int numDocs, int numDocsPerSecond, int numNodes, Logger logger) @@ -158,35 +170,25 @@ private static double testCompression(final UUIDGenerator generator, int numDocs UUIDGenerator uuidSource = generator; if (generator instanceof TimeBasedUUIDGenerator) { if (generator instanceof TimeBasedKOrderedUUIDGenerator) { - uuidSource = new TimeBasedKOrderedUUIDGenerator() { + uuidSource = new TimeBasedKOrderedUUIDGenerator(new Supplier<>() { double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); @Override - protected long currentTimeMillis() { + public Long get() { currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); return (long) currentTimeMillis; } - - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); } else { - uuidSource = new TimeBasedUUIDGenerator() { + uuidSource = new TimeBasedUUIDGenerator(new Supplier<>() { double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); @Override - protected long currentTimeMillis() { + public Long get() { currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); return (long) currentTimeMillis; } - - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); } } @@ -237,4 +239,13 @@ public void testStringLength() { private static int getUnpaddedBase64StringLength(int sizeInBytes) { return (int) Math.ceil(sizeInBytes * 4.0 / 3.0); } + + private void verifyUUIDIsUrlSafe(final String uuid) { + assertFalse("UUID should not contain padding characters: " + uuid, uuid.contains("=")); + try { + BASE_64_URL_DECODER.decode(uuid); + } catch (IllegalArgumentException e) { + throw new AssertionError("UUID is not a valid Base64 URL-safe encoded string: " + uuid); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index d7d5c886e0741..77ab665166926 100644 --- a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -94,17 +94,7 @@ public void testInvalidateEntries() throws Exception { DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0)); - BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value"))); assertThat(matchCount(filter, reader), equalTo(3)); @@ -237,17 +227,7 @@ public void testSetNullListener() { } public void testRejectOtherIndex() throws IOException { - BitsetFilterCache cache = 
new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index 9e4a19eb039fd..6b1ffc3693636 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -19,7 +19,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -30,7 +29,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.script.ScriptCompiler; @@ -132,13 +130,7 @@ private CodecService createCodecService() throws IOException { Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER ); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP); MapperService service = new MapperService( () -> TransportVersion.current(), settings, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 0bf4c36d70a90..e0f58b8922be2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; @@ -20,7 +19,6 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptService; @@ -47,13 +45,7 @@ private static MappingParser createMappingParser(Settings settings, IndexVersion IndexAnalyzers indexAnalyzers = createIndexAnalyzers(); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new 
BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); Supplier<MappingParserContext> mappingParserContextSupplier = () -> new MappingParserContext( similarityService::getSimilarity, type -> mapperRegistry.getMapperParser(type, indexSettings.getIndexVersionCreated()), diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index db32d796ea76a..ba186695bcdae 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; @@ -649,13 +648,7 @@ public void testMatchAllOnFilteredIndex() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, @@ -721,13 +714,7 @@ public void testTermOnFilteredIndex() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, @@ -790,13 +777,7 @@ public void testTermOnFilterWithMatchAll() throws IOException { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final IndexSettings indexSettings = createIndexSettings(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, 
BitsetFilterCache.Listener.NOOP); DirectoryReader limitedReader = new DocumentSubsetDirectoryReader( ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)), bitsetFilterCache, diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index 9957d8c92b955..fe07cbf8efdfd 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -53,7 +53,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.Bits; @@ -308,19 +307,8 @@ public void doTestContextIndexSearcher(boolean sparse, boolean deletions) throws w.deleteDocuments(new Term("delete", "yes")); IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); - BitsetFilterCache.Listener listener = new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }; DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w), new ShardId(settings.getIndex(), 0)); - BitsetFilterCache cache = new BitsetFilterCache(settings, listener); + BitsetFilterCache cache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP); Query roleQuery = new TermQuery(new Term("allowed", "yes")); BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0)); if (sparse) { diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java index 29123d6f2e7eb..421478a53e6bc 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java @@ -26,10 +26,10 @@ public class S3HttpFixture extends ExternalResource { private HttpServer server; - private boolean enabled; + private final boolean enabled; private final String bucket; private final String basePath; - protected final String accessKey; + protected volatile String accessKey; public S3HttpFixture() { this(true); @@ -98,4 +98,8 @@ private static InetSocketAddress resolveAddress(String address, int port) { throw new RuntimeException(e); } } + + public void setAccessKey(String accessKey) { + this.accessKey = accessKey; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java index 2e8dc287a4c40..fe1b08d5e738d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java @@ -9,7 +9,6 @@ package org.elasticsearch.index; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -20,7 +19,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import 
org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptCompiler; @@ -62,13 +60,7 @@ public static MapperService newMapperService( IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings); IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers; SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); return new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index bf47efcad7b53..66d87f3532cbd 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -19,7 +19,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; @@ -283,13 +282,7 @@ public MapperService build() { getPlugins().stream().filter(p -> p instanceof MapperPlugin).map(p -> (MapperPlugin) p).collect(toList()) ).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); var mapperService = new MapperService( () -> TransportVersion.current(), @@ -762,17 +755,11 @@ protected SearchExecutionContext createSearchExecutionContext(MapperService mapp IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); final SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); final long nowInMillis = randomNonNegativeLong(); - return new SearchExecutionContext(0, 0, indexSettings, new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }), + return new SearchExecutionContext( + 0, + 0, + indexSettings, + new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP), (ft, fdc) -> ft.fielddataBuilder(fdc).build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), mapperService, mapperService.mappingLookup(), diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 7cd2e6e1cc82e..b50fd4e96044c 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -48,7 +48,6 @@ import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.packed.PackedInts; @@ -366,13 +365,7 @@ private AggregationContext createAggregationContext( context.fielddataOperation() ) ).build(new IndexFieldDataCache.None(), breakerService); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); SearchExecutionContext searchExecutionContext = new SearchExecutionContext( 0, -1, diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index ef6600032ca1b..bdf323afb8d96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -13,7 +13,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.MockResolvedIndices; import org.elasticsearch.action.OriginalIndices; @@ -58,7 +57,6 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.DateFieldRangeInfo; @@ -486,13 +484,7 @@ private static class ServiceHolder implements Closeable { IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings); scriptService = new MockScriptService(Settings.EMPTY, scriptModule.engines, scriptModule.contexts); similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); - this.bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + this.bitsetFilterCache = new BitsetFilterCache(idxSettings, BitsetFilterCache.Listener.NOOP); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); mapperService = new MapperService( clusterService, diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index a253b6bdd2360..0fb4267745cb8 100644 
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -115,24 +115,16 @@ public LongHistogram getCacheMissLoadTimes() { * * @param bytesCopied The number of bytes copied * @param copyTimeNanos The time taken to copy the bytes in nanoseconds - * @param index The index being loaded - * @param shardId The ID of the shard being loaded * @param cachePopulationReason The reason for the cache being populated * @param cachePopulationSource The source from which the data is being loaded */ public void recordCachePopulationMetrics( int bytesCopied, long copyTimeNanos, - String index, - int shardId, CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { Map metricAttributes = Map.of( - INDEX_ATTRIBUTE_KEY, - index, - SHARD_ID_ATTRIBUTE_KEY, - shardId, CACHE_POPULATION_REASON_ATTRIBUTE_KEY, cachePopulationReason.name(), CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY, diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java index ea9d0b7356f0e..435798ba93a8b 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java @@ -30,15 +30,11 @@ public void createMetrics() { public void testRecordCachePopulationMetricsRecordsThroughput() { int mebiBytesSent = randomIntBetween(1, 4); int secondsTaken = randomIntBetween(1, 5); - String indexName = randomIdentifier(); - int shardId = randomIntBetween(0, 10); BlobCacheMetrics.CachePopulationReason cachePopulationReason = randomFrom(BlobCacheMetrics.CachePopulationReason.values()); CachePopulationSource cachePopulationSource = randomFrom(CachePopulationSource.values()); metrics.recordCachePopulationMetrics( Math.toIntExact(ByteSizeValue.ofMb(mebiBytesSent).getBytes()), TimeUnit.SECONDS.toNanos(secondsTaken), - indexName, - shardId, cachePopulationReason, cachePopulationSource ); @@ -48,32 +44,28 @@ public void testRecordCachePopulationMetricsRecordsThroughput() { .getMeasurements(InstrumentType.DOUBLE_HISTOGRAM, "es.blob_cache.population.throughput.histogram") .get(0); assertEquals(throughputMeasurement.getDouble(), (double) mebiBytesSent / secondsTaken, 0.0); - assertExpectedAttributesPresent(throughputMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(throughputMeasurement, cachePopulationReason, cachePopulationSource); // bytes counter Measurement totalBytesMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.bytes.total") .get(0); assertEquals(totalBytesMeasurement.getLong(), ByteSizeValue.ofMb(mebiBytesSent).getBytes()); - assertExpectedAttributesPresent(totalBytesMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalBytesMeasurement, cachePopulationReason, cachePopulationSource); // time counter Measurement totalTimeMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.time.total") .get(0); assertEquals(totalTimeMeasurement.getLong(), TimeUnit.SECONDS.toMillis(secondsTaken)); - assertExpectedAttributesPresent(totalTimeMeasurement, shardId, 
indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalTimeMeasurement, cachePopulationReason, cachePopulationSource); } private static void assertExpectedAttributesPresent( Measurement measurement, - int shardId, - String indexName, BlobCacheMetrics.CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { - assertEquals(measurement.attributes().get(BlobCacheMetrics.SHARD_ID_ATTRIBUTE_KEY), shardId); - assertEquals(measurement.attributes().get(BlobCacheMetrics.INDEX_ATTRIBUTE_KEY), indexName); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_REASON_ATTRIBUTE_KEY), cachePopulationReason.name()); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY), cachePopulationSource.name()); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index c3567da5926b8..e980b1509813e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -29,7 +29,6 @@ import java.util.function.Function; import static java.util.stream.Collectors.toMap; -import static java.util.stream.Collectors.toUnmodifiableMap; import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck; import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck; @@ -276,7 +275,7 @@ public enum DataType { private static final Collection STRING_TYPES = DataType.types().stream().filter(DataType::isString).toList(); - private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); + private static final Map NAME_TO_TYPE; private static final Map ES_TO_TYPE; @@ -287,6 +286,10 @@ public enum DataType { map.put("point", DataType.CARTESIAN_POINT); map.put("shape", DataType.CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); + // DATETIME has different esType and typeName, add an entry in NAME_TO_TYPE with date as key + map = TYPES.stream().collect(toMap(DataType::typeName, t -> t)); + map.put("date", DataType.DATETIME); + NAME_TO_TYPE = Collections.unmodifiableMap(map); } private static final Map NAME_OR_ALIAS_TO_TYPE; diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle index aa19371685ce1..77497597a18c6 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle @@ -15,6 +15,7 @@ apply plugin: 'elasticsearch.bwc-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) javaRestTestImplementation project(xpackModule('esql:qa:server')) + javaRestTestImplementation project(xpackModule('esql')) } def supportedVersion = bwcVersion -> { diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 62391c8ca001a..60eecbb7658b7 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ 
b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -45,6 +45,10 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.ENRICH_SOURCE_INDICES; import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -101,16 +105,25 @@ public MultiClusterSpecIT( @Override protected void shouldSkipTest(String testName) throws IOException { + boolean remoteMetadata = testCase.requiredCapabilities.contains(METADATA_FIELDS_REMOTE_TEST.capabilityName()); + if (remoteMetadata) { + // remove the capability from the test to enable it + testCase.requiredCapabilities = testCase.requiredCapabilities.stream() + .filter(c -> c.equals("metadata_fields_remote_test") == false) + .toList(); + } super.shouldSkipTest(testName); checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase); - assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query)); + // Do not run tests including "METADATA _index" unless marked with metadata_fields_remote_test, + // because they may produce inconsistent results with multiple clusters. + assumeFalse("can't test with _index metadata", (remoteMetadata == false) && hasIndexMetadata(testCase.query)); assumeTrue( "Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, instructions, Clusters.oldVersion()) ); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("join_planning_v1")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -151,6 +164,9 @@ protected RestClient buildClient(Settings settings, HttpHost[] localHosts) throw return twoClients(localClient, remoteClient); } + // These indices are used in metadata tests so we want them on remote only for consistency + public static final List METADATA_INDICES = List.of("employees", "apps", "ul_logs"); + /** * Creates a new mock client that dispatches every request to both the local and remote clusters, excluding _bulk and _query requests. * - '_bulk' requests are randomly sent to either the local or remote cluster to populate data. 
Some spec tests, such as AVG, @@ -166,6 +182,8 @@ static RestClient twoClients(RestClient localClient, RestClient remoteClient) th String endpoint = request.getEndpoint(); if (endpoint.startsWith("/_query")) { return localClient.performRequest(request); + } else if (endpoint.endsWith("/_bulk") && METADATA_INDICES.stream().anyMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { + return remoteClient.performRequest(request); } else if (endpoint.endsWith("/_bulk") && ENRICH_SOURCE_INDICES.stream().noneMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { return bulkClient.performRequest(request); } else { @@ -203,6 +221,9 @@ static Request[] cloneRequests(Request orig, int numClones) throws IOException { return clones; } + /** + * Convert FROM employees ... => FROM *:employees,employees + */ static CsvSpecReader.CsvTestCase convertToRemoteIndices(CsvSpecReader.CsvTestCase testCase) { String query = testCase.query; String[] commands = query.split("\\|"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index b8569ead94509..3be3decaf351c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -716,3 +716,47 @@ FROM employees 2 |1985-10-01T00:00:00.000Z 4 |1985-11-01T00:00:00.000Z ; + +bucketByWeekInString +required_capability: implicit_casting_string_literal_to_temporal_amount +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| STATS hires_per_week = COUNT(*) BY week = BUCKET(hire_date, "1 week") +| SORT week +; + + hires_per_week:long | week:date +2 |1985-02-18T00:00:00.000Z +1 |1985-05-13T00:00:00.000Z +1 |1985-07-08T00:00:00.000Z +1 |1985-09-16T00:00:00.000Z +2 |1985-10-14T00:00:00.000Z +4 |1985-11-18T00:00:00.000Z +; + +bucketByMinuteInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| STATS min = min(@timestamp), max = MAX(@timestamp) BY bucket = BUCKET(@timestamp, "30 minutes") +| SORT min +; + + min:date | max:date | bucket:date +2023-10-23T12:15:03.360Z|2023-10-23T12:27:28.948Z|2023-10-23T12:00:00.000Z +2023-10-23T13:33:34.937Z|2023-10-23T13:55:01.543Z|2023-10-23T13:30:00.000Z +; + +bucketByMonthInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| EVAL adjusted = CASE(TO_LONG(@timestamp) % 2 == 0, @timestamp + 1 month, @timestamp + 2 years) +| STATS c = COUNT(*) BY b = BUCKET(adjusted, "1 month") +| SORT c +; + +c:long |b:date +3 |2025-10-01T00:00:00.000Z +4 |2023-11-01T00:00:00.000Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 237c6a9af197f..7e7c561fac3a5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -1286,3 +1286,108 @@ ROW a = GREATEST(TO_DATETIME("1957-05-23T00:00:00Z"), TO_DATETIME("1958-02-19T00 a:datetime 1958-02-19T00:00:00 ; + +evalDateTruncMonthInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT hire_date +| EVAL x = date_trunc("1 month", hire_date) +| KEEP emp_no, hire_date, x +| LIMIT 5; + +emp_no:integer | hire_date:date | x:date +10009 | 1985-02-18T00:00:00.000Z | 1985-02-01T00:00:00.000Z +10048 | 1985-02-24T00:00:00.000Z | 
1985-02-01T00:00:00.000Z +10098 | 1985-05-13T00:00:00.000Z | 1985-05-01T00:00:00.000Z +10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z +10061 | 1985-09-17T00:00:00.000Z | 1985-09-01T00:00:00.000Z +; + +evalDateTruncHourInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT hire_date +| EVAL x = date_trunc("240 hours", hire_date) +| KEEP emp_no, hire_date, x +| LIMIT 5; + +emp_no:integer | hire_date:date | x:date +10009 | 1985-02-18T00:00:00.000Z | 1985-02-11T00:00:00.000Z +10048 | 1985-02-24T00:00:00.000Z | 1985-02-21T00:00:00.000Z +10098 | 1985-05-13T00:00:00.000Z | 1985-05-12T00:00:00.000Z +10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z +10061 | 1985-09-17T00:00:00.000Z | 1985-09-09T00:00:00.000Z +; + +evalDateTruncDayInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| SORT @timestamp ASC +| EVAL t = DATE_TRUNC("1 day", @timestamp) +| KEEP t; + +t:date +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +2023-10-23T00:00:00 +; + +evalDateTruncMinuteInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| SORT @timestamp ASC +| EVAL t = DATE_TRUNC("1 minute", @timestamp) +| KEEP t; + +t:date +2023-10-23T12:15:00 +2023-10-23T12:27:00 +2023-10-23T13:33:00 +2023-10-23T13:51:00 +2023-10-23T13:52:00 +2023-10-23T13:53:00 +2023-10-23T13:55:00 +; + +evalDateTruncDayInStringNull +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| WHERE emp_no == 10040 +| EVAL x = date_trunc("1 day", birth_date) +| KEEP emp_no, birth_date, x; + +emp_no:integer | birth_date:date | x:date +10040 | null | null +; + +evalDateTruncYearInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +ROW a = 1 +| EVAL year_hired = DATE_TRUNC("1 year", "1991-06-26T00:00:00.000Z") +; + +a:integer | year_hired:date +1 | 1991-01-01T00:00:00.000Z +; + +filteringWithTemporalAmountInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| SORT emp_no +| WHERE birth_date < "2024-01-01" - 70 years +| STATS cnt = count(*); + +cnt:long +19 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec new file mode 100644 index 0000000000000..4d7ee9b1b5af6 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec @@ -0,0 +1,151 @@ +simpleKeep +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +aliasWithSameName +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | eval _index = _index, _version = _version | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +inComparison +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where _index == 
"remote_cluster:employees" | where _version == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +metaIndexInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +FROM employees METADATA _index, _id +| STATS max = MAX(emp_no) BY _index | SORT _index; + +max:integer |_index:keyword +10100 |remote_cluster:employees +; + +metaIndexAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i | SORT _i; + +max:integer |_i:keyword +10100 |remote_cluster:employees +; + +metaVersionInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | stats min = min(emp_no) by _version; + +min:integer |_version:long +10001 |1 +; + +metaVersionAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval _v = _version | stats min = min(emp_no) by _v; + +min:integer |_v:long +10001 |1 +; + +inAggsAndAsGroups +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | stats max = max(_version) by _index | SORT _index; + +max:long |_index:keyword +1 |remote_cluster:employees +; + +inAggsAndAsGroupsAliased +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval _i = _index, _v = _version | stats max = max(_v) by _i | SORT _i; + +max:long |_i:keyword +1 |remote_cluster:employees +; + +inFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where length(_index) == length("remote_cluster:employees") | where abs(_version) == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +inArithmetics +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval i = _version + 2 | stats min = min(emp_no) by i; + +min:integer |i:long +10001 |3 +; + +inSort +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _version, _index desc, emp_no | keep emp_no, _version, _index | limit 2; + +emp_no:integer |_version:long |_index:keyword +10001 |1 |remote_cluster:employees +10002 |1 |remote_cluster:employees +; + +withMvFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval i = mv_avg(_version) + 2 | stats min = min(emp_no) by i; + +min:integer |i:double +10001 |3.0 +; + +overwritten +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; + +emp_no:integer |_index:integer |_version:keyword +10001 |3 |version +10002 |3 |version +10003 |3 |version +; + +multipleIndices +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +FROM ul_logs, apps METADATA _index, _version +| WHERE id IN (13, 14) AND _version == 1 +| EVAL key = CONCAT(_index, "_", TO_STR(id)) +| SORT id, _index +| KEEP id, _index, _version, key +; + + id:long |_index:keyword |_version:long |key:keyword +13 |remote_cluster:apps |1 |remote_cluster:apps_13 
+13 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_13 +14 |remote_cluster:apps |1 |remote_cluster:apps_14 +14 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_14 + +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 1c1929d51a14a..9532e3dc77cb4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -481,12 +481,20 @@ public enum Cap { ADD_LIMIT_INSIDE_MV_EXPAND, DELAY_DEBUG_FN(Build.current().isSnapshot()), + + /** Capability for remote metadata test */ + METADATA_FIELDS_REMOTE_TEST(false), /** * WIP on Join planning * - Introduce BinaryPlan and co * - Refactor INLINESTATS and LOOKUP as a JOIN block */ - JOIN_PLANNING_V1(Build.current().isSnapshot()); + JOIN_PLANNING_V1(Build.current().isSnapshot()), + + /** + * Support implicit casting from string literal to DATE_PERIOD or TIME_DURATION. + */ + IMPLICIT_CASTING_STRING_LITERAL_TO_TEMPORAL_AMOUNT; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9c173795d0ab1..562d42a94483f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.compute.data.Block; import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; @@ -31,7 +30,6 @@ import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -49,6 +47,7 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; @@ -61,6 +60,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import 
org.elasticsearch.xpack.esql.plan.logical.Drop; @@ -86,6 +86,8 @@ import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import java.time.Duration; +import java.time.temporal.TemporalAmount; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; @@ -107,6 +109,7 @@ import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; @@ -116,9 +119,11 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isTemporalAmount; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.LIMIT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.maybeParseTemporalAmount; /** * This class is part of the planner. Resolves references (such as variable and index names) and performs implicit casting. @@ -142,9 +147,14 @@ public class Analyzer extends ParameterizedRuleExecutor( "Resolution", + /* + * ImplicitCasting must run before ResolveRefs: a reference is created for a Bucket in Aggregate's aggregates, and + * resolving that reference before implicit casting may mark it with customMessage=true, which prevents further + * attempts to resolve the reference. + */ + new ImplicitCasting(), new ResolveRefs(), - new ResolveUnionTypes(), // Must be after ResolveRefs, so union types can be found - new ImplicitCasting() + new ResolveUnionTypes() // Must be after ResolveRefs, so union types can be found ); var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnionTypesCleanup()); rules = List.of(init, resolution, finish); @@ -952,13 +962,15 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { } /** - * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison and In to desired data types. + * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison, In and GroupingFunction to desired data types. * For example, the string literals in the following expressions will be cast implicitly to the field data type on the left-hand side. * date > "2024-08-21" * date in ("2024-08-21", "2024-08-22", "2024-08-23") * date = "2024-08-21" + 3 days * ip == "127.0.0.1" * version != "1.0" + * bucket(dateField, "1 month") + * date_trunc("1 minute", dateField) * * If the inputs to Coalesce are mixed numeric types, cast the rest of the numeric field or value to the first numeric data type if * applicable. 
For example, implicit casting converts: @@ -972,15 +984,18 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { private static class ImplicitCasting extends ParameterizedRule { @Override public LogicalPlan apply(LogicalPlan plan, AnalyzerContext context) { - return plan.transformExpressionsUp(ScalarFunction.class, e -> ImplicitCasting.cast(e, context.functionRegistry())); + return plan.transformExpressionsUp( + org.elasticsearch.xpack.esql.core.expression.function.Function.class, + e -> ImplicitCasting.cast(e, context.functionRegistry()) + ); } - private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression cast(org.elasticsearch.xpack.esql.core.expression.function.Function f, EsqlFunctionRegistry registry) { if (f instanceof In in) { return processIn(in); } - if (f instanceof EsqlScalarFunction esf) { - return processScalarFunction(esf, registry); + if (f instanceof EsqlScalarFunction || f instanceof GroupingFunction) { // exclude AggregateFunction until it is needed + return processScalarOrGroupingFunction(f, registry); } if (f instanceof EsqlArithmeticOperation || f instanceof BinaryComparison) { return processBinaryOperator((BinaryOperator) f); @@ -988,7 +1003,10 @@ private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) return f; } - private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression processScalarOrGroupingFunction( + org.elasticsearch.xpack.esql.core.expression.function.Function f, + EsqlFunctionRegistry registry + ) { List args = f.arguments(); List targetDataTypes = registry.getDataTypeForStringLiteralConversion(f.getClass()); if (targetDataTypes == null || targetDataTypes.isEmpty()) { @@ -1011,9 +1029,11 @@ private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFuncti } if (targetDataType != DataType.NULL && targetDataType != DataType.UNSUPPORTED) { Expression e = castStringLiteral(arg, targetDataType); - childrenChanged = true; - newChildren.add(e); - continue; + if (e != arg) { + childrenChanged = true; + newChildren.add(e); + continue; + } } } } else if (dataType.isNumeric() && canCastMixedNumericTypes(f) && castNumericArgs) { @@ -1095,7 +1115,7 @@ private static Expression processIn(In in) { return childrenChanged ? in.replaceChildren(newChildren) : in; } - private static boolean canCastMixedNumericTypes(EsqlScalarFunction f) { + private static boolean canCastMixedNumericTypes(org.elasticsearch.xpack.esql.core.expression.function.Function f) { return f instanceof Coalesce; } @@ -1142,19 +1162,37 @@ private static boolean supportsStringImplicitCasting(DataType type) { return type == DATETIME || type == IP || type == VERSION || type == BOOLEAN; } - public static Expression castStringLiteral(Expression from, DataType target) { + private static UnresolvedAttribute unresolvedAttribute(Expression value, String type, Exception e) { + String message = format( + "Cannot convert string [{}] to [{}], error [{}]", + value.fold(), + type, + (e instanceof ParsingException pe) ? pe.getErrorMessage() : e.getMessage() + ); + return new UnresolvedAttribute(value.source(), String.valueOf(value.fold()), message); + } + + private static Expression castStringLiteralToTemporalAmount(Expression from) { + try { + TemporalAmount result = maybeParseTemporalAmount(from.fold().toString().strip()); + if (result == null) { + return from; + } + DataType target = result instanceof Duration ? 
TIME_DURATION : DATE_PERIOD; + return new Literal(from.source(), result, target); + } catch (Exception e) { + return unresolvedAttribute(from, DATE_PERIOD + " or " + TIME_DURATION, e); + } + } + + private static Expression castStringLiteral(Expression from, DataType target) { assert from.foldable(); try { - Object to = EsqlDataTypeConverter.convert(from.fold(), target); - return new Literal(from.source(), to, target); + return isTemporalAmount(target) + ? castStringLiteralToTemporalAmount(from) + : new Literal(from.source(), EsqlDataTypeConverter.convert(from.fold(), target), target); } catch (Exception e) { - String message = LoggerMessageFormat.format( - "Cannot convert string [{}] to [{}], error [{}]", - from.fold(), - target, - e.getMessage() - ); - return new UnresolvedAttribute(from.source(), String.valueOf(from.fold()), message); + return unresolvedAttribute(from, target.toString(), e); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 632f52d163349..7be07a7659f66 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -688,7 +688,7 @@ private static void checkFullTextQueryFunctions(LogicalPlan plan, Set f plan, condition, Match.class, - lp -> (lp instanceof Limit == false), + lp -> (lp instanceof Limit == false) && (lp instanceof Aggregate == false), m -> "[" + m.functionName() + "] " + m.functionType(), failures ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index d1aef0e46caca..ca02441d2e1ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -160,27 +160,30 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.IP; -import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; +import static org.elasticsearch.xpack.esql.core.type.DataType.isString; public class EsqlFunctionRegistry { - private static final Map, List> dataTypesForStringLiteralConversion = new LinkedHashMap<>(); + private static final Map, 
List> DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS = new LinkedHashMap<>(); - private static final Map dataTypeCastingPriority; + private static final Map DATA_TYPE_CASTING_PRIORITY; static { List typePriorityList = Arrays.asList( DATETIME, + DATE_PERIOD, + TIME_DURATION, DOUBLE, LONG, INTEGER, @@ -194,9 +197,9 @@ public class EsqlFunctionRegistry { UNSIGNED_LONG, UNSUPPORTED ); - dataTypeCastingPriority = new HashMap<>(); + DATA_TYPE_CASTING_PRIORITY = new HashMap<>(); for (int i = 0; i < typePriorityList.size(); i++) { - dataTypeCastingPriority.put(typePriorityList.get(i), i); + DATA_TYPE_CASTING_PRIORITY.put(typePriorityList.get(i), i); } } @@ -257,7 +260,7 @@ public Collection listFunctions(String pattern) { .collect(toList()); } - private FunctionDefinition[][] functions() { + private static FunctionDefinition[][] functions() { return new FunctionDefinition[][] { // grouping functions new FunctionDefinition[] { def(Bucket.class, Bucket::new, "bucket", "bin"), }, @@ -437,6 +440,11 @@ public static String normalizeName(String name) { } public record ArgSignature(String name, String[] type, String description, boolean optional, DataType targetDataType) { + + public ArgSignature(String name, String[] type, String description, boolean optional) { + this(name, type, description, optional, UNSUPPORTED); + } + @Override public String toString() { return "ArgSignature{" @@ -477,17 +485,24 @@ public List argDescriptions() { } } - public static DataType getTargetType(String[] names) { + /** + * Build a list of target data types, used by ImplicitCasting to convert string literals to a target data type. + */ + private static DataType getTargetType(String[] names) { List types = new ArrayList<>(); for (String name : names) { - types.add(DataType.fromEs(name)); - } - if (types.contains(KEYWORD) || types.contains(TEXT)) { - return UNSUPPORTED; + DataType type = DataType.fromTypeName(name); + if (type != null && type != UNSUPPORTED) { // A type should never be null or UNSUPPORTED here; this is just a sanity check + // If the function takes strings as input, there is no need to cast a string literal to it. + // Returning UNSUPPORTED means that ImplicitCasting doesn't support this argument, and the argument will be skipped. + if (isString(type)) { + return UNSUPPORTED; + } + types.add(type); + } } - return types.stream() - .min((dt1, dt2) -> dataTypeCastingPriority.get(dt1).compareTo(dataTypeCastingPriority.get(dt2))) + return types.stream() + .min((dt1, dt2) -> DATA_TYPE_CASTING_PRIORITY.get(dt1).compareTo(DATA_TYPE_CASTING_PRIORITY.get(dt2))) .orElse(UNSUPPORTED); } @@ -559,7 +574,7 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]... 
gr } public List getDataTypeForStringLiteralConversion(Class clazz) { - return dataTypesForStringLiteralConversion.get(clazz); + return DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS.get(clazz); } private static class SnapshotFunctionRegistry extends EsqlFunctionRegistry { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index c9c292769b570..4bfc9ac5d848f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -274,27 +274,11 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy return null; } StringBuilder value = new StringBuilder(); - StringBuilder qualifier = new StringBuilder(); - StringBuilder nextBuffer = value; - boolean lastWasSpace = false; - for (char c : str.trim().toCharArray()) { - if (c == ' ') { - if (lastWasSpace == false) { - nextBuffer = nextBuffer == value ? qualifier : null; - } - lastWasSpace = true; - continue; - } - if (nextBuffer == null) { - throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); - } - nextBuffer.append(c); - lastWasSpace = false; - } - - if ((value.isEmpty() || qualifier.isEmpty()) == false) { + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str.strip(), value, temporalUnit, errorMessage, expectedType.toString()); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { try { - TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), qualifier.toString(), Source.EMPTY); + TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); if (DataType.DATE_PERIOD == expectedType && result instanceof Period || DataType.TIME_DURATION == expectedType && result instanceof Duration) { return result; @@ -312,6 +296,48 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); } + public static TemporalAmount maybeParseTemporalAmount(String str) { + // The string literal can be either Date_Period or Time_Duration, derive the data type from its temporal unit + String errorMessage = "Cannot parse [{}] to {}"; + String expectedTypes = DATE_PERIOD + " or " + TIME_DURATION; + StringBuilder value = new StringBuilder(); + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str, value, temporalUnit, errorMessage, expectedTypes); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { + try { + return parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); + } catch (NumberFormatException ex) { + throw new ParsingException(Source.EMPTY, errorMessage, str, expectedTypes); + } + } + return null; + } + + private static void separateValueAndTemporalUnitForTemporalAmount( + String temporalAmount, + StringBuilder value, + StringBuilder temporalUnit, + String errorMessage, + String expectedType + ) { + StringBuilder nextBuffer = value; + boolean lastWasSpace = false; + for (char c : temporalAmount.toCharArray()) { + if (c == ' ') { + if (lastWasSpace == false) { + nextBuffer = nextBuffer == value ? 
temporalUnit : null; + } + lastWasSpace = true; + continue; + } + if (nextBuffer == null) { + throw new ParsingException(Source.EMPTY, errorMessage, temporalAmount, expectedType); + } + nextBuffer.append(c); + lastWasSpace = false; + } + } + /** * Converts arbitrary object to the desired data type. *
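The hunks above extract the value/unit splitting into `separateValueAndTemporalUnitForTemporalAmount` and add `maybeParseTemporalAmount`, which derives the target type from the temporal unit alone instead of requiring the caller to know it up front. A minimal standalone sketch of that rule, assuming Java 17 and covering only a subset of the unit spellings the real `EsqlDataTypeConverter` accepts (class name and error handling here are hypothetical; the real code reports failures through `ParsingException`):

```java
import java.time.Duration;
import java.time.Period;
import java.time.temporal.TemporalAmount;
import java.util.Locale;

// Hypothetical stand-in for the EsqlDataTypeConverter logic: split "<value> <unit>",
// then let the unit decide between a Period (DATE_PERIOD) and a Duration (TIME_DURATION).
public class TemporalAmountSketch {

    public static TemporalAmount parse(String literal) {
        String[] parts = literal.strip().split("\\s+");
        if (parts.length != 2) {
            throw new IllegalArgumentException("Cannot parse [" + literal + "] to DATE_PERIOD or TIME_DURATION");
        }
        // Non-integer values such as "1.5 minutes" are rejected here, matching the Verifier errors.
        int value = Integer.parseInt(parts[0]);
        return switch (parts[1].toLowerCase(Locale.ROOT)) {
            // sub-day units -> Duration -> TIME_DURATION
            case "millisecond", "milliseconds", "ms" -> Duration.ofMillis(value);
            case "second", "seconds", "sec", "s" -> Duration.ofSeconds(value);
            case "minute", "minutes", "min" -> Duration.ofMinutes(value);
            case "hour", "hours", "h" -> Duration.ofHours(value);
            // day-or-longer units -> Period -> DATE_PERIOD
            case "day", "days", "d" -> Period.ofDays(value);
            case "week", "weeks", "w" -> Period.ofDays(7 * value);
            case "month", "months", "mo" -> Period.ofMonths(value);
            case "year", "years", "yr", "y" -> Period.ofYears(value);
            default -> throw new IllegalArgumentException("Unexpected temporal unit: '" + parts[1] + "'");
        };
    }

    public static void main(String[] args) {
        System.out.println(parse("1 week"));     // P7D   -> DATE_PERIOD
        System.out.println(parse("30 minutes")); // PT30M -> TIME_DURATION
    }
}
```

Units of a day or longer yield a `Period` (a calendar-aware DATE_PERIOD), while sub-day units yield a fixed-length `Duration` (TIME_DURATION), which is why the same string syntax can serve both target types.
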
@@ -394,10 +420,10 @@ public static DataType commonType(DataType left, DataType right) { } // generally supporting abbreviations from https://en.wikipedia.org/wiki/Unit_of_time - public static TemporalAmount parseTemporalAmount(Number value, String qualifier, Source source) throws InvalidArgumentException, + public static TemporalAmount parseTemporalAmount(Number value, String temporalUnit, Source source) throws InvalidArgumentException, ArithmeticException, ParsingException { try { - return switch (INTERVALS.valueOf(qualifier.toUpperCase(Locale.ROOT))) { + return switch (INTERVALS.valueOf(temporalUnit.toUpperCase(Locale.ROOT))) { case MILLISECOND, MILLISECONDS, MS -> Duration.ofMillis(safeToLong(value)); case SECOND, SECONDS, SEC, S -> Duration.ofSeconds(safeToLong(value)); case MINUTE, MINUTES, MIN -> Duration.ofMinutes(safeToLong(value)); @@ -410,7 +436,7 @@ public static TemporalAmount parseTemporalAmount(Number value, String qualifier, case YEAR, YEARS, YR, Y -> Period.ofYears(safeToInt(safeToLong(value))); }; } catch (IllegalArgumentException e) { - throw new ParsingException(source, "Unexpected time interval qualifier: '{}'", qualifier); + throw new ParsingException(source, "Unexpected temporal unit: '{}'", temporalUnit); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 0a34d6cd848bb..0e0c2de11fac3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1183,6 +1183,10 @@ public void testMatchFunctionNotAllowedAfterCommands() throws Exception { "1:24: [MATCH] function cannot be used after LIMIT", error("from test | limit 10 | where match(first_name, \"Anna\")") ); + assertEquals( + "1:47: [MATCH] function cannot be used after STATS", + error("from test | STATS c = AVG(salary) BY gender | where match(gender, \"F\")") + ); } public void testMatchFunctionAndOperatorHaveCorrectErrorMessages() throws Exception { @@ -1667,6 +1671,72 @@ public void testToDatePeriodToTimeDurationWithInvalidType() { ); } + public void testIntervalAsString() { + // DateTrunc + for (String interval : List.of("1 minu", "1 dy", "1.5 minutes", "0.5 days", "minutes 1", "day 5")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString("1:35: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString("1:67: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + } + for (String interval : List.of("1", "0.5", "invalid")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString( + "1:24: first argument of [date_trunc(\"" + + interval + + "\", \"1991-06-26T00:00:00.000Z\")] must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type [keyword]" + ) + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString( + "1:56: first argument of [date_trunc(\"" + + interval + + "\", x::datetime)] " + + "must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type 
[keyword]" + ) + ); + } + + // Bucket + assertEquals( + "1:52: Cannot convert string [1 yar] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'yar']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 yar\")") + ); + assertEquals( + "1:52: Cannot convert string [1 hur] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'hur']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 hur\")") + ); + assertEquals( + "1:58: Cannot convert string [1 mu] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'mu']", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1 mu\") | sort max ") + ); + assertEquals( + "1:34: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max(emp_no) by bucket(hire_date, \"1\")") + ); + assertEquals( + "1:40: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1\") | sort max ") + ); + assertEquals( + "1:68: second argument of [bucket(y, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | eval x = emp_no, y = hire_date | stats max = max(x) by bucket(y, \"1\") | sort max ") + ); + } + private void query(String query) { defaultAnalyzer.analyze(parser.createStatement(query)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 6a552f400d36e..181b8d52bf888 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -879,8 +879,7 @@ public static void renderDocs() throws IOException { "elseValue", trueValue.type(), "The value that's returned when no condition evaluates to `true`.", - true, - EsqlFunctionRegistry.getTargetType(trueValue.type()) + true ); description = new EsqlFunctionRegistry.FunctionDescription( description.name(), @@ -1085,8 +1084,7 @@ private static void renderDocsForOperators(String name) throws IOException { String[] type = paramInfo == null ? new String[] { "?" } : paramInfo.type(); String desc = paramInfo == null ? "" : paramInfo.description().replace('\n', ' '); boolean optional = paramInfo == null ? 
false : paramInfo.optional(); - DataType targetDataType = EsqlFunctionRegistry.getTargetType(type); - args.add(new EsqlFunctionRegistry.ArgSignature(paramName, type, desc, optional, targetDataType)); + args.add(new EsqlFunctionRegistry.ArgSignature(paramName, type, desc, optional)); } } renderKibanaFunctionDefinition(name, functionInfo, args, likeOrInOperator(name)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index 67b4dd71260aa..0177747d27243 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -431,7 +431,7 @@ public void testDatePeriodLiterals() { } public void testUnknownNumericQualifier() { - assertParsingException(() -> whereExpression("1 decade"), "Unexpected time interval qualifier: 'decade'"); + assertParsingException(() -> whereExpression("1 decade"), "Unexpected temporal unit: 'decade'"); } public void testQualifiedDecimalLiteral() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml index ea7684fb69a09..9fbe69ac05f0a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml @@ -234,3 +234,58 @@ - match: { values.2.1: "2024-08-01T00:00:00.000Z" } - match: { values.3.0: 1 } - match: { values.3.1: "2024-09-01T00:00:00.000Z" } + +--- +"Datetime interval as string": + - requires: + test_runner_features: [allowed_warnings_regex, capabilities] + capabilities: + - method: POST + path: /_query + parameters: [ ] + capabilities: [ implicit_casting_string_literal_to_temporal_amount ] + reason: "interval in parameters as string" + + - do: + indices.create: + index: test_bucket + body: + mappings: + properties: + ts : + type : date + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-06-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-08-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-09-16" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, ?bucket) | SORT b' + params: [{"bucket" : "1 month"}] + + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-06-01T00:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-01T00:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-08-01T00:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-09-01T00:00:00.000Z" }