From d3aec39c72e0a7c5f4ad0ed684a1b57efc822a28 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tim=20R=C3=BChsen?=
Date: Tue, 7 Nov 2023 10:18:31 +0100
Subject: [PATCH 01/30] [Profiling] Add helper class StopWatch (#101683)

* [Profiling] Add helper class StopWatch

* Generalize StopWatch

* Simplify StopWatch, use lambda for logging

---------

Co-authored-by: Elastic Machine
---
 .../xpack/profiling/StopWatch.java            | 35 +++++++++++++++++++
 .../TransportGetFlamegraphAction.java         | 12 ++-----
 .../TransportGetStackTracesAction.java        | 16 ++++-----
 3 files changed, 46 insertions(+), 17 deletions(-)
 create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java

diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java
new file mode 100644
index 0000000000000..c423fe12f3581
--- /dev/null
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+/**
+ * Measures time and logs it in milliseconds.
+ */
+public final class StopWatch {
+    private final String name;
+    private final long start;
+
+    public StopWatch(String name) {
+        this.name = name;
+        start = System.nanoTime();
+    }
+
+    /**
+     * Return a textual report including the name and the number of elapsed milliseconds since object creation.
+     */
+    public String report() {
+        return name + " took [" + millis() + " ms].";
+    }
+
+    /**
+     * Return number of elapsed milliseconds since object creation.
+     */
+    public double millis() {
+        return (System.nanoTime() - start) / 1_000_000.0d;
+    }
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
index f26a6b1fb3a84..b791684bec233 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
@@ -44,20 +44,14 @@ public TransportGetFlamegraphAction(NodeClient nodeClient, TransportService tran
     @Override
     protected void doExecute(Task task, GetStackTracesRequest request, ActionListener<GetFlamegraphResponse> listener) {
         Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), task);
-        long start = System.nanoTime();
+        StopWatch watch = new StopWatch("getFlamegraphAction");
         client.execute(GetStackTracesAction.INSTANCE, request, new ActionListener<>() {
             @Override
             public void onResponse(GetStackTracesResponse response) {
-                long responseStart = System.nanoTime();
                 try {
+                    StopWatch processingWatch = new StopWatch("Processing response");
                     GetFlamegraphResponse flamegraphResponse = buildFlamegraph(response);
-                    log.debug(
-                        "getFlamegraphAction took ["
-                            + (System.nanoTime() - start) / 1_000_000.0d
-                            + "] ms (processing response: ["
-                            + (System.nanoTime() - responseStart) / 1_000_000.0d
-                            + "] ms."
-                    );
+                    log.debug(() -> watch.report() + " " + processingWatch.report());
                     listener.onResponse(flamegraphResponse);
                 } catch (Exception ex) {
                     listener.onFailure(ex);
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
index e15792adc489d..8b9fce4d04040 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
@@ -126,7 +126,7 @@ public TransportGetStackTracesAction(
     @Override
     protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionListener<GetStackTracesResponse> submitListener) {
         licenseChecker.requireSupportedLicense();
-        long start = System.nanoTime();
+        StopWatch watch = new StopWatch("getResampledIndex");
         Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), submitTask);
         EventsIndex mediumDownsampled = EventsIndex.MEDIUM_DOWNSAMPLED;
         client.prepareSearch(mediumDownsampled.getName())
@@ -143,7 +143,7 @@ protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionL
                     mediumDownsampled,
                     resampledIndex
                 );
-                log.debug("getResampledIndex took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms].");
+                log.debug(() -> watch.report());
                 searchEventGroupByStackTrace(client, request, resampledIndex, submitListener);
             }, e -> {
                 // All profiling-events data streams are created lazily. In a relatively empty cluster it can happen that there are so few
@@ -166,7 +166,7 @@ private void searchEventGroupByStackTrace(
         EventsIndex eventsIndex,
         ActionListener<GetStackTracesResponse> submitListener
     ) {
-        long start = System.nanoTime();
+        StopWatch watch = new StopWatch("searchEventGroupByStackTrace");
         GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder();
         responseBuilder.setSampleRate(eventsIndex.getSampleRate());
         client.prepareSearch(eventsIndex.getName())
@@ -216,7 +216,7 @@ private void searchEventGroupByStackTrace(
                     totalFinalCount,
                     stackTraceEvents.size()
                 );
-                log.debug("searchEventGroupByStackTrace took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms].");
+                log.debug(() -> watch.report());
                 if (stackTraceEvents.isEmpty() == false) {
                     responseBuilder.setStart(Instant.ofEpochMilli(minTime));
                     responseBuilder.setEnd(Instant.ofEpochMilli(maxTime));
@@ -287,7 +287,7 @@ private class StackTraceHandler {
         private final Set<String> stackFrameIds = new ConcurrentSkipListSet<>();
         private final Set<String> executableIds = new ConcurrentSkipListSet<>();
         private final AtomicInteger totalFrames = new AtomicInteger();
-        private final long start = System.nanoTime();
+        private final StopWatch watch = new StopWatch("retrieveStackTraces");
 
         private StackTraceHandler(
             ClusterState clusterState,
@@ -334,7 +334,7 @@ public void onResponse(MultiGetResponse multiGetItemResponses) {
                 stackFrameIds.size(),
                 executableIds.size()
             );
-            log.debug("retrieveStackTraces took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms].");
+            log.debug(() -> watch.report());
             retrieveStackTraceDetails(
                 clusterState,
                 client,
@@ -409,7 +409,7 @@ private static class DetailsHandler {
         private final Map<String, String> executables;
         private final Map<String, StackFrame> stackFrames;
         private final AtomicInteger expectedSlices;
-        private final long start = System.nanoTime();
+        private final StopWatch watch = new StopWatch("retrieveStackTraceDetails");
 
         private DetailsHandler(
            GetStackTracesResponseBuilder builder,
@@ -479,7 +479,7 @@ public void mayFinish() {
                 builder.setExecutables(executables);
                 builder.setStackFrames(stackFrames);
                 log.debug("retrieveStackTraceDetails found [{}] stack frames, [{}] executables.", stackFrames.size(), executables.size());
-                log.debug("retrieveStackTraceDetails took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms].");
+                log.debug(() -> watch.report());
                 submitListener.onResponse(builder.build());
             }
         }

From c773e04761f9709c1c190ab0bebb69a6356ce4b7 Mon Sep 17 00:00:00 2001
From: Artem Prigoda
Date: Tue, 7 Nov 2023 10:23:23 +0100
Subject: [PATCH 02/30] Respect regional AWS STS endpoints (#101705)

The AWS SDK supports regional STS endpoints via the
AWS_STS_REGIONAL_ENDPOINTS environment variable. If the user sets it to
regional and provides the region in the AWS_REGION env variable, we
should respect that and make the STS client use the regionally adjusted
STS endpoint like https://sts.us-west-2.amazonaws.com.

See https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html

Resolves https://github.com/elastic/elasticsearch/issues/89175
---
 docs/changelog/101705.yaml                    |  6 +++
 .../repositories/s3/S3Service.java            | 28 ++++++++++--
 ...IdentityTokenCredentialsProviderTests.java | 44 ++++++++++++++++---
 3 files changed, 69 insertions(+), 9 deletions(-)
 create mode 100644 docs/changelog/101705.yaml

diff --git a/docs/changelog/101705.yaml b/docs/changelog/101705.yaml
new file mode 100644
index 0000000000000..baa7e69d48d88
--- /dev/null
+++ b/docs/changelog/101705.yaml
@@ -0,0 +1,6 @@
+pr: 101705
+summary: Respect regional AWS STS endpoints
+area: Snapshot/Restore
+type: bug
+issues:
+ - 89175
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
index 291cf84019cd1..25bba12db6952 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.repositories.s3;
 
 import com.amazonaws.ClientConfiguration;
+import com.amazonaws.SDKGlobalConfiguration;
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.AWSCredentialsProviderChain;
@@ -320,6 +321,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials
 
         private STSAssumeRoleWithWebIdentitySessionCredentialsProvider credentialsProvider;
         private AWSSecurityTokenService stsClient;
+        private String stsRegion;
 
         CustomWebIdentityTokenCredentialsProvider(
             Environment environment,
@@ -361,10 +363,24 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials
             );
             AWSSecurityTokenServiceClientBuilder stsClientBuilder = AWSSecurityTokenServiceClient.builder();
 
-            // Custom system property used for specifying a mocked version of the STS for testing
-            String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME);
-            // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally.
-            stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null));
+            // Check if we need to use regional STS endpoints
+            // https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html
+            if ("regional".equalsIgnoreCase(systemEnvironment.getEnv("AWS_STS_REGIONAL_ENDPOINTS"))) {
+                // AWS_REGION should be injected by the EKS pod identity webhook:
+                // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41
+                stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR);
+                if (stsRegion != null) {
+                    stsClientBuilder.withRegion(stsRegion);
+                } else {
+                    LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set");
+                }
+            }
+            if (stsRegion == null) {
+                // Custom system property used for specifying a mocked version of the STS for testing
+                String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME);
+                // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally.
+                stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null));
+            }
             stsClientBuilder.withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()));
             stsClient = SocketAccess.doPrivileged(stsClientBuilder::build);
             try {
@@ -383,6 +399,10 @@ boolean isActive() {
             return credentialsProvider != null;
         }
 
+        String getStsRegion() {
+            return stsRegion;
+        }
+
         @Override
         public AWSCredentials getCredentials() {
             Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set");
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
index 04c47bb9b55e6..f245b1ad91fe4 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
@@ -22,6 +22,7 @@
 import org.junit.Assert;
 import org.mockito.Mockito;
 
+import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URLDecoder;
@@ -42,6 +43,15 @@ public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase {
     private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole";
     private static final String ROLE_NAME = "aws-sdk-java-1651084775908";
 
+    private static Environment getEnvironment() throws IOException {
+        Path configDirectory = Files.createTempDirectory("web-identity-token-test");
+        Files.createDirectory(configDirectory.resolve("repository-s3"));
+        Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl");
+        Environment environment = Mockito.mock(Environment.class);
+        Mockito.when(environment.configFile()).thenReturn(configDirectory);
+        return environment;
+    }
+
     @SuppressForbidden(reason = "HTTP server is used for testing")
     public void testCreateWebIdentityTokenCredentialsProvider() throws Exception {
         HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0);
@@ -88,11 +98,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception {
         });
         httpServer.start();
 
-        Path configDirectory = Files.createTempDirectory("web-identity-token-test");
-        Files.createDirectory(configDirectory.resolve("repository-s3"));
-        Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl");
-        Environment environment = Mockito.mock(Environment.class);
-        Mockito.when(environment.configFile()).thenReturn(configDirectory);
+        Environment environment = getEnvironment();
 
         // No region is set, but the SDK shouldn't fail because of that
         Map<String, String> environmentVariables = Map.of(
@@ -125,4 +131,32 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception {
             httpServer.stop(0);
         }
     }
+
+    public void testSupportRegionalizedEndpoints() throws Exception {
+        Map<String, String> environmentVariables = Map.of(
+            "AWS_WEB_IDENTITY_TOKEN_FILE",
+            "/var/run/secrets/eks.amazonaws.com/serviceaccount/token",
+            "AWS_ROLE_ARN",
+            ROLE_ARN,
+            "AWS_STS_REGIONAL_ENDPOINTS",
+            "regional",
+            "AWS_REGION",
+            "us-west-2"
+        );
+        Map<String, String> systemProperties = Map.of();
+
+        var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider(
+            getEnvironment(),
+            environmentVariables::get,
+            systemProperties::getOrDefault,
+            Clock.systemUTC()
+        );
+        // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com"
+        // endpoint in a unit test. The client depends on a hardcoded RegionalEndpointsOptionResolver that in turn depends
+        // on the system environment that we can't change in the test. So we just verify that we called `withRegion`
+        // on stsClientBuilder which should internally correctly configure the endpoint when the STS client is built.
+        assertEquals("us-west-2", webIdentityTokenCredentialsProvider.getStsRegion());
+
+        webIdentityTokenCredentialsProvider.shutdown();
+    }
 }

From eed3c6b15c8291a3fa43bd6972418ab8040b4384 Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 7 Nov 2023 11:16:20 +0100
Subject: [PATCH 03/30] Make enforce TestConvention cc compatible (#101822)

---
 rest-api-spec/build.gradle | 3 ++-
 x-pack/plugin/build.gradle | 6 ++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle
index e484b98d3188e..787d684c3779e 100644
--- a/rest-api-spec/build.gradle
+++ b/rest-api-spec/build.gradle
@@ -228,8 +228,9 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task ->
 }
 
 tasks.register('enforceYamlTestConvention').configure {
+  def tree = fileTree('src/main/resources/rest-api-spec/test')
   doLast {
-    if (fileTree('src/main/resources/rest-api-spec/test').files) {
+    if (tree.files) {
       throw new GradleException("There are YAML tests in src/main source set. These should be moved to src/yamlRestTest.")
     }
   }
diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle
index 17495a3568923..eae3031512d4f 100644
--- a/x-pack/plugin/build.gradle
+++ b/x-pack/plugin/build.gradle
@@ -177,16 +177,18 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task ->
 }
 
 tasks.register('enforceApiSpecsConvention').configure {
+  def mainApiSpecs = fileTree('src/test/resources/rest-api-spec/api')
   doLast {
-    if (fileTree('src/test/resources/rest-api-spec/api').files) {
+    if (mainApiSpecs.files) {
      throw new GradleException("There are REST specs in src/test source set. These should be moved to the :rest-api-spec project.")
    }
  }
}

tasks.register('enforceYamlTestConvention').configure {
+  def mainYamlFiles = fileTree('src/test/resources/rest-api-spec/test')
   doLast {
-    if (fileTree('src/test/resources/rest-api-spec/test').files) {
+    if (mainYamlFiles.files) {
       throw new GradleException("There are YAML tests in src/test source set. These should be moved to src/yamlRestTest.")
     }
   }

From e1184cd7a8ac3d3c1cf3abb5dbf8456fe3c4ab7c Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 7 Nov 2023 11:17:21 +0100
Subject: [PATCH 04/30] Fix snippet task cc incompatibilities (#101823)

Addresses some Gradle configuration cache issues related to
https://github.com/elastic/elasticsearch/issues/57918
---
 .../gradle/internal/doc/DocsTestPlugin.groovy | 19 ++++++++++++-----
 .../doc/RestTestsFromSnippetsTask.groovy      | 21 +++++++++++++++----
 .../gradle/internal/doc/SnippetsTask.groovy   |  7 ++++---
 3 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
index 874141f2135ad..38b4cb499eeb9 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy
@@ -12,6 +12,7 @@ import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask
 import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask
+import org.gradle.api.Action
 import org.gradle.api.Plugin
 import org.gradle.api.Project
 import org.gradle.api.file.Directory
@@ -61,16 +62,24 @@ class DocsTestPlugin implements Plugin<Project> {
             group 'Docs'
             description 'List each snippet'
             defaultSubstitutions = commonDefaultSubstitutions
-            perSnippet { println(it.toString()) }
+            perSnippet = new Action<SnippetsTask.Snippet>() {
+                @Override
+                void execute(SnippetsTask.Snippet snippet) {
+                    println(snippet.toString())
+                }
+            }
         }
         project.tasks.register('listConsoleCandidates', SnippetsTask) {
             group 'Docs'
             description 'List snippets that probably should be marked // CONSOLE'
             defaultSubstitutions = commonDefaultSubstitutions
-            perSnippet {
-                if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) {
-                    println(it.toString())
+            perSnippet = new Action<SnippetsTask.Snippet>() {
+                @Override
+                void execute(SnippetsTask.Snippet snippet) {
+                    if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
+                        println(snippet.toString())
+                    }
                 }
             }
         }
@@ -80,7 +89,7 @@ class DocsTestPlugin implements Plugin<Project> {
             defaultSubstitutions = commonDefaultSubstitutions
             testRoot.convention(restRootDir)
             doFirst {
-                fileOperations.delete(restRootDir)
+                getFileOperations().delete(testRoot.get())
             }
         }
 
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
index eda86355ee306..81207181dc9a7 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy
@@ -10,8 +10,10 @@ package org.elasticsearch.gradle.internal.doc
 
 import groovy.transform.PackageScope
 import org.elasticsearch.gradle.internal.doc.SnippetsTask.Snippet
+import org.gradle.api.Action
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.file.DirectoryProperty
+import org.gradle.api.internal.file.FileOperations
 import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.Internal
 import org.gradle.api.tasks.OutputDirectory
@@ -24,7 +26,7 @@ import java.nio.file.Path
 /**
  * Generates REST tests for each snippet marked // TEST.
  */
-class RestTestsFromSnippetsTask extends SnippetsTask {
+abstract class RestTestsFromSnippetsTask extends SnippetsTask {
     /**
      * These languages aren't supported by the syntax highlighter so we
     * shouldn't use them.
@@ -64,13 +66,23 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
     @Internal
     Set<String> names = new HashSet<>()
 
+    @Inject
+    abstract FileOperations getFileOperations();
+
     @Inject
     RestTestsFromSnippetsTask(ObjectFactory objectFactory) {
         testRoot = objectFactory.directoryProperty()
         TestBuilder builder = new TestBuilder()
-        perSnippet builder.&handleSnippet
-        doLast builder.&checkUnconverted
-        doLast builder.&finishLastTest
+        perSnippet = new Action<Snippet>() {
+            @Override
+            void execute(Snippet snippet) {
+                builder.handleSnippet(snippet)
+            }
+        }
+        doLast {
+            builder.checkUnconverted()
+            builder.finishLastTest()
+        }
     }
 
     /**
@@ -190,6 +202,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
      * Called each time a snippet is encountered. Tracks the snippets and
     * calls buildTest to actually build the test.
     */
+
    void handleSnippet(Snippet snippet) {
        if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
            unconvertedCandidates.add(snippet.path.toString()
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
index 1580ec891ed2b..3e4ad91024082 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy
@@ -11,8 +11,9 @@ package org.elasticsearch.gradle.internal.doc
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.core.JsonToken;
+import com.fasterxml.jackson.core.JsonToken
 
+import org.gradle.api.Action;
 import org.gradle.api.DefaultTask
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.file.ConfigurableFileTree
@@ -44,7 +45,7 @@ class SnippetsTask extends DefaultTask {
      * instance of Snippet.
     */
    @Internal
-    Closure perSnippet
+    Action<Snippet> perSnippet
 
    /**
     * The docs to scan. Defaults to every file in the directory exception the
@@ -134,7 +135,7 @@ class SnippetsTask extends DefaultTask {
                             + "After substitutions and munging, the json looks like:\n" + quoted, e);
                     }
                 }
-                perSnippet(snippet)
+                perSnippet.execute(snippet)
                 snippet = null
             }
             file.eachLine('UTF-8') { String line, int lineNumber ->

From d189d0e5c53fe5ca10f995e9a858d180f6bb0641 Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 7 Nov 2023 11:18:02 +0100
Subject: [PATCH 05/30] Make addRemote task configuration cache compatible
 (#101830)

---
 .../elasticsearch/gradle/internal/InternalBwcGitPlugin.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
index d51770ffd30ed..71c76b2045007 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java
@@ -72,20 +72,19 @@ public void apply(Project project) {
             createClone.commandLine("git", "clone", buildLayout.getRootDirectory(), gitExtension.getCheckoutDir().get());
         });
 
-        ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties();
         TaskProvider<LoggedExec> findRemoteTaskProvider = tasks.register("findRemote", LoggedExec.class, findRemote -> {
             findRemote.dependsOn(createCloneTaskProvider);
             findRemote.getWorkingDir().set(gitExtension.getCheckoutDir());
             findRemote.commandLine("git", "remote", "-v");
             findRemote.getCaptureOutput().set(true);
-            findRemote.doLast(t -> { extraProperties.set("remoteExists", isRemoteAvailable(remote, findRemote.getOutput())); });
+            findRemote.doLast(t -> System.setProperty("remoteExists", String.valueOf(isRemoteAvailable(remote, findRemote.getOutput()))));
         });
 
         TaskProvider<Task> addRemoteTaskProvider = tasks.register("addRemote", addRemote -> {
             String rootProjectName = project.getRootProject().getName();
             addRemote.dependsOn(findRemoteTaskProvider);
-            addRemote.onlyIf("remote exists", task -> ((boolean) extraProperties.get("remoteExists")) == false);
+            addRemote.onlyIf("remote exists", task -> (Boolean.valueOf(providerFactory.systemProperty("remoteExists").get()) == false));
             addRemote.doLast(new Action<Task>() {
                 @Override
                 public void execute(Task task) {

From c58427d9d3003b3a56dded3e44b82431e5859082 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?=
Date: Tue, 7 Nov 2023 11:19:48 +0100
Subject: [PATCH 06/30] Add MlFeatures basics + test feature (#101858)

---
 .../plugin/ml/src/main/java/module-info.java  |  1 +
 .../xpack/ml/MachineLearning.java             |  3 +++
 .../elasticsearch/xpack/ml/MlFeatures.java    | 24 +++++++++++++++++++
 ...lasticsearch.features.FeatureSpecification |  8 +++++++
 4 files changed, 36 insertions(+)
 create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java
 create mode 100644 x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification

diff --git a/x-pack/plugin/ml/src/main/java/module-info.java b/x-pack/plugin/ml/src/main/java/module-info.java
index a73c9bdfa32b4..52dee889d15fc 100644
--- a/x-pack/plugin/ml/src/main/java/module-info.java
+++ b/x-pack/plugin/ml/src/main/java/module-info.java
@@ -33,6 +33,7 @@
     provides org.elasticsearch.painless.spi.PainlessExtension with org.elasticsearch.xpack.ml.MachineLearningPainlessExtension;
     provides org.elasticsearch.xpack.autoscaling.AutoscalingExtension with org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingExtension;
+    provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.ml.MlFeatures;
 
     exports org.elasticsearch.xpack.ml;
     exports org.elasticsearch.xpack.ml.action;
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index f4bce4906c0b0..b4b8084b4b328 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -44,6 +44,7 @@
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -485,6 +486,8 @@ public class MachineLearning extends Plugin
 
     public static final String TRAINED_MODEL_CIRCUIT_BREAKER_NAME = "model_inference";
 
+    public static final NodeFeature STATE_RESET_FALLBACK_ON_DISABLED = new NodeFeature("ml.state_reset_fallback_on_disabled");
+
     private static final long DEFAULT_MODEL_CIRCUIT_BREAKER_LIMIT = (long) ((0.50) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes());
     private static final double DEFAULT_MODEL_CIRCUIT_BREAKER_OVERHEAD = 1.0D;
 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java
new file mode 100644
index 0000000000000..29aa189b2acd4
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
+
+import java.util.Map;
+
+/**
+ * This class specifies source code features exposed by the ML plugin.
+ */
+public class MlFeatures implements FeatureSpecification {
+    @Override
+    public Map<NodeFeature, Version> getHistoricalFeatures() {
+        return Map.of(MachineLearning.STATE_RESET_FALLBACK_ON_DISABLED, Version.V_8_7_0);
+    }
+}
diff --git a/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
new file mode 100644
index 0000000000000..7dbef291bdd46
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
@@ -0,0 +1,8 @@
+#
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License
+# 2.0; you may not use this file except in compliance with the Elastic License
+# 2.0.
+#
+
+org.elasticsearch.xpack.ml.MlFeatures

From 73ca01ebf5a40476d009ea7d871e3d3fbde8b44b Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 7 Nov 2023 11:20:44 +0100
Subject: [PATCH 07/30] Fix configuration cache incompatibility in Rest
 compatibility tests (#101842)

related to https://github.com/elastic/elasticsearch/issues/57918
---
 .../gradle/internal/test/rest/CopyRestTestsTask.java    | 6 +++++-
 .../rest/compat/compat/RestCompatTestTransformTask.java | 6 ++++--
 x-pack/qa/xpack-prefix-rest-compat/build.gradle         | 7 ++++---
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
index 9359272b29610..94345ed80eec7 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java
@@ -15,6 +15,7 @@
 import org.gradle.api.file.FileSystemOperations;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.file.ProjectLayout;
+import org.gradle.api.internal.file.FileOperations;
 import org.gradle.api.model.ObjectFactory;
 import org.gradle.api.provider.ListProperty;
 import org.gradle.api.tasks.IgnoreEmptyDirectories;
@@ -43,7 +44,7 @@
  *
 * @see RestResourcesPlugin
 */
-public class CopyRestTestsTask extends DefaultTask {
+public abstract class CopyRestTestsTask extends DefaultTask {
     private static final String REST_TEST_PREFIX = "rest-api-spec/test";
     private final ListProperty<String> includeCore;
     private final ListProperty<String> includeXpack;
@@ -62,6 +63,9 @@ public class CopyRestTestsTask extends DefaultTask {
     private final ProjectLayout projectLayout;
     private final FileSystemOperations fileSystemOperations;
 
+    @Inject
+    public abstract FileOperations getFileOperations();
+
     @Inject
     public CopyRestTestsTask(
         ProjectLayout projectLayout,
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
index 76004e3e5f6db..9b1e8a67deec8 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
@@ -457,15 +457,17 @@ public void transform() throws IOException {
                     Collections.singletonList(new Skip(skippedFilesWithReason.get(file)))
                 );
             } else {
+                List<RestTestTransform<?>> transformations = new ArrayList<>(getTransformations().get());
+
                 if (skippedFilesWithTestAndReason.containsKey(file)) {
                     // skip the named tests for this file
                     skippedFilesWithTestAndReason.get(file).forEach(fullTestNameAndReasonPair -> {
                         String prefix = file.getName().replace(".yml", "/");
                         String singleTestName = fullTestNameAndReasonPair.getLeft().replaceAll(".*" + prefix, "");
-                        getTransformations().add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
+                        transformations.add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
                     });
                 }
-                transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), getTransformations().get());
+                transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), transformations);
             }
             // convert to url to ensure forward slashes
diff --git a/x-pack/qa/xpack-prefix-rest-compat/build.gradle b/x-pack/qa/xpack-prefix-rest-compat/build.gradle
index caca3b63d4951..8b91aae21ff73 100644
--- a/x-pack/qa/xpack-prefix-rest-compat/build.gradle
+++ b/x-pack/qa/xpack-prefix-rest-compat/build.gradle
@@ -34,10 +34,11 @@ tasks.named("copyRestCompatTestTask").configure { task ->
   task.dependsOn(configurations.compatXpackTests);
   task.setXpackConfig(configurations.compatXpackTests);
   task.getIncludeXpack().set(List.of("license", "migration", "ml", "rollup", "sql", "ssl"));
-  task.getOutputResourceDir().set(project.getLayout().getBuildDirectory().dir("restResources/v${compatVersion}/yamlTests/original"));
+  def fileOperations = task.getFileOperations()
+  task.getOutputResourceDir().set(project.getLayout().getBuildDirectory().dir("restResources/v${compatVersion}/yamlTests/original"))
   task.setXpackConfigToFileTree(
-    config -> fileTree(
-      config.getSingleFile().toPath()
+    config -> fileOperations.fileTree(
+      config.getSingleFile()
     )
   )
 }

From 9132f95fb4fc07965be10d61ef106c685afc6072 Mon Sep 17 00:00:00 2001
From: Abdon Pijpelink
Date: Tue, 7 Nov 2023 11:35:37 +0100
Subject: [PATCH 08/30] [DOCS] Add 'Using ES|QL in Elastic Security' (#101677)

* [DOCS] Add 'Using ES|QL in Elastic Security'

* Add a note about enabling knowledge base

* Update links
---
 .../esql/esql-security-solution.asciidoc | 41 +++++++++++++++++++
 docs/reference/esql/esql-using.asciidoc  |  7 +++-
 docs/reference/esql/index.asciidoc       |  4 +-
 3 files changed, 49 insertions(+), 3 deletions(-)
 create mode 100644 docs/reference/esql/esql-security-solution.asciidoc

diff --git a/docs/reference/esql/esql-security-solution.asciidoc b/docs/reference/esql/esql-security-solution.asciidoc
new file mode 100644
index 0000000000000..45e8e44e44bdd
--- /dev/null
+++ b/docs/reference/esql/esql-security-solution.asciidoc
@@ -0,0 +1,41 @@
+[[esql-elastic-security]]
+=== Using {esql} in {elastic-sec}
+
+++++
+<titleabbrev>Using {esql} in {elastic-sec}</titleabbrev>
+++++
+
+You can use {esql} in {elastic-sec} to investigate events in Timeline and create
+detection rules. Use the Elastic AI Assistant to build {esql} queries, or answer
+questions about the {esql} query language.
+
+[discrete]
+[[esql-elastic-security-timeline]]
+=== Use {esql} to investigate events in Timeline
+
+You can use {esql} in Timeline to filter, transform, and analyze event data
+stored in {es}. To start using {esql}, open the **{esql}** tab. To learn
+more, refer to {security-guide}/timelines-ui.html#esql-in-timeline[Investigate
+events in Timeline].
+
+[discrete]
+[[esql-elastic-security-detection-rules]]
+=== Use {esql} to create detection rules
+
+Use the {esql} rule type to create detection rules using {esql} queries. The
+{esql} rule type supports aggregating and non-aggregating queries. To learn
+more, refer to {security-guide}/rules-ui-create.html#create-esql-rule[Create an
+{esql} rule].
+
+[discrete]
+[[esql-elastic-security-ai-assistant]]
+=== Elastic AI Assistant
+
+Use the Elastic AI Assistant to build {esql} queries, or answer questions about
+the {esql} query language. To learn more, refer to
+{security-guide}/security-assistant.html[AI Assistant].
+
+NOTE: For AI Assistant to answer questions about {esql} and write {esql}
+queries, you need to
+{security-guide}/security-assistant.html#set-up-ai-assistant[enable knowledge
+base].
\ No newline at end of file
diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc
index f586f3a28de5c..dbab521ead4d1 100644
--- a/docs/reference/esql/esql-using.asciidoc
+++ b/docs/reference/esql/esql-using.asciidoc
@@ -6,11 +6,16 @@ Information about using the <>.
 
 <<esql-kibana>>::
 Using {esql} in {kib} to query and aggregate your data, create visualizations,
-and set up alerts.
+and set up alerts.
+
+<<esql-elastic-security>>::
+Using {esql} in {elastic-sec} to investigate events in Timeline and create
+detection rules.
 
 <<esql-task-management>>::
 Using the <<tasks,task management API>> to list and cancel {esql} queries.
 
 include::esql-rest.asciidoc[]
 include::esql-kibana.asciidoc[]
+include::esql-security-solution.asciidoc[]
 include::task-management.asciidoc[]
\ No newline at end of file
diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc
index 799f95751aa69..dcbe426b1bcac 100644
--- a/docs/reference/esql/index.asciidoc
+++ b/docs/reference/esql/index.asciidoc
@@ -55,8 +55,8 @@ fields>> and <>. And guidance for GROK>> and <>.
 
 <<esql-using>>::
-An overview of using the <<esql-rest>>, <<esql-kibana>>, and
-<<esql-task-management>>.
+An overview of using the <<esql-rest>>, <<esql-kibana>>,
+<<esql-elastic-security>>, and <<esql-task-management>>.
 
 <<esql-limitations>>::
 The current limitations of {esql}.

From 70128f5b748dbc11c8e85de565f076906889e2d5 Mon Sep 17 00:00:00 2001
From: Abdon Pijpelink
Date: Tue, 7 Nov 2023 13:03:49 +0100
Subject: [PATCH 09/30] [DOCS] Mark 'ignore_throttled' deprecated in all docs
 (#101838)

---
 docs/reference/indices/resolve.asciidoc                | 6 ++++--
 .../ml/anomaly-detection/apis/put-datafeed.asciidoc    | 2 --
 .../ml/anomaly-detection/apis/update-datafeed.asciidoc | 2 --
 docs/reference/rest-api/common-parms.asciidoc          | 2 ++
 docs/reference/search/multi-search.asciidoc            | 5 +----
 docs/reference/search/search-template-api.asciidoc     | 4 +---
 docs/reference/search/search.asciidoc                  | 4 +---
 7 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc
index 1f405a2e49a7a..c919bba5c7651 100644
--- a/docs/reference/indices/resolve.asciidoc
+++ b/docs/reference/indices/resolve.asciidoc
@@ -88,9 +88,11 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
 +
 Defaults to `true`.
 
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
+`ignore_throttled`::
+(Optional, Boolean) If `true`, concrete, expanded or aliased indices are
+ignored when frozen. Defaults to `false`.
 +
-Defaults to `false`.
+deprecated:[7.16.0]
 
 [[resolve-index-api-example]]
 ==== {api-examples-title}
diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc
index ec2ef3631f0c6..05e23d901d5d3 100644
--- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc
@@ -66,8 +66,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
 Defaults to `open`.
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
-+
-deprecated:[7.16.0]
 
 `ignore_unavailable`::
 (Optional, Boolean) If `true`, unavailable indices (missing or closed) are
diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
index 48893f1aadb82..5e6121cd01ac9 100644
--- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
@@ -55,8 +55,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
 Defaults to `open`.
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
-+
-deprecated:[7.16.0]
 
 `ignore_unavailable`::
 (Optional, Boolean) If `true`, unavailable indices (missing or closed) are
diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc
index 41fd3eefc31f2..55f277218d210 100644
--- a/docs/reference/rest-api/common-parms.asciidoc
+++ b/docs/reference/rest-api/common-parms.asciidoc
@@ -438,6 +438,8 @@ tag::ignore_throttled[]
 `ignore_throttled`::
 (Optional, Boolean) If `true`, concrete, expanded or aliased indices are
 ignored when frozen. Defaults to `true`.
++
+deprecated:[7.16.0]
 end::ignore_throttled[]
 
 tag::index-ignore-unavailable[]
diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc
index e8d29e00ba486..90056d5036558 100644
--- a/docs/reference/search/multi-search.asciidoc
+++ b/docs/reference/search/multi-search.asciidoc
@@ -84,10 +84,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
 +
 Defaults to `open`.
 
-`ignore_throttled`::
-(Optional, Boolean)
-If `true`, concrete, expanded or aliased indices are ignored when frozen.
-Defaults to `true`.
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
 
diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc
index 55142b953a194..539048a324746 100644
--- a/docs/reference/search/search-template-api.asciidoc
+++ b/docs/reference/search/search-template-api.asciidoc
@@ -92,9 +92,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
 (Optional, Boolean) If `true`, the response includes additional details about
 score computation as part of a hit. Defaults to `false`.
 
-`ignore_throttled`::
-(Optional, Boolean) If `true`, specified concrete, expanded, or aliased indices
-are not included in the response when throttled. Defaults to `true`.
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
 
diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc
index f953ce03ab1eb..68d286b3f267b 100644
--- a/docs/reference/search/search.asciidoc
+++ b/docs/reference/search/search.asciidoc
@@ -109,9 +109,7 @@ By default, you cannot page through more than 10,000 hits using the `from` and
 `size` parameters. To page through more hits, use the
 <<search-after,`search_after`>> parameter.
 
-`ignore_throttled`::
-(Optional, Boolean) If `true`, concrete, expanded or aliased indices will be
-ignored when frozen. Defaults to `true`.
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled]
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
 

From e787a28ab7f0b6417b45965043e697892faa86b5 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 7 Nov 2023 05:22:56 -0700
Subject: [PATCH 10/30] Set ActiveProcessorCount when node.processors is set
 (#101846)

node.processors determines the size of Elasticsearch threadpools. This
commit sets the JDK flag -XX:ActiveProcessorCount when node.processors
is set so that the JDK sizes its own threadpools accordingly.

relates #100244
---
 .../server/cli/JvmOptionsParser.java      |  2 +-
 .../server/cli/SystemJvmOptions.java      | 20 +++++++++++--
 .../server/cli/JvmOptionsParserTests.java | 28 ++++++++++++++++++
 docs/changelog/101846.yaml                |  5 ++++
 4 files changed, 52 insertions(+), 3 deletions(-)
 create mode 100644 docs/changelog/101846.yaml

diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
index 5999f618bc0ab..29650e4b74114 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java
@@ -137,7 +137,7 @@ private List<String> jvmOptions(
         );
         substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(config, substitutedJvmOptions));
         final List<String> ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions);
-        final List<String> systemJvmOptions = SystemJvmOptions.systemJvmOptions();
+        final List<String> systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings());
 
         final List<String> apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir);
 
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index a55a303517d6f..6e250075f7747 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -8,13 +8,16 @@
 
 package org.elasticsearch.server.cli;
 
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+
 import java.util.List;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 final class SystemJvmOptions {
 
-    static List<String> systemJvmOptions() {
+    static List<String> systemJvmOptions(Settings nodeSettings) {
         return Stream.of(
             /*
              * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl;
@@ -61,7 +64,8 @@ static List<String> systemJvmOptions() {
             * explore alternatives. See org.elasticsearch.xpack.searchablesnapshots.preallocate.Preallocate.
             */
             "--add-opens=java.base/java.io=org.elasticsearch.preallocate",
-            maybeOverrideDockerCgroup()
+            maybeOverrideDockerCgroup(),
+            maybeSetActiveProcessorCount(nodeSettings)
         ).filter(e -> e.isEmpty() == false).collect(Collectors.toList());
     }
 
@@ -85,4 +89,16 @@ private static String maybeOverrideDockerCgroup() {
         }
         return "";
     }
+
+    /*
+     * node.processors determines thread pool sizes for Elasticsearch. When it
+     * is set, we need to also tell the JVM to respect a different value
+     */
+    private static String maybeSetActiveProcessorCount(Settings nodeSettings) {
+        if (EsExecutors.NODE_PROCESSORS_SETTING.exists(nodeSettings)) {
+            int allocated = EsExecutors.allocatedProcessors(nodeSettings);
+            return "-XX:ActiveProcessorCount=" + allocated;
+        }
+        return "";
+    }
 }
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
index 5d63f29ac584e..03856b1024992 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
@@ -8,6 +8,8 @@
 
 package org.elasticsearch.server.cli;
 
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.ESTestCase.WithoutSecurityManager;
@@ -28,10 +30,13 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;
 
 @WithoutSecurityManager
 public class JvmOptionsParserTests extends ESTestCase {
@@ -344,4 +349,27 @@ public void accept(final int lineNumber, final String line) {
 
         assertThat(seenInvalidLines, equalTo(invalidLines));
     }
+
+    public void testNodeProcessorsActiveCount() {
+        {
+            final List<String> jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY);
+            assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount="))));
+        }
+        {
+            Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build();
+            final List<String> jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings);
+            assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1"));
+        }
+        {
+            // check rounding
+            Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build();
+            final List<String> jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings);
+            assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1"));
+        }
+        {
+            // check validation
+            Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build();
+            var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings));
+            assertThat(e.getMessage(), containsString("setting [node.processors] must be <="));
+        }
+    }
 }
diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml
new file mode 100644
index 0000000000000..52dfff8801c62
--- /dev/null
+++ b/docs/changelog/101846.yaml
@@ -0,0 +1,5 @@
+pr: 101846
+summary: Set `ActiveProcessorCount` when `node.processors` is set
+area: Infra/CLI
+type: enhancement
+issues: []

From e8450706bd8e029c4ca02109d1a1f10ab9af6346 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 7 Nov 2023 12:34:51 +0000
Subject: [PATCH 11/30] Remove Releasable interface from clients (#101850)

No `Client` implementations hold any resources, so they don't need to be
`Releasable`. This commit removes the unnecessary interface.
As well as removing the dead code, this change means that IDEs no longer
warn about calling `ESIntegTestCase#client()` outside of a
try-with-resources block.
---
 .../PredicateTokenScriptFilterTests.java      |   3 -
 .../ScriptedConditionTokenFilterTests.java    |   3 -
 .../ClientScrollableHitSourceTests.java       |   3 -
 .../elasticsearch/client/internal/Client.java |   3 +-
 .../client/internal/FilterClient.java         |   5 -
 .../client/internal/node/NodeClient.java      |   5 -
 .../java/org/elasticsearch/node/Node.java     |   3 -
 .../transport/RemoteClusterAwareClient.java   |   5 -
 .../remote/RemoteClusterNodesActionTests.java |   6 -
 .../AbstractClientHeadersTestCase.java        |   1 -
 .../health/HealthPeriodicLoggerTests.java     |   1 -
 .../rest/BaseRestHandlerTests.java            |   1 -
 .../RestCancellableNodeClientTests.java       | 121 +++++-----
 .../RemoteClusterAwareClientTests.java        | 139 ++++++-----
 .../test/ExternalTestCluster.java             |   6 +-
 .../test/InternalTestCluster.java             |   2 -
 .../elasticsearch/test/client/NoOpClient.java |   7 +-
 .../test/client/NoOpNodeClient.java           |   3 -
 .../ilm/LifecyclePolicySecurityClient.java    |   6 -
 .../core/ilm/LifecyclePolicyClientTests.java  |  33 +--
 .../xpack/ilm/IndexLifecycleRunnerTests.java  |   1 -
 .../ilm/history/ILMHistoryStoreTests.java     |   1 -
 .../ChunkedTrainedModelRestorerTests.java     | 154 ++++++------
 .../apikey/RestCreateApiKeyActionTests.java   |  22 +-
 .../apikey/RestGetApiKeyActionTests.java      |  92 ++++---
 .../RestInvalidateApiKeyActionTests.java      |  69 +++---
 .../apikey/RestQueryApiKeyActionTests.java    |  19 +-
 .../xpack/slm/SnapshotLifecycleTaskTests.java |  19 +-
 .../history/SnapshotHistoryStoreTests.java    |   1 -
 .../TransformPrivilegeCheckerTests.java       |   4 -
 .../bench/WatcherScheduleEngineBenchmark.java | 226 +++++++++---------
 31 files changed, 421 insertions(+), 543 deletions(-)

diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java
index 3a519f594a57f..b333c8534d19b 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java
@@ -97,9 +97,6 @@ private static class MockClient extends AbstractClient {
             super(settings, threadPool);
         }
 
-        @Override
-        public void close() {}
-
         @Override
         protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
             ActionType<Response> action,
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java
index 81df5836015f0..98fdb551c27f1 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java
@@ -97,9 +97,6 @@ private class MockClient extends AbstractClient {
             super(settings, threadPool);
         }
 
-        @Override
-        public void close() {}
-
         @Override
         protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
             ActionType<Response> action,
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
index 58bda3229cb42..c7c441e3eaff9 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
@@ -267,9 +267,6 @@ public void val
             ((ExecuteRequest) executeRequest).validateRequest(action, validator);
         }
 
-        @Override
-        public void close() {}
-
         public synchronized void awaitOperation() throws InterruptedException {
             if (executeRequest == null) {
                 wait(10000);
diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java
index 89cb764549767..5ae3870338c35 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/Client.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java
@@ -55,7 +55,6 @@
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Nullable;
-import org.elasticsearch.core.Releasable;
 
 import java.util.Map;
 import java.util.concurrent.Executor;
@@ -71,7 +70,7 @@
  *
 * @see org.elasticsearch.node.Node#client()
 */
-public interface Client extends ElasticsearchClient, Releasable {
+public interface Client extends ElasticsearchClient {
 
     // Note: This setting is registered only for bwc. The value is never read.
     Setting<String> CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> {
diff --git a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java
index 235fc0a150066..53a8e2e189244 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java
@@ -45,11 +45,6 @@ protected FilterClient(Settings settings, ThreadPool threadPool, Client in) {
         this.in = in;
     }
 
-    @Override
-    public void close() {
-        in().close();
-    }
-
     @Override
     protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
         ActionType<Response> action,
diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java
index b1dfc22cf27d3..0228dc7cc61ea 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java
@@ -75,11 +75,6 @@ public List<String> getActionNames() {
         return actions.keySet().stream().map(ActionType::name).toList();
     }
 
-    @Override
-    public void close() {
-        // nothing really to do
-    }
-
     @Override
     public <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
         ActionType<Response> action,
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 5f8f35ad3cd2b..1c1b9745befe8 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -43,7 +43,6 @@
 import org.elasticsearch.core.Assertions;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.PathUtils;
-import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.env.Environment;
@@ -508,8 +507,6 @@ public synchronized void close() throws IOException {
         toClose.add(injector.getInstance(SnapshotsService.class));
         toClose.add(injector.getInstance(SnapshotShardsService.class));
         toClose.add(injector.getInstance(RepositoriesService.class));
-        toClose.add(() -> stopWatch.stop().start("client"));
-        Releasables.close(injector.getInstance(Client.class));
         toClose.add(() -> stopWatch.stop().start("indices_cluster"));
toClose.add(injector.getInstance(IndicesClusterStateService.class)); toClose.add(() -> stopWatch.stop().start("indices")); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java index 23a3857397f90..d85cc1d67a8b9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java @@ -84,11 +84,6 @@ private void maybeEnsureConnected(ActionListener ensureConnectedListener) } } - @Override - public void close() { - // do nothing - } - @Override public Client getRemoteClusterClient(String remoteClusterAlias, Executor responseExecutor) { return remoteClusterService.getRemoteClusterClient(threadPool(), remoteClusterAlias, responseExecutor); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index b593c947fa725..91af3383f0670 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -123,9 +123,6 @@ protected void ); listener.onResponse((Response) nodesInfoResponse); } - - @Override - public void close() {} } ); @@ -201,9 +198,6 @@ protected void assertThat(asInstanceOf(NodesInfoRequest.class, request).requestedMetrics(), empty()); listener.onResponse((Response) nodesInfoResponse); } - - @Override - public void close() {} } ); diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index deec1ec10c5a8..32e9b214ab530 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -83,7 +83,6 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { super.tearDown(); - client.close(); terminate(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java index 846ed3e3021ab..7e77b3a4a1d73 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java @@ -92,7 +92,6 @@ public void cleanup() { testHealthPeriodicLogger.close(); } threadPool.shutdownNow(); - client.close(); } public void testConvertToLoggedFields() { diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 06a95c2628389..0211397fdeee8 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -47,7 +47,6 @@ public void setUp() throws Exception { public void tearDown() throws Exception { super.tearDown(); threadPool.shutdown(); - mockClient.close(); } public void testOneUnconsumedParameters() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java 
b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java index a8fe1c53d129a..a21eab1d95911 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java @@ -64,30 +64,29 @@ public void stopThreadPool() { * associated with its corresponding channel. Either way, we need to make sure that no tasks are left in the map. */ public void testCompletedTasks() throws Exception { - try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, false)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int totalSearches = 0; - List> futures = new ArrayList<>(); - int numChannels = randomIntBetween(1, 30); - for (int i = 0; i < numChannels; i++) { - int numTasks = randomIntBetween(1, 30); - TestHttpChannel channel = new TestHttpChannel(); - totalSearches += numTasks; - for (int j = 0; j < numTasks; j++) { - PlainActionFuture actionFuture = new PlainActionFuture<>(); - RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); - threadPool.generic().submit(() -> client.execute(SearchAction.INSTANCE, new SearchRequest(), actionFuture)); - futures.add(actionFuture); - } - } - for (Future future : futures) { - future.get(); + final var testClient = new TestClient(Settings.EMPTY, threadPool, false); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int totalSearches = 0; + List> futures = new ArrayList<>(); + int numChannels = randomIntBetween(1, 30); + for (int i = 0; i < numChannels; i++) { + int numTasks = randomIntBetween(1, 30); + TestHttpChannel channel = new TestHttpChannel(); + totalSearches += numTasks; + for (int j = 0; j < numTasks; j++) { + PlainActionFuture actionFuture = new PlainActionFuture<>(); + RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); + threadPool.generic().submit(() -> client.execute(SearchAction.INSTANCE, new SearchRequest(), actionFuture)); + futures.add(actionFuture); } - // no channels get closed in this test, hence we expect as many channels as we created in the map - assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(0, RestCancellableNodeClient.getNumTasks()); - assertEquals(totalSearches, testClient.searchRequests.get()); } + for (Future future : futures) { + future.get(); + } + // no channels get closed in this test, hence we expect as many channels as we created in the map + assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(0, RestCancellableNodeClient.getNumTasks()); + assertEquals(totalSearches, testClient.searchRequests.get()); } /** @@ -95,30 +94,29 @@ public void testCompletedTasks() throws Exception { * removed and all of its corresponding tasks get cancelled. 
*/ public void testCancelledTasks() throws Exception { - try (TestClient nodeClient = new TestClient(Settings.EMPTY, threadPool, true)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int numChannels = randomIntBetween(1, 30); - int totalSearches = 0; - List channels = new ArrayList<>(numChannels); - for (int i = 0; i < numChannels; i++) { - TestHttpChannel channel = new TestHttpChannel(); - channels.add(channel); - int numTasks = randomIntBetween(1, 30); - totalSearches += numTasks; - RestCancellableNodeClient client = new RestCancellableNodeClient(nodeClient, channel); - for (int j = 0; j < numTasks; j++) { - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); - } - assertEquals(numTasks, RestCancellableNodeClient.getNumTasks(channel)); + final var nodeClient = new TestClient(Settings.EMPTY, threadPool, true); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + List channels = new ArrayList<>(numChannels); + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + channels.add(channel); + int numTasks = randomIntBetween(1, 30); + totalSearches += numTasks; + RestCancellableNodeClient client = new RestCancellableNodeClient(nodeClient, channel); + for (int j = 0; j < numTasks; j++) { + client.execute(SearchAction.INSTANCE, new SearchRequest(), null); } - assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); - for (TestHttpChannel channel : channels) { - channel.awaitClose(); - } - assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(totalSearches, nodeClient.searchRequests.get()); - assertEquals(totalSearches, nodeClient.cancelledTasks.size()); + assertEquals(numTasks, RestCancellableNodeClient.getNumTasks(channel)); + } + assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); + for (TestHttpChannel channel : channels) { + channel.awaitClose(); } + assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(totalSearches, nodeClient.searchRequests.get()); + assertEquals(totalSearches, nodeClient.cancelledTasks.size()); } /** @@ -128,26 +126,25 @@ public void testCancelledTasks() throws Exception { * the newly added listener will be invoked at registration time. */ public void testChannelAlreadyClosed() { - try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, true)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int numChannels = randomIntBetween(1, 30); - int totalSearches = 0; - for (int i = 0; i < numChannels; i++) { - TestHttpChannel channel = new TestHttpChannel(); - // no need to wait here, there will be no close listener registered, nothing to wait for. 
- channel.close(); - int numTasks = randomIntBetween(1, 5); - totalSearches += numTasks; - RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); - for (int j = 0; j < numTasks; j++) { - // here the channel will be first registered, then straight-away removed from the map as the close listener is invoked - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); - } + final var testClient = new TestClient(Settings.EMPTY, threadPool, true); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + // no need to wait here, there will be no close listener registered, nothing to wait for. + channel.close(); + int numTasks = randomIntBetween(1, 5); + totalSearches += numTasks; + RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); + for (int j = 0; j < numTasks; j++) { + // here the channel will be first registered, then straight-away removed from the map as the close listener is invoked + client.execute(SearchAction.INSTANCE, new SearchRequest(), null); } - assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(totalSearches, testClient.searchRequests.get()); - assertEquals(totalSearches, testClient.cancelledTasks.size()); } + assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(totalSearches, testClient.searchRequests.get()); + assertEquals(totalSearches, testClient.cancelledTasks.size()); } private static class TestClient extends NodeClient { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java index b4e72af7a184e..e606da040bab4 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -85,39 +85,36 @@ public void testSearchShards() throws Exception { service.start(); service.acceptIncomingRequests(); - try ( - RemoteClusterAwareClient client = new RemoteClusterAwareClient( - Settings.EMPTY, - threadPool, - service, - "cluster1", - threadPool.executor(TEST_THREAD_POOL_NAME), - randomBoolean() - ) - ) { - SearchShardsRequest searchShardsRequest = new SearchShardsRequest( - new String[] { "test-index" }, - IndicesOptions.strictExpandOpen(), - new MatchAllQueryBuilder(), - null, - null, - randomBoolean(), - null - ); - final SearchShardsResponse searchShardsResponse = PlainActionFuture.get( - future -> client.execute( - SearchShardsAction.INSTANCE, - searchShardsRequest, - ActionListener.runBefore( - future, - () -> assertTrue(Thread.currentThread().getName().contains('[' + TEST_THREAD_POOL_NAME + ']')) - ) - ), - 10, - TimeUnit.SECONDS - ); - assertThat(searchShardsResponse.getNodes(), equalTo(knownNodes)); - } + final var client = new RemoteClusterAwareClient( + Settings.EMPTY, + threadPool, + service, + "cluster1", + threadPool.executor(TEST_THREAD_POOL_NAME), + randomBoolean() + ); + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + new String[] { "test-index" }, + IndicesOptions.strictExpandOpen(), + new MatchAllQueryBuilder(), + null, + null, + randomBoolean(), + null + ); + final SearchShardsResponse searchShardsResponse = PlainActionFuture.get( + future -> client.execute( + 
SearchShardsAction.INSTANCE, + searchShardsRequest, + ActionListener.runBefore( + future, + () -> assertTrue(Thread.currentThread().getName().contains('[' + TEST_THREAD_POOL_NAME + ']')) + ) + ), + 10, + TimeUnit.SECONDS + ); + assertThat(searchShardsResponse.getNodes(), equalTo(knownNodes)); } } } @@ -145,46 +142,44 @@ public void testSearchShardsThreadContextHeader() { service.start(); service.acceptIncomingRequests(); - try ( - RemoteClusterAwareClient client = new RemoteClusterAwareClient( - Settings.EMPTY, - threadPool, - service, - "cluster1", - EsExecutors.DIRECT_EXECUTOR_SERVICE, - randomBoolean() - ) - ) { - int numThreads = 10; - ExecutorService executorService = Executors.newFixedThreadPool(numThreads); - for (int i = 0; i < numThreads; i++) { - final String threadId = Integer.toString(i); - PlainActionFuture future = new PlainActionFuture<>(); - executorService.submit(() -> { - ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); - threadContext.putHeader("threadId", threadId); - var searchShardsRequest = new SearchShardsRequest( - new String[] { "test-index" }, - IndicesOptions.strictExpandOpen(), - new MatchAllQueryBuilder(), - null, - null, - randomBoolean(), - null - ); - client.execute( - SearchShardsAction.INSTANCE, - searchShardsRequest, - ActionListener.runBefore( - future, - () -> assertThat(seedTransport.threadPool.getThreadContext().getHeader("threadId"), equalTo(threadId)) - ) - ); - assertThat(future.actionGet().getNodes(), equalTo(knownNodes)); - }); - } - ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); + final var client = new RemoteClusterAwareClient( + Settings.EMPTY, + threadPool, + service, + "cluster1", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomBoolean() + ); + + int numThreads = 10; + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + final String threadId = Integer.toString(i); + PlainActionFuture future = new PlainActionFuture<>(); + executorService.submit(() -> { + ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); + threadContext.putHeader("threadId", threadId); + var searchShardsRequest = new SearchShardsRequest( + new String[] { "test-index" }, + IndicesOptions.strictExpandOpen(), + new MatchAllQueryBuilder(), + null, + null, + randomBoolean(), + null + ); + client.execute( + SearchShardsAction.INSTANCE, + searchShardsRequest, + ActionListener.runBefore( + future, + () -> assertThat(seedTransport.threadPool.getThreadContext().getHeader("threadId"), equalTo(threadId)) + ) + ); + assertThat(future.actionGet().getNodes(), equalTo(knownNodes)); + }); } + ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 5423263c88da6..3e3759601a1c9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -135,14 +135,14 @@ public ExternalTestCluster( logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size()); } catch (NodeValidationException e) { try { - IOUtils.close(wrappedClient, mockNode); + IOUtils.close(mockNode); } catch (IOException e1) { e.addSuppressed(e1); } throw new ElasticsearchException(e); } catch (Exception e) { try { - IOUtils.close(wrappedClient, mockNode); + 
IOUtils.close(mockNode); } catch (IOException e1) { e.addSuppressed(e1); } @@ -182,7 +182,7 @@ public InetSocketAddress[] httpAddresses() { @Override public void close() throws IOException { - IOUtils.close(client, node); + IOUtils.close(node); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 8abf10a773764..0ce970943cc0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -64,7 +64,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -977,7 +976,6 @@ private Client getOrBuildNodeClient() { void resetClient() { if (closed.get() == false) { - Releasables.close(nodeClient); nodeClient = null; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java index 7914d00be91fc..55aaabf74ba71 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java @@ -23,9 +23,7 @@ * See also {@link NoOpNodeClient} if you need to mock a {@link org.elasticsearch.client.internal.node.NodeClient}. */ public class NoOpClient extends AbstractClient { - /** - * Build with {@link ThreadPool}. This {@linkplain ThreadPool} is terminated on {@link #close()}. - */ + public NoOpClient(ThreadPool threadPool) { super(Settings.EMPTY, threadPool); } @@ -38,7 +36,4 @@ protected void ) { listener.onResponse(null); } - - @Override - public void close() {} } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java index 0300a3c41d00f..766c9176c6846 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java @@ -38,9 +38,6 @@ public class NoOpNodeClient extends NodeClient { private final AtomicLong executionCount = new AtomicLong(0); - /** - * Build with {@link ThreadPool}. This {@linkplain ThreadPool} is terminated on {@link #close()}. 
- */ public NoOpNodeClient(ThreadPool threadPool) { super(Settings.EMPTY, threadPool); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java index c5ac78c0a330b..e8f76b655b70e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java @@ -39,12 +39,6 @@ public LifecyclePolicySecurityClient(Client client, String origin, Map void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java index ed9a4d45b681f..162794865ba5a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java @@ -56,15 +56,8 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - Collections.emptyMap() - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Collections.emptyMap()); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } @@ -95,15 +88,8 @@ public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedExceptio headers.put("foo", "foo"); headers.put("bar", "bar"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - headers - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } @@ -136,15 +122,8 @@ public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { headers.put("es-security-runas-user", "foo"); headers.put("_xpack_security_authentication", "bar"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - headers - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 22376ac789b9d..d2ec684d3d1ab 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -131,7 +131,6 @@ public void prepare() { @After public void shutdown() { historyStore.close(); - noopClient.close(); threadPool.shutdownNow(); } diff --git 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index 8f8e0451cdbb9..6ac3a4522fb3d 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -93,7 +93,6 @@ public void setup() { public void setdown() { historyStore.close(); clusterService.close(); - client.close(); threadPool.shutdownNow(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java index abdd1def956f0..9fb27e8143814 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java @@ -51,118 +51,112 @@ public void testRetryingSearch_ReturnsSearchResults() throws InterruptedExceptio } public void testRetryingSearch_ThrowsSearchPhaseExceptionWithNoRetries() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException); - - var request = createSearchRequest(); - - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "1", request, 0, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); - - assertThat(exception.getCause(), is(searchPhaseException)); - assertThat( - exception.getMessage(), - is( - "loading model [1] failed after [0] retries. The deployment is now in a failed state, the error may be " - + "transient please stop the deployment and restart" - ) - ); - verify(mockClient, times(1)).search(any()); - } + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException); + + var request = createSearchRequest(); + + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "1", request, 0, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); + + assertThat(exception.getCause(), is(searchPhaseException)); + assertThat( + exception.getMessage(), + is( + "loading model [1] failed after [0] retries. 
The deployment is now in a failed state, the error may be " + + "transient please stop the deployment and restart" + ) + ); + verify(mockClient, times(1)).search(any()); } public void testRetryingSearch_ThrowsSearchPhaseExceptionAfterOneRetry() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException); + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException); - var request = createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(exception.getCause(), is(searchPhaseException)); - verify(mockClient, times(2)).search(any()); - } + assertThat(exception.getCause(), is(searchPhaseException)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_ThrowsCircuitBreakingExceptionAfterOneRetry_FromSearchPhaseException() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - var circuitBreakerException = new CircuitBreakingException("error", CircuitBreaker.Durability.TRANSIENT); - when(mockClient.search(any())).thenThrow(searchPhaseException).thenThrow(circuitBreakerException); + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + var circuitBreakerException = new CircuitBreakingException("error", CircuitBreaker.Durability.TRANSIENT); + when(mockClient.search(any())).thenThrow(searchPhaseException).thenThrow(circuitBreakerException); - var request = createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(exception.getCause(), is(circuitBreakerException)); - verify(mockClient, times(2)).search(any()); - } + assertThat(exception.getCause(), is(circuitBreakerException)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_EnsureExceptionCannotBeUnwrapped() { - try (var mockClient = mock(Client.class)) { - var searchPhaseExecutionException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseExecutionException); + final var mockClient = mock(Client.class); + var searchPhaseExecutionException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseExecutionException); - var request = 
createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(ExceptionsHelper.unwrapCause(exception), is(exception)); - assertThat(ExceptionsHelper.unwrapCause(exception), instanceOf(ElasticsearchException.class)); - verify(mockClient, times(2)).search(any()); - } + assertThat(ExceptionsHelper.unwrapCause(exception), is(exception)); + assertThat(ExceptionsHelper.unwrapCause(exception), instanceOf(ElasticsearchException.class)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_ThrowsIllegalArgumentExceptionIgnoringRetries() { - try (var mockClient = mock(Client.class)) { - var exception = new IllegalArgumentException("Error"); - when(mockClient.search(any())).thenThrow(exception); + final var mockClient = mock(Client.class); + var exception = new IllegalArgumentException("Error"); + when(mockClient.search(any())).thenThrow(exception); - var request = createSearchRequest(); + var request = createSearchRequest(); - IllegalArgumentException thrownException = expectThrows( - IllegalArgumentException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + IllegalArgumentException thrownException = expectThrows( + IllegalArgumentException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(thrownException, is(exception)); - verify(mockClient, times(1)).search(any()); - } + assertThat(thrownException, is(exception)); + verify(mockClient, times(1)).search(any()); } public void testRetryingSearch_ThrowsSearchPhaseExceptionOnce_ThenReturnsResponse() throws InterruptedException { - try (var mockClient = mock(Client.class)) { - var mockSearchResponse = mock(SearchResponse.class, RETURNS_DEEP_STUBS); + final var mockClient = mock(Client.class); + var mockSearchResponse = mock(SearchResponse.class, RETURNS_DEEP_STUBS); - PlainActionFuture searchFuture = new PlainActionFuture<>(); - searchFuture.onResponse(mockSearchResponse); + PlainActionFuture searchFuture = new PlainActionFuture<>(); + searchFuture.onResponse(mockSearchResponse); - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException).thenReturn(searchFuture); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException).thenReturn(searchFuture); - var request = createSearchRequest(); + var request = createSearchRequest(); - assertThat( - ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)), - is(mockSearchResponse) - ); + assertThat( + ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)), + is(mockSearchResponse) + ); - verify(mockClient, times(2)).search(any()); - } + verify(mockClient, times(2)).search(any()); } private static SearchRequest createSearchRequest() { diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 4bc4fd0ecbc85..791aba46c92ea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -86,7 +86,7 @@ public void sendResponse(RestResponse restResponse) { Instant.now().plus(Duration.ofHours(5)) ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @Override public void doExecute( ActionType action, @@ -103,17 +103,15 @@ public void doE listener.onFailure(new ElasticsearchSecurityException("encountered an error while creating API key")); } } - }) { - final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); - restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); + }; + final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); + restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat( - CreateApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())), - equalTo(expected) - ); - } + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat( + CreateApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())), + equalTo(expected) + ); } - } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index f4c293edb59d3..e842dd8588fa9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -122,7 +122,7 @@ public void sendResponse(RestResponse restResponse) { ) ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -149,44 +149,37 @@ public void doE listener.onFailure(new ElasticsearchSecurityException("encountered an error while creating API key")); } } - }) { - final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); + }; + final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); - restGetApiKeyAction.handleRequest(restRequest, restChannel, client); + restGetApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), (replyEmptyResponse && params.get("id") != null) ? 
is(RestStatus.NOT_FOUND) : is(RestStatus.OK)); + final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (replyEmptyResponse) { + assertThat(actual.getApiKeyInfos().length, is(0)); + } else { assertThat( - restResponse.status(), - (replyEmptyResponse && params.get("id") != null) ? is(RestStatus.NOT_FOUND) : is(RestStatus.OK) - ); - final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (replyEmptyResponse) { - assertThat(actual.getApiKeyInfos().length, is(0)); - } else { - assertThat( - actual.getApiKeyInfos(), - arrayContaining( - new ApiKey( - "api-key-name-1", - "api-key-id-1", - type, - creation, - expiration, - false, - "user-x", - "realm-1", - metadata, - roleDescriptors, - limitedByRoleDescriptors - ) + actual.getApiKeyInfos(), + arrayContaining( + new ApiKey( + "api-key-name-1", + "api-key-id-1", + type, + creation, + expiration, + false, + "user-x", + "realm-1", + metadata, + roleDescriptors, + limitedByRoleDescriptors ) - ); - } + ) + ); } - } public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { @@ -253,7 +246,7 @@ public void sendResponse(RestResponse restResponse) { final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsTrue = new GetApiKeyResponse(Collections.singletonList(apiKey1)); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsFalse = new GetApiKeyResponse(List.of(apiKey1, apiKey2)); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -274,24 +267,21 @@ public void doE listener.onResponse((Response) getApiKeyResponseExpectedWhenOwnerFlagIsFalse); } } - }) { - final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); + }; + final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); - restGetApiKeyAction.handleRequest(restRequest, restChannel, client); + restGetApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat(restResponse.status(), is(RestStatus.OK)); - final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (isGetRequestForOwnedKeysOnly) { - assertThat(actual.getApiKeyInfos().length, is(1)); - assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1)); - } else { - assertThat(actual.getApiKeyInfos().length, is(2)); - assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1, apiKey2)); - } + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (isGetRequestForOwnedKeysOnly) { + assertThat(actual.getApiKeyInfos().length, is(1)); + assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1)); + } else { + assertThat(actual.getApiKeyInfos().length, is(2)); + assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1, apiKey2)); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index e008a674b28fb..3c0e24da32763 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -88,7 +88,7 @@ public void sendResponse(RestResponse restResponse) { null ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @Override @SuppressWarnings("unchecked") public void doExecute( @@ -112,24 +112,19 @@ public void doE listener.onFailure(new ElasticsearchSecurityException("encountered an error while creating API key")); } } - }) { - final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); - - restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); - - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - assertThat(actual.getInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getInvalidatedApiKeys())); - assertThat( - actual.getPreviouslyInvalidatedApiKeys(), - equalTo(invalidateApiKeyResponseExpected.getPreviouslyInvalidatedApiKeys()) - ); - assertThat(actual.getErrors(), equalTo(invalidateApiKeyResponseExpected.getErrors())); - } + }; + final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); + + restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( + createParser(XContentType.JSON.xContent(), restResponse.content()) + ); + assertThat(actual.getInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getInvalidatedApiKeys())); + assertThat(actual.getPreviouslyInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getPreviouslyInvalidatedApiKeys())); + assertThat(actual.getErrors(), equalTo(invalidateApiKeyResponseExpected.getErrors())); } public void testInvalidateApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { @@ -165,7 +160,7 @@ public void sendResponse(RestResponse restResponse) { null ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -186,25 +181,23 @@ public void doE listener.onResponse((Response) invalidateApiKeyResponseExpectedWhenOwnerFlagIsFalse); } } - }) { - final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); - - restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); - - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat(restResponse.status(), is(RestStatus.OK)); - final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (isInvalidateRequestForOwnedKeysOnly) { - 
assertThat(actual.getInvalidatedApiKeys().size(), is(1)); - assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1")); - } else { - assertThat(actual.getInvalidatedApiKeys().size(), is(2)); - assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1", "api-key-id-2")); - } - } + }; + final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); + + restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( + createParser(XContentType.JSON.xContent(), restResponse.content()) + ); + if (isInvalidateRequestForOwnedKeysOnly) { + assertThat(actual.getInvalidatedApiKeys().size(), is(1)); + assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1")); + } else { + assertThat(actual.getInvalidatedApiKeys().size(), is(2)); + assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1", "api-key-id-2")); + } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index 83e8dbb96a41e..67d2ab006eb22 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -102,7 +102,7 @@ public void sendResponse(RestResponse restResponse) { } }; - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -127,10 +127,9 @@ public void doE assertThat(((PrefixQueryBuilder) shouldQueryBuilder).fieldName(), equalTo("metadata.environ")); listener.onResponse((Response) new QueryApiKeyResponse(0, List.of())); } - }) { - final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); - restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); - } + }; + final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); + restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); assertNotNull(responseSetOnce.get()); } @@ -160,7 +159,7 @@ public void sendResponse(RestResponse restResponse) { } }; - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -192,10 +191,10 @@ public void doE listener.onResponse((Response) new QueryApiKeyResponse(0, List.of())); } - }) { - final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); - restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); - } + }; + + final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); + restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); assertNotNull(responseSetOnce.get()); } diff --git 
a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 0ab2adfc44639..729cb8ef47292 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -106,13 +106,11 @@ public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { Settings.EMPTY, Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(SLM_HISTORY_INDEX_ENABLED_SETTING)) ); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { VerifyingClient client = new VerifyingClient(threadPool, (a, r, l) -> { fail("should not have tried to take a snapshot"); return null; - }) - ) { + }); SnapshotHistoryStore historyStore = new VerifyingHistoryStore( null, clusterService, @@ -173,8 +171,7 @@ public void testCreateSnapshotOnTrigger() { final AtomicBoolean clientCalled = new AtomicBoolean(false); final SetOnce snapshotName = new SetOnce<>(); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { // This verifying client will verify that we correctly invoked // client.admin().createSnapshot(...) with the appropriate // request. It also returns a mock real response @@ -202,8 +199,7 @@ public void testCreateSnapshotOnTrigger() { fail("failed to parse snapshot response"); return null; } - }) - ) { + }); final AtomicBoolean historyStoreCalled = new AtomicBoolean(false); SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, clusterService, item -> { assertFalse(historyStoreCalled.getAndSet(true)); @@ -247,8 +243,7 @@ public void testPartialFailureSnapshot() throws Exception { ); final AtomicBoolean clientCalled = new AtomicBoolean(false); final SetOnce snapshotName = new SetOnce<>(); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { VerifyingClient client = new VerifyingClient(threadPool, (action, request, listener) -> { assertFalse(clientCalled.getAndSet(true)); assertThat(action, instanceOf(CreateSnapshotAction.class)); @@ -285,8 +280,8 @@ public void testPartialFailureSnapshot() throws Exception { Collections.emptyMap() ) ); - }) - ) { + }); + final AtomicBoolean historyStoreCalled = new AtomicBoolean(false); SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, clusterService, item -> { assertFalse(historyStoreCalled.getAndSet(true)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java index cb31c282b73e4..6b2e23594ec3f 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java @@ -68,7 +68,6 @@ public void setup() { public void tearDown() throws Exception { super.tearDown(); clusterService.stop(); - client.close(); 
threadPool.shutdownNow(); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java index 8549f669dda0d..cab7377695b0a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java @@ -89,9 +89,6 @@ public class TransformPrivilegeCheckerTests extends ESTestCase { @Before public void setupClient() { - if (client != null) { - client.close(); - } threadPool = createThreadPool(); client = new MyMockClient(threadPool); securityContext = new SecurityContext(Settings.EMPTY, threadPool.getThreadContext()) { @@ -103,7 +100,6 @@ public User getUser() { @After public void tearDownClient() { - client.close(); threadPool.shutdown(); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index cbe233f0d911d..f0fc8686840e1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -107,44 +107,43 @@ public static void main(String[] args) throws Exception { ) ).start() ) { - try (Client client = node.client()) { - ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); - if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { - throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); - } + final Client client = node.client(); + ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { + throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); + } - client.admin().indices().prepareDelete("_all").get(); - client.admin().indices().prepareCreate("test").get(); - client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - - System.out.println("===============> indexing [" + numWatches + "] watches"); - for (int i = 0; i < numWatches; i++) { - final String id = "_id_" + i; - client.prepareIndex() - .setIndex(Watch.INDEX) - .setId(id) - .setSource( - new WatchSourceBuilder().trigger(schedule(interval(interval + "s"))) - .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) - .condition( - new ScriptCondition( - new Script( - ScriptType.INLINE, - Script.DEFAULT_SCRIPT_LANG, - "ctx.payload.hits.total.value > 0", - emptyMap() - ) + client.admin().indices().prepareDelete("_all").get(); + client.admin().indices().prepareCreate("test").get(); + client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + + System.out.println("===============> indexing [" + numWatches + "] watches"); + for (int i = 0; i < numWatches; i++) { + final String id = "_id_" + i; + client.prepareIndex() + .setIndex(Watch.INDEX) + .setId(id) + .setSource( + new 
WatchSourceBuilder().trigger(schedule(interval(interval + "s"))) + .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) + .condition( + new ScriptCondition( + new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "ctx.payload.hits.total.value > 0", + emptyMap() ) ) - .addAction("logging", ActionBuilders.loggingAction("test").setLevel(LoggingLevel.TRACE)) - .buildAsBytes(XContentType.JSON), - XContentType.JSON - ) - .get(); - } - client.admin().indices().prepareFlush(Watch.INDEX, "test").get(); - System.out.println("===============> indexed [" + numWatches + "] watches"); + ) + .addAction("logging", ActionBuilders.loggingAction("test").setLevel(LoggingLevel.TRACE)) + .buildAsBytes(XContentType.JSON), + XContentType.JSON + ) + .get(); } + client.admin().indices().prepareFlush(Watch.INDEX, "test").get(); + System.out.println("===============> indexed [" + numWatches + "] watches"); } // Now for each scheduler impl run the benchmark @@ -160,90 +159,89 @@ public static void main(String[] args) throws Exception { .put("node.data", false) .build(); try (Node node = new MockNode(settings, Arrays.asList(LocalStateWatcher.class))) { - try (Client client = node.client()) { - client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); - client.admin().indices().prepareDelete(HistoryStoreField.DATA_STREAM + "*").get(); - client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); - - Clock clock = node.injector().getInstance(Clock.class); - while (new WatcherStatsRequestBuilder(client).get() - .getNodes() - .stream() - .allMatch(r -> r.getWatcherState() == WatcherState.STARTED) == false) { - Thread.sleep(100); - } - long actualLoadedWatches = new WatcherStatsRequestBuilder(client).get().getWatchesCount(); - if (actualLoadedWatches != numWatches) { - throw new IllegalStateException( - "Expected [" - + numWatches - + "] watched to be loaded, but only [" - + actualLoadedWatches - + "] watches were actually loaded" - ); - } - long startTime = clock.millis(); - System.out.println("==> watcher started, waiting [" + benchTime + "] seconds now..."); - - final AtomicBoolean start = new AtomicBoolean(true); - final MeanMetric jvmUsedHeapSpace = new MeanMetric(); - Thread sampleThread = new Thread(new Runnable() { - @Override - public void run() { - try { - while (start.get()) { - NodesStatsResponse response = client.admin().cluster().prepareNodesStats("_master").setJvm(true).get(); - ByteSizeValue heapUsed = response.getNodes().get(0).getJvm().getMem().getHeapUsed(); - jvmUsedHeapSpace.inc(heapUsed.getBytes()); - Thread.sleep(1000); - } - } catch (InterruptedException ignored) {} - } - }); - sampleThread.start(); - Thread.sleep(benchTime); - long endTime = clock.millis(); - start.set(false); - sampleThread.join(); - - NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); - for (NodeStats nodeStats : response.getNodes()) { - for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { - if ("watcher".equals(threadPoolStats.name())) { - stats.setWatcherThreadPoolStats(threadPoolStats); - } - } - } - client.admin().indices().prepareRefresh(HistoryStoreField.DATA_STREAM + "*").get(); - Script script = new Script( - ScriptType.INLINE, - Script.DEFAULT_SCRIPT_LANG, - "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", - emptyMap() + final Client client = node.client(); + 
client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + client.admin().indices().prepareDelete(HistoryStoreField.DATA_STREAM + "*").get(); + client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); + + Clock clock = node.injector().getInstance(Clock.class); + while (new WatcherStatsRequestBuilder(client).get() + .getNodes() + .stream() + .allMatch(r -> r.getWatcherState() == WatcherState.STARTED) == false) { + Thread.sleep(100); + } + long actualLoadedWatches = new WatcherStatsRequestBuilder(client).get().getWatchesCount(); + if (actualLoadedWatches != numWatches) { + throw new IllegalStateException( + "Expected [" + + numWatches + + "] watched to be loaded, but only [" + + actualLoadedWatches + + "] watches were actually loaded" ); - SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") - .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) - .addAggregation(terms("state").field("state")) - .addAggregation(histogram("delay").script(script).interval(10)) - .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0)) - .get(); - Terms terms = searchResponse.getAggregations().get("state"); - stats.setStateStats(terms); - Histogram histogram = searchResponse.getAggregations().get("delay"); - stats.setDelayStats(histogram); - System.out.println("===> State"); - for (Terms.Bucket bucket : terms.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + long startTime = clock.millis(); + System.out.println("==> watcher started, waiting [" + benchTime + "] seconds now..."); + + final AtomicBoolean start = new AtomicBoolean(true); + final MeanMetric jvmUsedHeapSpace = new MeanMetric(); + Thread sampleThread = new Thread(new Runnable() { + @Override + public void run() { + try { + while (start.get()) { + NodesStatsResponse response = client.admin().cluster().prepareNodesStats("_master").setJvm(true).get(); + ByteSizeValue heapUsed = response.getNodes().get(0).getJvm().getMem().getHeapUsed(); + jvmUsedHeapSpace.inc(heapUsed.getBytes()); + Thread.sleep(1000); + } + } catch (InterruptedException ignored) {} } - System.out.println("===> Delay"); - for (Histogram.Bucket bucket : histogram.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + }); + sampleThread.start(); + Thread.sleep(benchTime); + long endTime = clock.millis(); + start.set(false); + sampleThread.join(); + + NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); + for (NodeStats nodeStats : response.getNodes()) { + for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { + if ("watcher".equals(threadPoolStats.name())) { + stats.setWatcherThreadPoolStats(threadPoolStats); + } } - Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); - stats.setDelayPercentiles(percentiles); - stats.setAvgJvmUsed(jvmUsedHeapSpace); - new WatcherServiceRequestBuilder(client).stop().get(); } + client.admin().indices().prepareRefresh(HistoryStoreField.DATA_STREAM + "*").get(); + Script script = new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", + emptyMap() + ); + SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") + 
.setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime))
+                .addAggregation(terms("state").field("state"))
+                .addAggregation(histogram("delay").script(script).interval(10))
+                .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0))
+                .get();
+            Terms terms = searchResponse.getAggregations().get("state");
+            stats.setStateStats(terms);
+            Histogram histogram = searchResponse.getAggregations().get("delay");
+            stats.setDelayStats(histogram);
+            System.out.println("===> State");
+            for (Terms.Bucket bucket : terms.getBuckets()) {
+                System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount());
+            }
+            System.out.println("===> Delay");
+            for (Histogram.Bucket bucket : histogram.getBuckets()) {
+                System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount());
+            }
+            Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay");
+            stats.setDelayPercentiles(percentiles);
+            stats.setAvgJvmUsed(jvmUsedHeapSpace);
+            new WatcherServiceRequestBuilder(client).stop().get();
         }
     }

From b4d5f6279f2d4526b861ddbc0ff5cd231ab14fad Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 7 Nov 2023 13:46:48 +0100
Subject: [PATCH 12/30] Fix forbidden APIs checks ignoring signature files (#101866)

We exposed an issue with our forbidden APIs checks when backporting
#101705. This provides a fix for the forbidden APIs checks and a fix for
one forbidden API call that sneaked in.
---
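Note: the one-line change to CheckForbiddenApisTask below wires the configured
signature files into the worker parameters; without it the check apparently ran
without the configured signatures and could let violations through, which is how
the forbidden call in the repository-s3 test went unnoticed. As a rough
illustration of the test-side fix (the class and test names here are made up,
not part of this patch), the point is to prefer the ESTestCase temp-dir helper,
which the test framework tracks and cleans up, over the raw JDK call that the
forbidden-APIs check rejects:

    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.elasticsearch.test.ESTestCase;

    public class TempDirUsageTests extends ESTestCase {
        public void testUsesFrameworkTempDir() throws Exception {
            // Forbidden API call: creates a directory outside the test framework's control.
            // Path configDirectory = Files.createTempDirectory("web-identity-token-test");

            // Preferred: the LuceneTestCase/ESTestCase helper, cleaned up after the test.
            Path configDirectory = createTempDir("web-identity-token-test");
            Files.createDirectory(configDirectory.resolve("repository-s3"));
        }
    }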
 .../gradle/internal/precommit/CheckForbiddenApisTask.java  | 1 +
 .../s3/CustomWebIdentityTokenCredentialsProviderTests.java | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
index 194d0361980ec..bb0b8dcf04437 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
@@ -377,6 +377,7 @@ public void checkForbidden() {
             parameters.getTargetCompatibility().set(getTargetCompatibility());
             parameters.getIgnoreFailures().set(getIgnoreFailures());
             parameters.getSuccessMarker().set(getSuccessMarker());
+            parameters.getSignaturesFiles().from(getSignaturesFiles());
         });
     }

diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
index f245b1ad91fe4..cecb0cd147897 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java
@@ -44,7 +44,7 @@ public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase {
     private static final String ROLE_NAME = "aws-sdk-java-1651084775908";

     private static Environment getEnvironment() throws IOException {
-        Path configDirectory = Files.createTempDirectory("web-identity-token-test");
+        Path configDirectory = createTempDir("web-identity-token-test");
         Files.createDirectory(configDirectory.resolve("repository-s3"));
         Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl");

         Environment environment = Mockito.mock(Environment.class);

From 31ca2f72d3bff000fb2876950391d5b8b1e0a0e8 Mon Sep 17 00:00:00 2001
From: Valeriy Khakhutskyy <1292899+valeriy42@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:13:42 +0100
Subject: [PATCH 13/30] Revert Revert "[ML] Use perAllocation and perDeployment memory usage in the model assignment planner" (#101853)

The original PR #98874 missed the memory overhead adjustment from #86416.
As it caused some BWC test failures on CI, I reverted it in #101834.

This PR reintegrates the functionality and extends the BWC integration
test with a memory constant that depends on the version of the old
cluster.
---
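Note: the planner must not mix old and new memory accounting while a rolling
upgrade is in progress, so the new per-deployment/per-allocation memory fields
are only used once every node in the cluster understands them. A minimal sketch
of that gate, assuming the gating version is the V_8_500_064 transport version
referenced in the diff below (the class here is made up; the real check lives in
TrainedModelAssignment and is fed the cluster's minimum transport version):

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.TransportVersions;

    final class MemoryFieldsGate {
        // Assumption: V_8_500_064 is the version that introduced the new memory fields.
        private static final TransportVersion NEW_ALLOCATION_MEMORY_VERSION = TransportVersions.V_8_500_064;

        // True only when the oldest node in the cluster already speaks the new
        // transport version; otherwise the old memory fields keep being used so
        // mixed-version clusters do not flip between different assignment plans.
        static boolean useNewMemoryFields(TransportVersion minClusterTransportVersion) {
            return minClusterTransportVersion.onOrAfter(NEW_ALLOCATION_MEMORY_VERSION);
        }
    }

 docs/changelog/98874.yaml                     |   5 +
 .../assignment/TrainedModelAssignment.java    |   5 +
 .../TransportGetTrainedModelsStatsAction.java |  24 +-
 .../TrainedModelAssignmentClusterService.java |   7 +-
 .../TrainedModelAssignmentRebalancer.java     |  36 +-
 .../planning/AbstractPreserveAllocations.java |  42 +-
 .../assignment/planning/AssignmentPlan.java   | 139 +++-
 .../planning/AssignmentPlanner.java           |  11 +-
 .../planning/LinearProgrammingPlanSolver.java |  29 +-
 .../planning/PreserveAllAllocations.java      |   2 +-
 .../planning/PreserveOneAllocation.java       |   2 +-
 .../RandomizedAssignmentRounding.java         |  46 +-
 .../planning/ZoneAwareAssignmentPlanner.java  |  16 +-
 ...TrainedModelAssignmentRebalancerTests.java |  81 +-
 .../planning/AssignmentPlanTests.java         | 511 ++++++++++---
 .../planning/AssignmentPlannerTests.java      | 698 +++++++++++++++---
 .../planning/PreserveAllAllocationsTests.java | 228 ++++--
 .../planning/PreserveOneAllocationTests.java  | 264 +++++--
 .../ZoneAwareAssignmentPlannerTests.java      | 126 +++-
 .../MlAssignmentPlannerUpgradeIT.java         | 289 ++++++++
 20 files changed, 2078 insertions(+), 483 deletions(-)
 create mode 100644 docs/changelog/98874.yaml
 create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java

diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml
new file mode 100644
index 0000000000000..e3eb7b5acc63f
--- /dev/null
+++ b/docs/changelog/98874.yaml
@@ -0,0 +1,5 @@
+pr: 98874
+summary: Estimate the memory required to deploy trained models more accurately
+area: Machine Learning
+type: enhancement
+issues: []

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
index f69be31939b32..d27d325a5c596 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
@@ -9,6 +9,7 @@

 import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.TransportVersion;
 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.Randomness;
@@ -96,6 +97,10 @@ public final class TrainedModelAssignment implements SimpleDiffable 0L ? StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( model.getModelId(), totalDefinitionLength, useNewMemoryFields ?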
model.getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? model.getPerAllocationMemoryBytes() : 0, numberOfAllocations ) : 0L; modelSizeStatsByModelId.put( model.getModelId(), - new TrainedModelSizeStats( - totalDefinitionLength, - totalDefinitionLength > 0L - ? StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( - model.getModelId(), - totalDefinitionLength, - model.getPerDeploymentMemoryBytes(), - model.getPerAllocationMemoryBytes(), - numberOfAllocations - ) - : 0L - ) + new TrainedModelSizeStats(totalDefinitionLength, estimatedMemoryUsageBytes) ); } else { modelSizeStatsByModelId.put(model.getModelId(), new TrainedModelSizeStats(model.getModelSize(), 0)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index 2caf338d2a3c7..fe4462d6556ee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; +import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.autoscaling.NodeAvailabilityZoneMapper; import org.elasticsearch.xpack.ml.inference.assignment.planning.AllocationReducer; @@ -76,6 +77,8 @@ public class TrainedModelAssignmentClusterService implements ClusterStateListene private static final TransportVersion RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; public static final TransportVersion DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION = TransportVersions.V_8_4_0; + private static final TransportVersion NEW_ALLOCATION_MEMORY_VERSION = TransportVersions.V_8_500_064; + private final ClusterService clusterService; private final ThreadPool threadPool; private final NodeLoadDetector nodeLoadDetector; @@ -644,12 +647,14 @@ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments( Map nodeLoads = detectNodeLoads(nodes, currentState); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.fromState(currentState); + boolean useNewMemoryFields = TrainedModelAssignment.useNewMemoryFields(TransportVersionUtils.getMinTransportVersion(currentState)); TrainedModelAssignmentRebalancer rebalancer = new TrainedModelAssignmentRebalancer( currentMetadata, nodeLoads, nodeAvailabilityZoneMapper.buildMlNodesByAvailabilityZone(currentState), modelToAdd, - allocatedProcessorsScale + allocatedProcessorsScale, + useNewMemoryFields ); Set shuttingDownNodeIds = currentState.metadata().nodeShutdowns().getAllNodeIds(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index e1241dc8a93c3..6e6b447fcea3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -52,18 +52,22 @@ class TrainedModelAssignmentRebalancer { private final Optional deploymentToAdd; private final int allocatedProcessorsScale; + private final boolean useNewMemoryFields; + TrainedModelAssignmentRebalancer( TrainedModelAssignmentMetadata currentMetadata, Map nodeLoads, Map, Collection> mlNodesByZone, Optional deploymentToAdd, - int allocatedProcessorsScale + int allocatedProcessorsScale, + boolean useNewMemoryFields ) { this.currentMetadata = Objects.requireNonNull(currentMetadata); this.nodeLoads = Objects.requireNonNull(nodeLoads); this.mlNodesByZone = Objects.requireNonNull(mlNodesByZone); this.deploymentToAdd = Objects.requireNonNull(deploymentToAdd); this.allocatedProcessorsScale = allocatedProcessorsScale; + this.useNewMemoryFields = useNewMemoryFields; } TrainedModelAssignmentMetadata.Builder rebalance() { @@ -138,9 +142,11 @@ private static void copyAssignments( AssignmentPlan.Node originalNode = originalNodeById.get(assignment.getKey().id()); dest.assignModelToNode(m, originalNode, assignment.getValue()); if (m.currentAllocationsByNodeId().containsKey(originalNode.id())) { + // TODO (#101612) requiredMemory should be calculated by the AssignmentPlan.Builder // As the node has all its available memory we need to manually account memory of models with // current allocations. - dest.accountMemory(m, originalNode); + long requiredMemory = m.estimateMemoryUsageBytes(m.currentAllocationsByNodeId().get(originalNode.id())); + dest.accountMemory(m, originalNode, requiredMemory); } } } @@ -168,11 +174,14 @@ private AssignmentPlan computePlanForNormalPriorityModels( .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getTargetAllocations())); return new AssignmentPlan.Deployment( assignment.getDeploymentId(), - assignment.getTaskParams().estimateMemoryUsageBytes(), + assignment.getTaskParams().getModelBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), currentAssignments, - assignment.getMaxAssignedAllocations() + assignment.getMaxAssignedAllocations(), + // in the mixed cluster state use old memory fields to avoid unstable assignment plans + useNewMemoryFields ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ); }) .forEach(planDeployments::add); @@ -181,11 +190,14 @@ private AssignmentPlan computePlanForNormalPriorityModels( planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), - taskParams.estimateMemoryUsageBytes(), + taskParams.getModelBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), Map.of(), - 0 + 0, + // in the mixed cluster state use old memory fields to avoid unstable assignment plans + useNewMemoryFields ? taskParams.getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? 
taskParams.getPerAllocationMemoryBytes() : 0 ) ); } @@ -217,12 +229,14 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod .map( assignment -> new AssignmentPlan.Deployment( assignment.getDeploymentId(), - assignment.getTaskParams().estimateMemoryUsageBytes(), + assignment.getTaskParams().getModelBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), findFittingAssignments(assignment, assignableNodeIds, remainingNodeMemory), assignment.getMaxAssignedAllocations(), - Priority.LOW + Priority.LOW, + (useNewMemoryFields == false) ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, + (useNewMemoryFields == false) ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ) ) .forEach(planDeployments::add); @@ -231,12 +245,14 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), - taskParams.estimateMemoryUsageBytes(), + taskParams.getModelBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), Map.of(), 0, - Priority.LOW + Priority.LOW, + (useNewMemoryFields == false) ? taskParams.getPerDeploymentMemoryBytes() : 0, + (useNewMemoryFields == false) ? taskParams.getPerAllocationMemoryBytes() : 0 ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index 4843cc43d1187..026b433a8c2d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -35,7 +35,8 @@ private Node modifyNodePreservingAllocations(Node n) { int coresUsed = 0; for (Deployment m : deployments) { if (m.currentAllocationsByNodeId().containsKey(n.id())) { - bytesUsed += m.memoryBytes(); + int allocations = m.currentAllocationsByNodeId().get(n.id()); + bytesUsed += m.estimateMemoryUsageBytes(allocations); coresUsed += calculateUsedCores(n, m); } } @@ -58,7 +59,9 @@ Deployment modifyModelPreservingPreviousAssignments(Deployment m) { m.allocations() - calculatePreservedAllocations(m), m.threadsPerAllocation(), calculateAllocationsPerNodeToPreserve(m), - m.maxAssignedAllocations() + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ); } @@ -67,28 +70,37 @@ AssignmentPlan mergePreservedAllocations(AssignmentPlan assignmentPlan) { // they will not match the models/nodes members we have in this class. // Therefore, we build a lookup table based on the ids so we can merge the plan // with its preserved allocations. 
- final Map, Integer> assignmentsByModelNodeIdPair = new HashMap<>(); + final Map, Integer> plannedAssignmentsByModelNodeIdPair = new HashMap<>(); for (Deployment m : assignmentPlan.models()) { Map assignments = assignmentPlan.assignments(m).orElse(Map.of()); for (Map.Entry nodeAssignment : assignments.entrySet()) { - assignmentsByModelNodeIdPair.put(Tuple.tuple(m.id(), nodeAssignment.getKey().id()), nodeAssignment.getValue()); + plannedAssignmentsByModelNodeIdPair.put(Tuple.tuple(m.id(), nodeAssignment.getKey().id()), nodeAssignment.getValue()); } } AssignmentPlan.Builder mergedPlanBuilder = AssignmentPlan.builder(nodes, deployments); - for (Deployment m : deployments) { - for (Node n : nodes) { - int allocations = assignmentsByModelNodeIdPair.getOrDefault(Tuple.tuple(m.id(), n.id()), 0); - if (m.currentAllocationsByNodeId().containsKey(n.id())) { - if (mergedPlanBuilder.getRemainingMemory(n) >= m.memoryBytes()) { - allocations += addPreservedAllocations(n, m); - // As the node has all its available memory we need to manually account memory of models with - // current allocations. - mergedPlanBuilder.accountMemory(m, n); + for (Node n : nodes) { + // TODO (#101612) Should the first loop happen in the builder constructor? + for (Deployment deploymentAllocationsToPreserve : deployments) { + + // if the model m is already allocated on the node n and I want to preserve this allocation + int preservedAllocations = addPreservedAllocations(n, deploymentAllocationsToPreserve); + if (preservedAllocations > 0) { + long requiredMemory = deploymentAllocationsToPreserve.estimateMemoryUsageBytes(preservedAllocations); + if (mergedPlanBuilder.canAssign(deploymentAllocationsToPreserve, n, preservedAllocations, requiredMemory)) { + mergedPlanBuilder.assignModelToNode(deploymentAllocationsToPreserve, n, preservedAllocations, requiredMemory); } } - if (allocations > 0) { - mergedPlanBuilder.assignModelToNode(m, n, allocations); + } + for (Deployment deploymentNewAllocations : deployments) { + int newAllocations = plannedAssignmentsByModelNodeIdPair.getOrDefault( + Tuple.tuple(deploymentNewAllocations.id(), n.id()), + 0 + ); + + long requiredMemory = mergedPlanBuilder.getDeploymentMemoryRequirement(deploymentNewAllocations, n, newAllocations); + if (newAllocations > 0 && mergedPlanBuilder.canAssign(deploymentNewAllocations, n, newAllocations, requiredMemory)) { + mergedPlanBuilder.assignModelToNode(deploymentNewAllocations, n, newAllocations); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 72a83d7579463..1dce7f0bb46ba 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import java.util.ArrayList; @@ -36,18 +37,32 @@ public record Deployment( int threadsPerAllocation, Map currentAllocationsByNodeId, int maxAssignedAllocations, - Priority priority + Priority priority, + long perDeploymentMemoryBytes, + long perAllocationMemoryBytes ) { public 
Deployment( String id, - long memoryBytes, + long modelBytes, int allocations, int threadsPerAllocation, Map currentAllocationsByNodeId, - int maxAssignedAllocations + int maxAssignedAllocations, + long perDeploymentMemoryBytes, + long perAllocationMemoryBytes ) { - this(id, memoryBytes, allocations, threadsPerAllocation, currentAllocationsByNodeId, maxAssignedAllocations, Priority.NORMAL); + this( + id, + modelBytes, + allocations, + threadsPerAllocation, + currentAllocationsByNodeId, + maxAssignedAllocations, + Priority.NORMAL, + perDeploymentMemoryBytes, + perAllocationMemoryBytes + ); } int getCurrentAssignedAllocations() { @@ -58,6 +73,60 @@ boolean hasEverBeenAllocated() { return maxAssignedAllocations > 0; } + public long estimateMemoryUsageBytes(int allocations) { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocations + ); + } + + long estimateAdditionalMemoryUsageBytes(int allocationsOld, int allocationsNew) { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocationsNew + ) - StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocationsOld + ); + + } + + long minimumMemoryRequiredBytes() { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + 1 + ); + } + + int findOptimalAllocations(int maxAllocations, long availableMemoryBytes) { + if (perDeploymentMemoryBytes > 0 && perAllocationMemoryBytes > 0) { + return (int) Math.max( + Math.min(maxAllocations, Math.floorDiv(availableMemoryBytes - estimateMemoryUsageBytes(0), perAllocationMemoryBytes)), + 0 + ); + } + return maxAllocations; + } + + int findExcessAllocations(int maxAllocations, long availableMemoryBytes) { + if (perDeploymentMemoryBytes > 0 && perAllocationMemoryBytes > 0) { + return (int) Math.min(maxAllocations, Math.floorDiv(availableMemoryBytes, perAllocationMemoryBytes)); + } + return maxAllocations; + } + @Override public String toString() { return id @@ -71,6 +140,8 @@ public String toString() { + currentAllocationsByNodeId + ") (max_assigned_allocations = " + maxAssignedAllocations + + ") (memory_usage = " + + ByteSizeValue.ofBytes(estimateMemoryUsageBytes(allocations)) + ")"; } }; @@ -304,19 +375,42 @@ int getRemainingAllocations(Deployment m) { } boolean canAssign(Deployment deployment, Node node, int allocations) { - return (isAlreadyAssigned(deployment, node) - || (deployment.memoryBytes() <= remainingNodeMemory.get(node)) - && (deployment.priority == Priority.LOW - || allocations * deployment.threadsPerAllocation() <= remainingNodeCores.get(node))); + long requiredMemory = getDeploymentMemoryRequirement(deployment, node, allocations); + return canAssign(deployment, node, allocations, requiredMemory); + } + + boolean canAssign(Deployment deployment, Node node, int allocations, long requiredMemory) { + return (requiredMemory <= remainingNodeMemory.get(node)) + && (deployment.priority == Priority.LOW || allocations * deployment.threadsPerAllocation() <= remainingNodeCores.get(node)); + } + + public long getDeploymentMemoryRequirement(Deployment deployment, Node node, int newAllocations) { + int assignedAllocations = getAssignedAllocations(deployment, node); + + if (assignedAllocations > 0) { + return 
deployment.estimateAdditionalMemoryUsageBytes(assignedAllocations, assignedAllocations + newAllocations); + } + return deployment.estimateMemoryUsageBytes(newAllocations); } public Builder assignModelToNode(Deployment deployment, Node node, int allocations) { + return assignModelToNode(deployment, node, allocations, getDeploymentMemoryRequirement(deployment, node, allocations)); + } + + public Builder assignModelToNode(Deployment deployment, Node node, int allocations, long requiredMemory) { if (allocations <= 0) { return this; } - if (isAlreadyAssigned(deployment, node) == false && deployment.memoryBytes() > remainingNodeMemory.get(node)) { + if (/*isAlreadyAssigned(deployment, node) == false + &&*/ requiredMemory > remainingNodeMemory.get(node)) { throw new IllegalArgumentException( - "not enough memory on node [" + node.id() + "] to assign model [" + deployment.id() + "]" + "not enough memory on node [" + + node.id() + + "] to assign [" + + allocations + + "] allocations to deployment [" + + deployment.id() + + "]" ); } if (deployment.priority == Priority.NORMAL && allocations * deployment.threadsPerAllocation() > remainingNodeCores.get(node)) { @@ -333,9 +427,9 @@ public Builder assignModelToNode(Deployment deployment, Node node, int allocatio ); } - long additionalModelMemory = isAlreadyAssigned(deployment, node) ? 0 : deployment.memoryBytes; assignments.get(deployment).compute(node, (n, remAllocations) -> remAllocations + allocations); - remainingNodeMemory.compute(node, (n, remMemory) -> remMemory - additionalModelMemory); + accountMemory(deployment, node, requiredMemory); + if (deployment.priority == Priority.NORMAL) { remainingNodeCores.compute(node, (n, remCores) -> remCores - allocations * deployment.threadsPerAllocation()); } @@ -347,9 +441,26 @@ private boolean isAlreadyAssigned(Deployment deployment, Node node) { return deployment.currentAllocationsByNodeId().containsKey(node.id()) || assignments.get(deployment).get(node) > 0; } + private int getAssignedAllocations(Deployment deployment, Node node) { + int currentAllocations = getCurrentAllocations(deployment, node); + int assignmentAllocations = assignments.get(deployment).get(node); + return currentAllocations + assignmentAllocations; + } + + private static int getCurrentAllocations(Deployment m, Node n) { + return m.currentAllocationsByNodeId.containsKey(n.id()) ? 
m.currentAllocationsByNodeId.get(n.id()) : 0;
+    }

     public void accountMemory(Deployment m, Node n) {
-        remainingNodeMemory.computeIfPresent(n, (k, v) -> v - m.memoryBytes());
-        if (remainingNodeMemory.get(n) < 0) {
+        // TODO (#101612) remove or refactor unused method
+        long requiredMemory = getDeploymentMemoryRequirement(m, n, getCurrentAllocations(m, n));
+        accountMemory(m, n, requiredMemory);
+    }
+
+    public void accountMemory(Deployment m, Node n, long requiredMemory) {
+        // TODO (#101612) computation of required memory should be done internally
+        remainingNodeMemory.computeIfPresent(n, (k, v) -> v - requiredMemory);
+        if (remainingNodeMemory.containsKey(n) && remainingNodeMemory.get(n) < 0) {
             throw new IllegalArgumentException("not enough memory on node [" + n.id() + "] to assign model [" + m.id() + "]");
         }
     }
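Note: with the two new fields a deployment's memory requirement is no longer a
single constant but a function of its allocation count. A self-contained sketch
of the arithmetic behind Deployment#findOptimalAllocations, assuming the estimate
is linear in the allocation count, i.e. memory(a) = memory(0) + a * perAllocationMemoryBytes
(the real memory(0) comes from StartTrainedModelDeploymentAction.estimateMemoryUsageBytes
and includes a fixed native overhead):

    // Illustrative only: largest allocation count that still fits the memory budget.
    static int findOptimalAllocations(int maxAllocations, long availableMemoryBytes,
                                      long memoryAtZeroAllocations, long perAllocationMemoryBytes) {
        if (perAllocationMemoryBytes <= 0) {
            // Old memory format: extra allocations cost no extra memory.
            return maxAllocations;
        }
        long budgetForAllocations = availableMemoryBytes - memoryAtZeroAllocations;
        return (int) Math.max(Math.min(maxAllocations, Math.floorDiv(budgetForAllocations, perAllocationMemoryBytes)), 0);
    }

For example (made-up numbers): with 1024 MiB available, 400 MiB needed at zero
allocations and 100 MiB per allocation, at most floor((1024 - 400) / 100) = 6
allocations fit, even if more were requested.

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
index 73b713cced32a..b1c017b1a784c 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
@@ -115,8 +115,11 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat
                     m.memoryBytes(),
                     1,
                     m.threadsPerAllocation(),
-                    m.currentAllocationsByNodeId(),
-                    m.maxAssignedAllocations()
+                    // don't rely on the current allocation
+                    new HashMap<>(),
+                    m.maxAssignedAllocations(),
+                    m.perDeploymentMemoryBytes(),
+                    m.perAllocationMemoryBytes()
                 )
             )
             .toList();
@@ -145,7 +148,9 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat
                 m.allocations(),
                 m.threadsPerAllocation(),
                 currentAllocationsByNodeId,
-                m.maxAssignedAllocations()
+                m.maxAssignedAllocations(),
+                m.perDeploymentMemoryBytes(),
+                m.perAllocationMemoryBytes()
             );
         }).toList();

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
index 90c5a2257d94d..bd97680e285cc 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
@@ -68,6 +68,8 @@ class LinearProgrammingPlanSolver {
     private final Map normalizedMemoryPerNode;
     private final Map coresPerNode;
     private final Map normalizedMemoryPerModel;
+    private final Map normalizedMemoryPerAllocation;
+    private final Map normalizedMinimumDeploymentMemoryRequired;

     private final int maxNodeCores;
     private final long maxModelMemoryBytes;
@@ -84,12 +86,17 @@ class LinearProgrammingPlanSolver {
             .filter(m -> m.threadsPerAllocation() <= maxNodeCores)
             .toList();

-        maxModelMemoryBytes = this.deployments.stream().map(AssignmentPlan.Deployment::memoryBytes).max(Long::compareTo).orElse(1L);
+        // We use the maximum memory to deploy a model with one allocation as the normalization factor.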
+ maxModelMemoryBytes = this.deployments.stream().map(m -> m.minimumMemoryRequiredBytes()).max(Long::compareTo).orElse(1L); normalizedMemoryPerNode = this.nodes.stream() .collect(Collectors.toMap(Function.identity(), n -> n.availableMemoryBytes() / (double) maxModelMemoryBytes)); coresPerNode = this.nodes.stream().collect(Collectors.toMap(Function.identity(), Node::cores)); normalizedMemoryPerModel = this.deployments.stream() - .collect(Collectors.toMap(Function.identity(), m -> m.memoryBytes() / (double) maxModelMemoryBytes)); + .collect(Collectors.toMap(Function.identity(), m -> m.estimateMemoryUsageBytes(0) / (double) maxModelMemoryBytes)); + normalizedMemoryPerAllocation = this.deployments.stream() + .collect(Collectors.toMap(Function.identity(), m -> m.perAllocationMemoryBytes() / (double) maxModelMemoryBytes)); + normalizedMinimumDeploymentMemoryRequired = this.deployments.stream() + .collect(Collectors.toMap(Function.identity(), m -> m.minimumMemoryRequiredBytes() / (double) maxModelMemoryBytes)); } AssignmentPlan solvePlan(boolean useBinPackingOnly) { @@ -133,8 +140,8 @@ private double weightForAllocationVar( Node n, Map, Double> weights ) { - return (1 + weights.get(Tuple.tuple(m, n)) - (m.memoryBytes() > n.availableMemoryBytes() ? 10 : 0)) - L1 * normalizedMemoryPerModel - .get(m) / maxNodeCores; + return (1 + weights.get(Tuple.tuple(m, n)) - (m.minimumMemoryRequiredBytes() > n.availableMemoryBytes() ? 10 : 0)) - L1 + * normalizedMemoryPerModel.get(m) / maxNodeCores; } private Tuple, Double>, AssignmentPlan> calculateWeightsAndBinPackingPlan() { @@ -156,9 +163,9 @@ private Tuple, Double>, AssignmentPlan> calculateWei .sorted(Comparator.comparingDouble(n -> descendingSizeAnyFitsNodeOrder(n, m, assignmentPlan))) .toList(); for (Node n : orderedNodes) { - int allocations = Math.min( - assignmentPlan.getRemainingCores(n) / m.threadsPerAllocation(), - assignmentPlan.getRemainingAllocations(m) + int allocations = m.findOptimalAllocations( + Math.min(assignmentPlan.getRemainingCores(n) / m.threadsPerAllocation(), assignmentPlan.getRemainingAllocations(m)), + assignmentPlan.getRemainingMemory(n) ); if (allocations > 0 && assignmentPlan.canAssign(m, n, allocations)) { assignmentPlan.assignModelToNode(m, n, allocations); @@ -185,7 +192,8 @@ private Tuple, Double>, AssignmentPlan> calculateWei } private double descendingSizeAnyFitsModelOrder(AssignmentPlan.Deployment m) { - return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -normalizedMemoryPerModel.get(m) * m.threadsPerAllocation(); + return (m.currentAllocationsByNodeId().isEmpty() ? 
1 : 2) * -normalizedMinimumDeploymentMemoryRequired.get(m) * m
+            .threadsPerAllocation();
     }

     private double descendingSizeAnyFitsNodeOrder(Node n, AssignmentPlan.Deployment m, AssignmentPlan.Builder assignmentPlan) {
@@ -307,7 +315,10 @@ private boolean solveLinearProgram(
             List modelMemories = new ArrayList<>();
             deployments.stream().filter(m -> m.currentAllocationsByNodeId().containsKey(n.id()) == false).forEach(m -> {
                 allocations.add(allocationVars.get(Tuple.tuple(m, n)));
-                modelMemories.add(normalizedMemoryPerModel.get(m) * m.threadsPerAllocation() / (double) coresPerNode.get(n));
+                modelMemories.add(
+                    (normalizedMemoryPerModel.get(m) / (double) coresPerNode.get(n) + normalizedMemoryPerAllocation.get(m)) * m
+                        .threadsPerAllocation()
+                );
             });
             model.addExpression("used_memory_on_node_" + n.id() + "_not_more_than_available")
                 .upper(normalizedMemoryPerNode.get(n))
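Note: reading the modelMemories expression and the used_memory_on_node_..._not_more_than_available
upper bound above directly, the per-node memory constraint of the linear program becomes, in the
normalized units used by the solver,

    \sum_{m} a_{m,n} \cdot \left( \frac{\tilde{b}_m}{c_n} + \tilde{p}_m \right) \cdot t_m \;\le\; \tilde{M}_n

where a_{m,n} is the allocation variable for deployment m on node n, \tilde{b}_m its normalized
base memory, \tilde{p}_m its normalized per-allocation memory, t_m its threads per allocation,
c_n the node's core count, and \tilde{M}_n the node's normalized memory. A deployment's base
memory is thus charged in proportion to the share of the node's cores its threads occupy, plus
the new per-allocation term.

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java
index f10ece8f5a593..72109941ad477 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java
@@ -37,6 +37,6 @@ protected int calculatePreservedAllocations(Deployment m) {

     @Override
     protected int addPreservedAllocations(Node n, Deployment m) {
-        return m.currentAllocationsByNodeId().get(n.id());
+        return m.currentAllocationsByNodeId().containsKey(n.id()) ? m.currentAllocationsByNodeId().get(n.id()) : 0;
     }
 }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java
index 324e1a8d69a53..43b8860803596 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java
@@ -37,6 +37,6 @@ protected int calculatePreservedAllocations(AssignmentPlan.Deployment m) {

     @Override
     protected int addPreservedAllocations(Node n, AssignmentPlan.Deployment m) {
-        return 1;
+        return m.currentAllocationsByNodeId().containsKey(n.id()) ?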
1 : 0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java index dafc07099f850..8bdc99998a0c2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java @@ -135,8 +135,9 @@ private void assignUnderSubscribedNodes(Collection nodeSelection) { for (AssignmentPlan.Deployment m : deployments) { Tuple assignment = Tuple.tuple(m, n); if (assignments.get(assignment) > 0) { - totalModelMemory += m.memoryBytes(); - maxTotalThreads += (int) Math.ceil(allocations.get(assignment)) * m.threadsPerAllocation(); + int roundedAllocations = (int) Math.ceil(allocations.get(assignment)); + totalModelMemory += m.estimateMemoryUsageBytes(roundedAllocations); + maxTotalThreads += roundedAllocations * m.threadsPerAllocation(); assignedDeployments.add(m); } } @@ -199,9 +200,12 @@ private void assignExcessCores(Node n) { if (resourceTracker.remainingNodeCores.get(n) <= 0) { break; } - int extraAllocations = Math.min( - resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), - resourceTracker.remainingModelAllocations.get(m) + int extraAllocations = m.findExcessAllocations( + Math.min( + resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), + resourceTracker.remainingModelAllocations.get(m) + ), + resourceTracker.remainingNodeMemory.get(n) ); allocations.compute(Tuple.tuple(m, n), (k, v) -> v + extraAllocations); resourceTracker.assign(m, n, extraAllocations); @@ -211,7 +215,7 @@ private void assignExcessCores(Node n) { } private static double remainingModelOrder(AssignmentPlan.Deployment m) { - return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -m.memoryBytes(); + return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -m.minimumMemoryRequiredBytes(); } private boolean hasSoftAssignments(Node n) { @@ -275,15 +279,17 @@ private void doRandomizedRounding(List> s int roundedAllocations = random.nextDouble() < roundUpProbability ? 
(int) Math.ceil(allocations.get(assignment)) : (int) Math.floor(allocations.get(assignment)); - - if (m.memoryBytes() > resourceTracker.remainingNodeMemory.get(n) + if (m.estimateMemoryUsageBytes(roundedAllocations) > resourceTracker.remainingNodeMemory.get(n) || m.threadsPerAllocation() > resourceTracker.remainingNodeCores.get(n) || roundedAllocations == 0 || random.nextDouble() > assignments.get(assignment)) { unassign(assignment); assignUnderSubscribedNodes(Set.of(n)); } else { - roundedAllocations = Math.min(roundedAllocations, resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation()); + roundedAllocations = m.findOptimalAllocations( + Math.min(roundedAllocations, resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation()), + resourceTracker.remainingNodeMemory.get(n) + ); assignModelToNode(m, n, roundedAllocations); unassignOversizedModels(n); assignExcessCores(n); @@ -294,7 +300,8 @@ private void doRandomizedRounding(List> s private void unassignOversizedModels(Node n) { for (AssignmentPlan.Deployment m : deployments) { Tuple assignment = Tuple.tuple(m, n); - if (assignments.get(assignment) < 1.0 && m.memoryBytes() > resourceTracker.remainingNodeMemory.get(n)) { + int roundedAllocations = (int) Math.ceil(allocations.get(assignment)); + if (assignments.get(assignment) < 1.0 && m.minimumMemoryRequiredBytes() > resourceTracker.remainingNodeMemory.get(n)) { unassign(assignment); } } @@ -303,7 +310,11 @@ private void unassignOversizedModels(Node n) { private AssignmentPlan toPlan() { AssignmentPlan.Builder builder = AssignmentPlan.builder(nodes, deployments); for (Map.Entry, Integer> assignment : tryAssigningRemainingCores().entrySet()) { - builder.assignModelToNode(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue()); + // TODO (#101612) The model should be assigned to the node only when it is possible. This means, that canAssign should be + // integrated into the assignModelToNode. 
+            if (builder.canAssign(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue())) {
+                builder.assignModelToNode(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue());
+            }
         }
         return builder.build();
     }
@@ -338,7 +349,7 @@ private Map, Integer> tryAssigningRemaini
             .toList()) {
             for (Node n : nodes.stream()
                 .filter(
-                    n -> resourceTracker.remainingNodeMemory.get(n) >= m.memoryBytes()
+                    n -> resourceTracker.remainingNodeMemory.get(n) >= m.minimumMemoryRequiredBytes()
                         && resourceTracker.remainingNodeCores.get(n) >= m.threadsPerAllocation()
                         && resultAllocations.get(Tuple.tuple(m, n)) == 0
                 )
                 .toList()) {
@@ -354,10 +365,15 @@ private Map, Integer> tryAssigningRemaini
                 )
             )
                 .toList()) {
-            int assigningAllocations = Math.min(
                 resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(),
-                resourceTracker.remainingModelAllocations.get(m)
+                Math.min(
+                    resourceTracker.remainingModelAllocations.get(m),
+                    m.findOptimalAllocations(
+                        resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(),
+                        resourceTracker.remainingModelAllocations.get(m)
+                    )
+                )
             );
             resourceTracker.assign(m, n, assigningAllocations);
             resultAllocations.put(Tuple.tuple(m, n), assigningAllocations);
@@ -427,7 +443,7 @@ private static class ResourceTracker {
         void assign(AssignmentPlan.Deployment m, Node n, int allocations) {
             if (assignments.contains(Tuple.tuple(m, n)) == false) {
                 assignments.add(Tuple.tuple(m, n));
-                remainingNodeMemory.compute(n, (k, v) -> v - m.memoryBytes());
+                remainingNodeMemory.compute(n, (k, v) -> v - m.estimateMemoryUsageBytes(allocations));
             }
             remainingNodeCores.compute(n, (k, v) -> v - allocations * m.threadsPerAllocation());
             remainingModelAllocations.compute(m, (k, v) -> v - allocations);
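Note: doRandomizedRounding above turns the LP's fractional allocations into integers by rounding
up with some probability and then re-checking feasibility; under the new memory model a
rounded-up count must also still fit in the node's remaining memory. A toy, standalone sketch of
that rounding step (with a hypothetical linear memory model; the real code uses
estimateMemoryUsageBytes and findOptimalAllocations):

    import java.util.Random;

    final class RoundingSketch {
        // Round the LP's fractional allocation up with probability roundUpProbability,
        // down otherwise, then clamp to what the node's remaining memory can hold.
        static int roundAllocations(double fractionalAllocations, double roundUpProbability,
                                    long remainingMemoryBytes, long perAllocationMemoryBytes, Random random) {
            int rounded = random.nextDouble() < roundUpProbability
                ? (int) Math.ceil(fractionalAllocations)
                : (int) Math.floor(fractionalAllocations);
            // Hypothetical linear model: each extra allocation costs perAllocationMemoryBytes.
            long maxByMemory = perAllocationMemoryBytes > 0 ? remainingMemoryBytes / perAllocationMemoryBytes : rounded;
            return (int) Math.min(rounded, maxByMemory);
        }
    }

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
index 9870aa93bf6ce..8c9499ca9e00c 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
@@ -126,10 +126,12 @@ private AssignmentPlan computeZonePlan(
                     modelIdToTargetAllocations.get(m.id()),
                     m.threadsPerAllocation(),
                     m.currentAllocationsByNodeId(),
-                    // Only force assigning at least once previously assigned models that have not had any allocation yet
                     (tryAssigningPreviouslyAssignedModels && modelIdToRemainingAllocations.get(m.id()) == m.allocations()) ?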
m.maxAssignedAllocations() - : 0 + : 0, + // Only force assigning at least once previously assigned models that have not had any allocation yet + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ) ) .toList(); @@ -151,7 +153,9 @@ private AssignmentPlan computePlanAcrossAllNodes(List plans) { m.allocations(), m.threadsPerAllocation(), allocationsByNodeIdByModelId.get(m.id()), - m.maxAssignedAllocations() + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ) ) .toList(); @@ -180,9 +184,13 @@ private AssignmentPlan swapOriginalModelsInPlan( Node originalNode = originalNodeById.get(assignment.getKey().id()); planBuilder.assignModelToNode(originalDeployment, originalNode, assignment.getValue()); if (originalDeployment.currentAllocationsByNodeId().containsKey(originalNode.id())) { + // TODO (#101612) requiredMemory should be calculated by the AssignmentPlan.Builder // As the node has all its available memory we need to manually account memory of models with // current allocations. - planBuilder.accountMemory(m, originalNode); + long requiredMemory = originalDeployment.estimateMemoryUsageBytes( + originalDeployment.currentAllocationsByNodeId().get(originalNode.id()) + ); + planBuilder.accountMemory(m, originalNode, requiredMemory); } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 8ccf8839cfc08..334fdfbb8b922 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -44,7 +44,8 @@ public void testRebalance_GivenNoAssignments() { Map.of(), Map.of(), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments().isEmpty(), is(true)); } @@ -78,7 +79,8 @@ public void testRebalance_GivenAllAssignmentsAreSatisfied_ShouldMakeNoChanges() nodeLoads, Map.of(), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(currentMetadata, equalTo(result)); @@ -116,7 +118,8 @@ public void testRebalance_GivenAllAssignmentsAreSatisfied_GivenOutdatedRoutingEn nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -140,7 +143,7 @@ public void testRebalance_GivenModelToAddAlreadyExists() { .build(); expectThrows( ResourceAlreadyExistsException.class, - () -> new TrainedModelAssignmentRebalancer(currentMetadata, Map.of(), Map.of(), Optional.of(taskParams), 1).rebalance() + () -> new TrainedModelAssignmentRebalancer(currentMetadata, Map.of(), Map.of(), Optional.of(taskParams), 1, false).rebalance() ); } @@ -154,7 +157,8 @@ public void testRebalance_GivenFirstModelToAdd_NoMLNodes() throws Exception { Map.of(), Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -181,7 +185,8 @@ public void testRebalance_GivenFirstModelToAdd_NotEnoughProcessors() throws Exce nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -217,7 +222,8 @@ public void 
testRebalance_GivenFirstModelToAdd_NotEnoughMemory() throws Exceptio nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -253,7 +259,8 @@ public void testRebalance_GivenFirstModelToAdd_ErrorDetectingNodeLoad() throws E nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -289,7 +296,8 @@ public void testRebalance_GivenProblemsOnMultipleNodes() throws Exception { nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -322,7 +330,8 @@ public void testRebalance_GivenFirstModelToAdd_FitsFully() throws Exception { nodeLoads, Map.of(List.of(), List.of(node1)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -361,7 +370,8 @@ public void testRebalance_GivenModelToAdd_AndPreviousAssignments_AndTwoNodes_All nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -425,7 +435,8 @@ public void testRebalance_GivenPreviousAssignments_AndNewNode() throws Exception nodeLoads, Map.of(List.of(), List.of(node1, node2, node3)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -489,7 +500,8 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -559,7 +571,8 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -608,7 +621,8 @@ public void testRebalance_GivenFailedAssignment_RestartsAssignment() throws Exce nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(1))); @@ -642,7 +656,8 @@ public void testRebalance_GivenLowPriorityModelToAdd_OnlyModel_NotEnoughMemory() nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(deploymentId); @@ -658,8 +673,8 @@ public void testRebalance_GivenLowPriorityModelToAdd_OnlyModel_NotEnoughMemory() public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessors() throws Exception { long nodeMemoryBytes = ByteSizeValue.ofGb(1).getBytes(); - DiscoveryNode node1 = buildNode("node-1", nodeMemoryBytes, 1); - DiscoveryNode node2 = buildNode("node-2", nodeMemoryBytes, 1); + DiscoveryNode node1 = buildNode("node-1", nodeMemoryBytes, 8); + DiscoveryNode node2 = buildNode("node-2", nodeMemoryBytes, 8); Map nodeLoads = new HashMap<>(); nodeLoads.put(node1, NodeLoad.builder("node-1").setMaxMemory(nodeMemoryBytes).build()); @@ -688,7 +703,8 @@ public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessor nodeLoads, Map.of(List.of("zone-1"), List.of(node1), List.of("zone-2"), List.of(node2)), Optional.of(taskParams1), - 1 + 1, + 
false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(deployment1); @@ -727,7 +743,8 @@ public void testRebalance_GivenMixedPriorityModels_NotEnoughMemoryForLowPriority nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -780,7 +797,8 @@ public void testRebalance_GivenMixedPriorityModels_TwoZones_EachNodeCanHoldOneMo nodeLoads, Map.of(List.of("zone-1"), List.of(node1), List.of("zone-2"), List.of(node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); List assignedNodes = new ArrayList<>(); @@ -834,7 +852,8 @@ public void testRebalance_GivenModelUsingAllCpu_FittingLowPriorityModelCanStart( nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -884,7 +903,8 @@ public void testRebalance_GivenMultipleLowPriorityModels_AndMultipleNodes() thro nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -934,7 +954,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_EvictsLowPriorityModel( nodeLoads, Map.of(List.of(), List.of(node1)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -986,7 +1007,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelCanS nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -1038,7 +1060,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelMust nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -1084,7 +1107,8 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 2 + 2, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -1106,7 +1130,8 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); assignment = result.getDeploymentAssignment(modelId); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java index 3ecdd5000ba35..cbbb38f1d1ddd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -24,109 +25,248 @@ public class AssignmentPlanTests extends ESTestCase { public void testBuilderCtor_GivenDuplicateNode() { Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n, 
n), List.of(m))); } public void testBuilderCtor_GivenDuplicateModel() { Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n), List.of(m, m))); } public void testAssignModelToNode_GivenNoPreviousAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(350).getBytes(), 4); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + { // old memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(40).getBytes(), 1, 2, Map.of(), 0, 0, 0); - assertThat(builder.getRemainingCores(n), equalTo(4)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + assertThat(builder.getRemainingCores(n), equalTo(4)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(60L)); - assertThat(builder.getRemainingAllocations(m), equalTo(0)); - assertThat(builder.getRemainingThreads(m), equalTo(0)); + builder.assignModelToNode(m, n, 1); - AssignmentPlan plan = builder.build(); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(0)); + assertThat(builder.getRemainingThreads(m), equalTo(0)); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { // new memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(20).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(30).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + assertThat(builder.getRemainingCores(n), equalTo(4)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + builder.assignModelToNode(m, n, 1); + + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(0L)); + assertThat(builder.getRemainingAllocations(m), equalTo(0)); + assertThat(builder.getRemainingThreads(m), equalTo(0)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } } public void 
testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 2, 2, Map.of("n_1", 1), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(350).getBytes(), 4); + { // old memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + builder.assignModelToNode(m, n, 1); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - AssignmentPlan plan = builder.build(); + AssignmentPlan plan = builder.build(); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { // new memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(25).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 1); + + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(325).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + + } } public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 2, 2, Map.of("n_1", 2), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 4); + { + // old memory format + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 0, 0, 0); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + builder.assignModelToNode(m, n, 1); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(300).getBytes())); + 
assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - AssignmentPlan plan = builder.build(); + AssignmentPlan plan = builder.build(); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(false)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { + // new memory format + Deployment m = new Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 2, + 2, + Map.of("n_1", 2), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(25).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 1); + + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(275).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } } public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 101, 2, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 1)); - assertThat(e.getMessage(), equalTo("not enough memory on node [n_1] to assign model [m_1]")); + assertThat(e.getMessage(), equalTo("not enough memory on node [n_1] to assign [1] allocations to deployment [m_1]")); } public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 2, 2, Map.of("n_1", 1), 0); + { // old memory format + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 2); - AssignmentPlan plan = builder.build(); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + builder.assignModelToNode(m, n, 2); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + } + { // new memory format + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + 
ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(5).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 2); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + } } public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocation() { - Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 5, 1, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 4); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 5, 1, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 5)); @@ -138,8 +278,8 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocati } public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAllocation() { - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 3)); @@ -151,13 +291,22 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAlloc } public void testAssignModelToNode_GivenSameModelAssignedTwice() { - Node n = new Node("n_1", 100, 8); - Deployment m = new AssignmentPlan.Deployment("m_1", 60, 4, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); assertThat(builder.getRemainingCores(n), equalTo(8)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(1000).getBytes())); assertThat(builder.getRemainingAllocations(m), equalTo(4)); assertThat(builder.getRemainingThreads(m), equalTo(8)); assertThat(builder.canAssign(m, n, 1), is(true)); @@ -165,7 +314,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { builder.assignModelToNode(m, n, 1); assertThat(builder.getRemainingCores(n), equalTo(6)); - assertThat(builder.getRemainingMemory(n), equalTo(40L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(600).getBytes())); assertThat(builder.getRemainingAllocations(m), equalTo(3)); assertThat(builder.getRemainingThreads(m), equalTo(6)); assertThat(builder.canAssign(m, n, 2), is(true)); @@ -173,7 +322,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { builder.assignModelToNode(m, n, 2); assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(40L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(500).getBytes())); 
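        // Sketch of the memory accounting these assertions imply (illustrative
        // names, not the production API; the 240 MB constant matches the
        // MEMORY_OVERHEAD comment on scaleNodeSize in AssignmentPlannerTests below):
        //
        //     static long oldFormatBytes(long modelBytes) {
        //         // old format (both new memory fields are 0): overhead plus twice
        //         // the model size, charged once per node regardless of how many
        //         // allocations the node hosts
        //         return ByteSizeValue.ofMb(240).getBytes() + 2 * modelBytes;
        //     }
        //
        //     static long newFormatBytes(long modelBytes, long perDeploymentBytes, long perAllocationBytes, int allocations) {
        //         // new format: model size and per-deployment cost are charged once
        //         // per node, and each allocation adds perAllocationMemory on top
        //         return modelBytes + perDeploymentBytes + perAllocationBytes * allocations;
        //     }
        //
        // For the deployment above (50 MB model, 300 MB per deployment, 50 MB per
        // allocation, on a 1000 MB node): the first allocation costs
        // 50 + 300 + 50 = 400 MB, leaving 600 MB, and the next two allocations cost
        // 2 * 50 = 100 MB, leaving the 500 MB asserted here.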
assertThat(builder.getRemainingAllocations(m), equalTo(1)); assertThat(builder.getRemainingThreads(m), equalTo(2)); @@ -186,7 +335,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -194,17 +343,33 @@ public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { } public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 5); - Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of("n_1", 1), 0); - - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - - assertThat(builder.canAssign(m, n, 1), is(true)); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); + { + // old memory format + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(31).getBytes(), 1, 1, Map.of("n_1", 1), 0, 0, 0); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + assertThat(builder.canAssign(m, n, 1), is(true)); + } + { + // new memory format + Deployment m = new Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 1, + 1, + Map.of("n_1", 1), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + assertThat(builder.canAssign(m, n, 1), is(true)); + } } public void testCanAssign_GivenEnoughMemory() { - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -216,16 +381,25 @@ public void testCanAssign_GivenEnoughMemory() { public void testCompareTo_GivenDifferenceInPreviousAssignments() { AssignmentPlan planSatisfyingPreviousAssignments; AssignmentPlan planNotSatisfyingPreviousAssignments; - Node n = new Node("n_1", 100, 5); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 2), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 2), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planSatisfyingPreviousAssignments = builder.build(); } { - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 3), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 3, + 2, + Map.of("n_1", 3), + 0, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planNotSatisfyingPreviousAssignments = builder.build(); @@ -238,8 +412,17 @@ public void testCompareTo_GivenDifferenceInPreviousAssignments() { public void testCompareTo_GivenDifferenceInAllocations() { AssignmentPlan planWithMoreAllocations; AssignmentPlan 
planWithFewerAllocations; - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 1), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 3, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); { AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -259,16 +442,25 @@ public void testCompareTo_GivenDifferenceInAllocations() { public void testCompareTo_GivenDifferenceInMemory() { AssignmentPlan planUsingMoreMemory; AssignmentPlan planUsingLessMemory; - Node n = new Node("n_1", 100, 5); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 1), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 1), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingMoreMemory = builder.build(); } { - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 99, 3, 2, Map.of("n_1", 1), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(29).getBytes(), + 3, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingLessMemory = builder.build(); @@ -279,26 +471,96 @@ public void testCompareTo_GivenDifferenceInMemory() { } public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 0); - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) - .assignModelToNode(deployment1, node1, 1) - .assignModelToNode(deployment2, node2, 2) - .assignModelToNode(deployment3, node1, 2) - .assignModelToNode(deployment3, node2, 2) - .build(); - assertThat(plan.satisfiesAllModels(), is(true)); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + { + // old memory format + AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .assignModelToNode(deployment1, node1, 1) + .assignModelToNode(deployment2, node2, 2) + .assignModelToNode(deployment3, node1, 2) + .assignModelToNode(deployment3, node2, 2) + .build(); + assertThat(plan.satisfiesAllModels(), is(true)); + } + { + // new memory format + AssignmentPlan.Deployment deployment1 = new 
AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .assignModelToNode(deployment1, node1, 1) + .assignModelToNode(deployment2, node2, 2) + .assignModelToNode(deployment3, node1, 2) + .assignModelToNode(deployment3, node2, 2) + .build(); + assertThat(plan.satisfiesAllModels(), is(true)); + } } public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 2) @@ -309,11 +571,11 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { } public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 30, 2, 1, Map.of(), 4); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 1) @@ -322,10 +584,10 @@ public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { } public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - 
AssignmentPlan.Deployment deployment1 = new Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 4); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) .assignModelToNode(deployment1, node1, 1) .build(); @@ -333,12 +595,39 @@ public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { } public void testCountPreviouslyAssignedThatAreStillAssigned() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 4); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 1); - AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment("m_4", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 4, + 0, + 0 + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 1, + 0, + 0 + ); + AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment( + "m_4", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3, deployment4)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 1) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index 82a291a8d9fb2..6a72ccf4c4445 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -33,50 +33,144 @@ public class AssignmentPlannerTests extends ESTestCase { + private static long scaleNodeSize(long nodeMemory) { + // 240 Mb is the size in StartTrainedModelDeploymentAction.MEMORY_OVERHEAD + return ByteSizeValue.ofMb(240 + 2 * nodeMemory).getBytes(); + } + public void testModelThatDoesNotFitInMemory() { - List nodes = List.of(new Node("n_1", 100, 4)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 101, 4, 1, Map.of(), 0); - AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + { // Without perDeploymentMemory and perAllocationMemory specified + List nodes = List.of(new 
Node("n_1", scaleNodeSize(50), 4)); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(51).getBytes(), 4, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + } + { // With perDeploymentMemory and perAllocationMemory specified + List nodes = List.of(new Node("n_1", scaleNodeSize(55), 4)); + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(51).getBytes() + ); + AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + } } public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { - List nodes = List.of(new Node("n_1", 100, 4), new Node("n_2", 100, 5)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 1, 1, 6, Map.of(), 0); + List nodes = List.of(new Node("n_1", scaleNodeSize(100), 4), new Node("n_2", scaleNodeSize(100), 5)); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(1).getBytes(), 1, 6, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment).isEmpty(), is(true)); } public void testSingleModelThatFitsFullyOnSingleNode() { { - Node node = new Node("n_1", 100, 4); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 1, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + { + Node node = new Node("n_1", scaleNodeSize(1000), 8); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(1000).getBytes(), 8, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } { - Node node = new Node("n_1", 1000, 8); - Deployment deployment = new Deployment("m_1", 1000, 8, 1, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(10000), 16); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(10000).getBytes(), + 1, + 16, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } { - Node node = new Node("n_1", 10000, 16); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 10000, 1, 16, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + } + + public void testSingleModelThatFitsFullyOnSingleNode_NewMemoryFields() { + { + Node node = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 4); + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 1, + Map.of(), + 0, + 
ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + { + Node node = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 8, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(100).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } } public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 100, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 4); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); Map assignments = plan.assignments(deployment).get(); - if (assignments.get(node1) > 0) { + if (assignments.get(node1) != null) { + assertThat(assignments.get(node1), equalTo(4)); + } else { + assertThat(assignments.get(node2), equalTo(4)); + } + } + + public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode_NewMemoryFields() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + AssignmentPlan.Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(150).getBytes() + ); + + AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + + Map assignments = plan.assignments(deployment).get(); + if (assignments.get(node1) != null) { assertThat(assignments.get(node1), equalTo(4)); } else { assertThat(assignments.get(node2), equalTo(4)); @@ -84,10 +178,53 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully } public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation() { - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 10, 1, Map.of(), 0); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 1, Map.of(), 0, 0, 0); + // Single node + { + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node), equalTo(4)); + } + // Two nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 2); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(4)); + 
assertThat(assignments.get(node2), equalTo(2)); + } + // Three nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 2); + Node node3 = new Node("n_3", scaleNodeSize(100), 3); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(4)); + assertThat(assignments.get(node2), equalTo(2)); + assertThat(assignments.get(node3), equalTo(3)); + } + } + + public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation_NewMemoryFields() { + AssignmentPlan.Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 10, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); // Single node { - Node node = new Node("n_1", 100, 4); + Node node = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -95,8 +232,8 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } // Two nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 2); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(600).getBytes(), 2); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -105,9 +242,9 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } // Three nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 2); - Node node3 = new Node("n_3", 100, 3); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(600).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(700).getBytes(), 3); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -118,14 +255,105 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } public void testMultipleModelsAndNodesWithSingleSolution() { - Node node1 = new Node("n_1", 100, 7); - Node node2 = new Node("n_2", 100, 7); - Node node3 = new Node("n_3", 100, 2); - Node node4 = new Node("n_4", 100, 2); - Deployment deployment1 = new Deployment("m_1", 50, 2, 4, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 50, 2, 3, Map.of(), 0); - Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment("m_4", 50, 2, 1, Map.of(), 0); + Node node1 = new Node("n_1", 2 * scaleNodeSize(50), 7); + Node node2 = new Node("n_2", 2 * scaleNodeSize(50), 7); + Node node3 = new Node("n_3", 2 * scaleNodeSize(50), 2); + Node node4 = new Node("n_4", 2 * 
scaleNodeSize(50), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 4, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 2, 3, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); + Deployment deployment4 = new Deployment("m_4", ByteSizeValue.ofMb(50).getBytes(), 2, 1, Map.of(), 0, 0, 0); + + AssignmentPlan plan = new AssignmentPlanner( + List.of(node1, node2, node3, node4), + List.of(deployment1, deployment2, deployment3, deployment4) + ).computePlan(); + + { + assertThat(plan.assignments(deployment1).isPresent(), is(true)); + Map assignments = plan.assignments(deployment1).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(1)); + assertThat(assignments.get(node3), is(nullValue())); + assertThat(assignments.get(node4), is(nullValue())); + } + { + assertThat(plan.assignments(deployment2).isPresent(), is(true)); + Map assignments = plan.assignments(deployment2).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(1)); + assertThat(assignments.get(node3), is(nullValue())); + assertThat(assignments.get(node4), is(nullValue())); + } + { + assertThat(plan.assignments(deployment3).isPresent(), is(true)); + Map assignments = plan.assignments(deployment3).get(); + assertThat(assignments.get(node1), is(nullValue())); + assertThat(assignments.get(node2), is(nullValue())); + // Will either be on node 3 or 4 + Node assignedNode = assignments.get(node3) != null ? node3 : node4; + Node otherNode = assignedNode.equals(node3) ? node4 : node3; + assertThat(assignments.get(assignedNode), equalTo(1)); + assertThat(assignments.get(otherNode), is(nullValue())); + } + { + assertThat(plan.assignments(deployment4).isPresent(), is(true)); + Map assignments = plan.assignments(deployment4).get(); + assertThat(assignments.get(node1), is(nullValue())); + assertThat(assignments.get(node2), is(nullValue())); + // Will either be on node 3 or 4 + Node assignedNode = assignments.get(node3) != null ? node3 : node4; + Node otherNode = assignedNode.equals(node3) ? 
node4 : node3; + assertThat(assignments.get(assignedNode), equalTo(2)); + assertThat(assignments.get(otherNode), is(nullValue())); + } + } + + public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(900).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(900).getBytes(), 2); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 4, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 3, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment3 = new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment4 = new Deployment( + "m_4", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); AssignmentPlan plan = new AssignmentPlanner( List.of(node1, node2, node3, node4), @@ -173,10 +401,53 @@ public void testMultipleModelsAndNodesWithSingleSolution() { } public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation() { - Deployment deployment = new AssignmentPlan.Deployment("m_1", 30, 10, 3, Map.of(), 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 3, Map.of(), 0, 0, 0); + // Single node + { + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node), equalTo(1)); + } + // Two nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 8); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(2)); + } + // Three nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 7); + Node node3 = new Node("n_3", scaleNodeSize(100), 15); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(2)); + assertThat(assignments.get(node3), equalTo(5)); + } + } + + public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation_NewMemoryFields() { + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 10, + 3, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + 
); // Single node { - Node node = new Node("n_1", 100, 4); + Node node = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -184,8 +455,8 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } // Two nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 8); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 8); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -194,9 +465,9 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } // Three nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 7); - Node node3 = new Node("n_3", 100, 15); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(800).getBytes(), 15); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -207,8 +478,17 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 30, 4, 1, Map.of("n_1", 4), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 4, + 1, + Map.of("n_1", 4), + 0, + 0, + 0 + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment).isPresent(), is(true)); @@ -217,26 +497,117 @@ public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() { List nodes = List.of( - new Node("n_1", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_2", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_3", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_4", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_5", ByteSizeValue.ofGb(16).getBytes(), 16), - new Node("n_6", ByteSizeValue.ofGb(8).getBytes(), 16) + new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_3", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_4", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_5", ByteSizeValue.ofGb(64).getBytes(), 16), + new Node("n_6", ByteSizeValue.ofGb(32).getBytes(), 16) ); List deployments = List.of( - new Deployment("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0), - new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0), - new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 
1, Map.of(), 0), - new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0), - new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0), - new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0), - new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0), - new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0), - new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0), - new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0), - new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0), - new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0) + new Deployment("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0, 0, 0), + new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), + new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0, 0, 0), + new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0, 0, 0), + new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0, 0, 0), + new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0, 0, 0), + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) + ); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); + + int usedCores = 0; + for (AssignmentPlan.Deployment m : deployments) { + Map assignments = assignmentPlan.assignments(m).orElse(Map.of()); + usedCores += assignments.values().stream().mapToInt(Integer::intValue).sum(); + } + assertThat(usedCores, equalTo(64)); + + assertPreviousAssignmentsAreSatisfied(deployments, assignmentPlan); + } + + public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_NewMemoryFields() { + List nodes = List.of( + new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_3", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_4", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_5", ByteSizeValue.ofGb(64).getBytes(), 16), + new Node("n_6", ByteSizeValue.ofGb(32).getBytes(), 16) + ); + // Use mix of old and new memory fields + List deployments = List.of( + new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 10, + 1, + Map.of("n_1", 5), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment("m_2", ByteSizeValue.ofMb(100).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), + new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 3, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ), + new Deployment( + "m_4", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 1, + Map.of("n_3", 2), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment( + "m_5", + 
ByteSizeValue.ofMb(500).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(800).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment( + "m_6", + ByteSizeValue.ofMb(50).getBytes(), + 12, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(50).getBytes(), + ByteSizeValue.ofMb(20).getBytes() + ), + new Deployment( + "m_7", + ByteSizeValue.ofMb(50).getBytes(), + 12, + 1, + Map.of("n_2", 6), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ), + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); @@ -297,6 +668,9 @@ public void testRandomBenchmark() { StopWatch stopWatch = new StopWatch(); stopWatch.start(); AssignmentPlan assignmentPlan = solver.computePlan(); + for (Node node : nodes) { + assertThat(assignmentPlan.getRemainingNodeMemory(node.id()), greaterThanOrEqualTo(0L)); + } stopWatch.stop(); Quality quality = computeQuality(nodes, deployments, assignmentPlan); @@ -336,7 +710,16 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); previousModelsPlusNew.add( - new AssignmentPlan.Deployment(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + new AssignmentPlan.Deployment( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + previousAssignments, + 0, + 0, + 0 + ) ); } previousModelsPlusNew.add(randomModel("new")); @@ -347,18 +730,20 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode } public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAssignments() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofGb(2).getBytes(), 2); - Node node3 = new Node("n_3", ByteSizeValue.ofGb(2).getBytes(), 2); + Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + Node node2 = new Node("n_2", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + Node node3 = new Node("n_3", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); Deployment deployment1 = new AssignmentPlan.Deployment( "m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), + 0, + 0, 0 ); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment1, deployment2)) .computePlan(); assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); @@ -381,15 +766,17 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss } public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(4).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofGb(4).getBytes(), 2); + Node node1 = new Node("n_1", ByteSizeValue.ofGb(6).getBytes(), 2); + 
Node node2 = new Node("n_2", ByteSizeValue.ofGb(6).getBytes(), 2); AssignmentPlan.Deployment deployment1 = new Deployment( "m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), - 3 + 3, + 0, + 0 ); AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( "m_2", @@ -397,35 +784,84 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( 1, 2, Map.of(), - 1 + 1, + 0, + 0 ); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2)).computePlan(); Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); - assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); - assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + if (indexedBasedPlan.get("m_2").containsKey("n_1")) { + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_2", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_1", 1))); + } else { + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + } assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); assertThat(assignmentPlan.getRemainingNodeMemory("n_2"), greaterThanOrEqualTo(0L)); } public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1); + Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1), List.of(deployment1, deployment2)).computePlan(); assertThat(assignmentPlan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(1L)); } + public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); + + // First only start m_1 + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + + // Then start m_2 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + 
assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + + // Then start m_3 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + + // First, one node goes away. + assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); + } + public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { - Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(2600).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2600).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); @@ -458,8 +894,8 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); // Now the cluster starts getting resized. - Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); - Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(2600).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(2600).getBytes(), 2); // First, one node goes away. 
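        // (createModelsFromPlan, defined near the bottom of this file, converts the
        // plan just computed back into deployments whose current allocations become
        // previous assignments and whose maxAssignedAllocations is carried forward,
        // so the shrunken cluster is planned with the obligation to keep previously
        // running models allocated where possible.)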
assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); @@ -492,11 +928,65 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { public void testGivenClusterResize_ShouldRemoveAllocatedModels() { // Ensure that plan is removing previously allocated models if not enough memory is available - Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0, 0, 0); + + // Create a plan where all deployments are assigned at least once + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .computePlan(); + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node2.id()), greaterThanOrEqualTo(0L)); + + // Now the cluster starts getting resized. Ensure that resources are not over-allocated. 
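+ // With the old memory format a deployment needs 2 * model size + 240 MB overhead on a node, so the remaining n_1 (1840 MB) can only keep m_1: 2*800 + 240 = 1840 MB; m_2 and m_3 no longer fit and must be removed.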
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeCores(node1.id()), greaterThanOrEqualTo(0)); + + } + + public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() { + // Ensure that plan is removing previously allocated models if not enough memory is available + Node node1 = new Node("n_1", ByteSizeValue.ofMb(700).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 2); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(150).getBytes() + ); + Deployment deployment3 = new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); // Create a plan where all deployments are assigned at least once AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) @@ -536,7 +1026,9 @@ public static List createModelsFromPlan(AssignmentPlan plan) { m.allocations(), m.threadsPerAllocation(), currentAllocations, - Math.max(m.maxAssignedAllocations(), totalAllocations) + Math.max(m.maxAssignedAllocations(), totalAllocations), + 0, + 0 ) ); } @@ -579,7 +1071,7 @@ public static List randomNodes(int scale, String nodeIdPrefix) { for (int i = 0; i < 1 + 3 * scale; i++) { int cores = randomIntBetween(2, 32); long memBytesPerCore = randomFrom(memBytesPerCoreValues); - nodes.add(new Node(nodeIdPrefix + "n_" + i, cores * memBytesPerCore, cores)); + nodes.add(new Node(nodeIdPrefix + "n_" + i, scaleNodeSize(ByteSizeValue.ofBytes(cores * memBytesPerCore).getMb()), cores)); } return nodes; } @@ -594,14 +1086,30 @@ public static List randomModels(int scale, double load) { public static Deployment randomModel(String idSuffix) { int allocations = randomIntBetween(1, 32); - return new Deployment( - "m_" + idSuffix, - randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), - randomIntBetween(1, 32), - randomIntBetween(1, 4), - Map.of(), - 0 - ); + // randomly choose between old and new memory fields format + if (randomBoolean()) { + return new Deployment( + "m_" + idSuffix, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), + randomIntBetween(1, 32), + randomIntBetween(1, 4), + Map.of(), + 0, + 0, + 0 + ); + } else { + return new Deployment( + "m_" + idSuffix, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()), + randomIntBetween(1, 32), + randomIntBetween(1, 4), + Map.of(), + 0, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()), + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()) + ); + } } public static void assertPreviousAssignmentsAreSatisfied(List deployments, AssignmentPlan assignmentPlan) { @@ -628,7 +1136,7 @@ private void runTooManyNodesAndModels(int nodesSize, int modelsSize) { } List deployments = new ArrayList<>(); for (int i = 
0; i < modelsSize; i++) { - deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0)); + deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0, 0, 0)); } // Check plan is computed without OOM exception diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index 4a9b01e535d88..c45ce36394109 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -14,7 +15,6 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -22,77 +22,179 @@ public class PreserveAllAllocationsTests extends ESTestCase { public void testGivenNoPreviousAssignments() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new Deployment("m_1", 30, 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", 30, 2, 4, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( List.of(node1, node2), List.of(deployment1, deployment2) ); - - List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, contains(node1, node2)); - - List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); - assertThat(modelsPreservingAllocations, contains(deployment1, deployment2)); } public void testGivenPreviousAssignments() { - Node node1 = new Node("n_1", 100, 8); - Node node2 = new Node("n_2", 100, 8); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of("n_1", 1), 1); - Deployment deployment2 = new Deployment("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); - PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( - List.of(node1, node2), - List.of(deployment1, deployment2) - ); - - List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, hasSize(2)); - - assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); - assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(20L)); - assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); - - assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); - 
assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(50L)); - assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); - - List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); - assertThat(modelsPreservingAllocations, hasSize(2)); - - assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); - assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(30L)); - assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); - - assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); - assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(50L)); - assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); - assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) - .assignModelToNode(deployment1, node1, 2) - .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).isEmpty(), is(true)); - - plan = preserveAllAllocations.mergePreservedAllocations(plan); - - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(20L)); - assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); - assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(50L)); - assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + { + // old memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8); + Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + 0, + 0 + ); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); + PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 640 - [(2*30 + 240) + (2*50 + 240)] = 0: deployments use 640 MB on the node 1 + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L)); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 640 - (50*2+240) = 300 : deployments use 340MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (2*4) = 0 : preserving all allocations of deployment 2 should use 8 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); + + List modelsPreservingAllocations = 
preserveAllAllocations.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + + plan = preserveAllAllocations.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + + // Node 1 already had deployments 1 and 2 assigned to it so adding more allocation doesn't change memory usage. 
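+ // 640 - [(2*30 + 240) + (2*50 + 240)] = 0 : node 1 stays fully used after the merge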
+ assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L)); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } + { + // new memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 1000 - [(30 + 300 + 10) + (50 + 300 + 10)] = 300: deployments use 700 MB on the node 1 + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 1000 - (50 + 300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - (2*4) = 0 : preserving all allocations of deployment 2 should use 8 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); + + List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), 
equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + + plan = preserveAllAllocations.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + + // 1000 - ((30 + 300 + 3*10) + (50 + 300 + 10)) = 280 : deployments use 720 MB on the node 1 + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes())); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } } public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 2, 2, Map.of("n_1", 2), 2); + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); @@ -101,7 +203,7 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments plan = preserveAllAllocations.mergePreservedAllocations(plan); assertThat(plan.assignments(deployment).isPresent(), is(true)); assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 2))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(70L)); + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); assertThat(plan.getRemainingNodeCores("n_1"), equalTo(0)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index d8c3b09422e92..f646bf5cb2e9d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -22,10 +23,10 @@ public class 
PreserveOneAllocationTests extends ESTestCase { public void testGivenNoPreviousAssignments() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 30, 2, 4, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -36,67 +37,204 @@ public void testGivenNoPreviousAssignments() { } public void testGivenPreviousAssignments() { - Node node1 = new Node("n_1", 100, 8); - Node node2 = new Node("n_2", 100, 8); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of("n_1", 1), 1); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); - PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2)); - - List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, hasSize(2)); - - assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); - assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(20L)); - assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); - - assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); - assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(50L)); - assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); - - List modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations(); - assertThat(modelsPreservingAllocations, hasSize(2)); - - assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); - assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(30L)); - assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); - - assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); - assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(50L)); - assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) - .assignModelToNode(deployment1, node1, 2) - .assignModelToNode(deployment2, node2, 1) - .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); - - plan = preserveOneAllocation.mergePreservedAllocations(plan); - - 
assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(20L)); - assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); - assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(50L)); - assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + { + // old memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 640 - [(30*2+240)+(50*2+240)] = 0 : deployments use all memory on the node + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L)); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 640 - (50*2+240) = 300 : deployments use 340MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*4) = 4 : preserving 1 allocation of deployment 2 should use 4 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); + + List modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. 
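+ // PreserveOneAllocation keeps one allocation per deployment and node; mergePreservedAllocations adds those preserved allocations back on top of whatever the plan below assigns.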
+ // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .assignModelToNode(deployment2, node2, 1) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + // Node 1 already had deployments 1 and 2 assigned to it so adding more allocation doesn't change memory usage. + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L)); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Node 2 already had deployment 2 assigned to it so adding more allocation doesn't change memory usage. + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - [(1*4) + (1*4)] = 0 : deployment 2 should use all cores on the node + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } + { + // new memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 1000 - [(30+300+10) + (50+300+10)] = 300 : deployments use 700 MB on the node + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 1000 - (50 + 300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - (1*4) = 4 : preserving 1 allocation of deployment 2 should use 4 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); + + List modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), 
equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .assignModelToNode(deployment2, node2, 1) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + // 1000 - [(30+300+3*10) + (50+300+10)] = 280 : deployments use 720MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes())); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // 1000 - (50 + 300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - [(1*4) + (1*4)] = 0 : deployment 2 should use all cores on the node + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + + } } public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 2, 2, Map.of("n_1", 2), 2); - PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); - - plan = preserveOneAllocation.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment).isPresent(), is(true)); - assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(70L)); - assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + { + // old memory format + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment 
deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); + + AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment).isPresent(), is(true)); + assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + // 400 - (30*2 + 240) = 100 : deployments use 300MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + } + { + // new memory format + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 2), + 2, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); + + AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment).isPresent(), is(true)); + assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + // 400 - (30 + 300 + 10) = 60 : deployments use 340MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(60).getBytes())); + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java index 7ceb8bbb86869..651e4764cb894 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java @@ -36,7 +36,7 @@ public class ZoneAwareAssignmentPlannerTests extends ESTestCase { public void testGivenOneModel_OneNode_OneZone_DoesNotFit() { Node node = new Node("n_1", 100, 1); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -44,8 +44,17 @@ public void testGivenOneModel_OneNode_OneZone_DoesNotFit() { } public void testGivenOneModel_OneNode_OneZone_FullyFits() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 2, 2, Map.of(), 0); + Node node = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -53,8 +62,17 
@@ public void testGivenOneModel_OneNode_OneZone_FullyFits() { } public void testGivenOneModel_OneNode_OneZone_PartiallyFits() { - Node node = new Node("n_1", 100, 5); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); + Node node = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 5); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -64,9 +82,18 @@ public void testGivenOneModel_OneNode_OneZone_PartiallyFits() { } public void testGivenOneModelWithSingleAllocation_OneNode_TwoZones() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z1"), List.of(node1), List.of("z2"), List.of(node2)), @@ -82,9 +109,18 @@ public void testGivenOneModelWithSingleAllocation_OneNode_TwoZones() { } public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 2, 2, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z_1"), List.of(node1), List.of("z_2"), List.of(node2)), @@ -99,9 +135,18 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() { } public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 3, 3, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 3, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z_1"), List.of(node1), List.of("z_2"), List.of(node2)), @@ -117,15 +162,15 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() { } public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Node node3 = new Node("n_3", 100, 4); - Node node4 = new Node("n_4", 100, 4); - Node node5 = new Node("n_5", 100, 4); - Node node6 = new Node("n_6", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 25, 4, 1, Map.of(), 0); - Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 25, 6, 2, Map.of(), 0); - 
AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 25, 2, 3, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node5 = new Node("n_5", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node6 = new Node("n_6", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 6, 2, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(30).getBytes(), 2, 3, Map.of(), 0, 0, 0); Map, List> nodesByZone = Map.of( List.of("z_1"), @@ -168,11 +213,11 @@ public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() { } public void testGivenTwoModelsWithSingleAllocation_OneNode_ThreeZones() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Node node3 = new Node("n_3", 100, 4); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", 25, 1, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 25, 1, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z1"), List.of(node1), List.of("z2"), List.of(node2), List.of("z3"), List.of(node3)), @@ -203,7 +248,16 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); previousModelsPlusNew.add( - new AssignmentPlan.Deployment(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + new AssignmentPlan.Deployment( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + previousAssignments, + 0, + 0, + 0 + ) ); } previousModelsPlusNew.add(randomModel("new")); @@ -214,11 +268,11 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode } public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOnce() { - Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(2580).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", 
ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1, node2)), List.of(deployment1)) @@ -252,8 +306,8 @@ public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOn assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); // Now the cluster starts getting resized. - Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); - Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(5160).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(5160).getBytes(), 2); // First, one node goes away. assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1)), createModelsFromPlan(assignmentPlan)) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java new file mode 100644 index 0000000000000..fc78bf36c72fb --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java @@ -0,0 +1,289 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Strings; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.WarningsHandler.PERMISSIVE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class MlAssignmentPlannerUpgradeIT extends AbstractUpgradeTestCase { + + private Logger logger = LogManager.getLogger(MlAssignmentPlannerUpgradeIT.class); + + // See PyTorchModelIT for how this model was created + static final String BASE_64_ENCODED_MODEL = + "UEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAUAA4Ac2ltcGxlbW9kZWwvZGF0YS5wa2xGQgoAWlpaWlpaWlpaWoACY19fdG9yY2hfXwp" + + "TdXBlclNpbXBsZQpxACmBfShYCAAAAHRyYWluaW5ncQGIdWJxAi5QSwcIXOpBBDQAAAA0AAAAUEsDBBQACAgIAAAAAAAAAAAAAAAAAA" + + "AAAAAdAEEAc2ltcGxlbW9kZWwvY29kZS9fX3RvcmNoX18ucHlGQj0AWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaW" + + "lpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWnWOMWvDMBCF9/yKI5MMrnHTQsHgjt2aJdlCEIp9SgWSTpykFvfXV1htaYds0nfv473Jqhjh" + + "kAPywbhgUbzSnC02wwZAyqBYOUzIUUoY4XRe6SVr/Q8lVsYbf4UBLkS2kBk1aOIPxbOIaPVQtEQ8vUnZ/WlrSxTA+JCTNHMc4Ig+Ele" + + "s+Jod+iR3N/jDDf74wxu4e/5+DmtE9mUyhdgFNq7bZ3ekehbruC6aTxS/c1rom6Z698WrEfIYxcn4JGTftLA7tzCnJeD41IJVC+U07k" + + "umUHw3E47Vqh+xnULeFisYLx064mV8UTZibWFMmX0p23wBUEsHCE0EGH3yAAAAlwEAAFBLAwQUAAgICAAAAAAAAAAAAAAAAAAAAAAAJ" + + 
"wA5AHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYnVnX3BrbEZCNQBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpa" + + "WlpaWlpaWlpaWlpaWlpaWlpaWlpaWrWST0+DMBiHW6bOod/BGS94kKpo2Mwyox5x3pbgiXSAFtdR/nQu3IwHiZ9oX88CaeGu9tL0efq" + + "+v8P7fmiGA1wgTgoIcECZQqe6vmYD6G4hAJOcB1E8NazTm+ELyzY4C3Q0z8MsRwF+j4JlQUPEEo5wjH0WB9hCNFqgpOCExZY5QnnEw7" + + "ME+0v8GuaIs8wnKI7RigVrKkBzm0lh2OdjkeHllG28f066vK6SfEypF60S+vuYt4gjj2fYr/uPrSvRv356TepfJ9iWJRN0OaELQSZN3" + + "FRPNbcP1PTSntMr0x0HzLZQjPYIEo3UaFeiISRKH0Mil+BE/dyT1m7tCBLwVO1MX4DK3bbuTlXuy8r71j5Aoho66udAoseOnrdVzx28" + + "UFW6ROuO/lT6QKKyo79VU54emj9QSwcInsUTEDMBAAAFAwAAUEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAZAAYAc2ltcGxlbW9kZWw" + + "vY29uc3RhbnRzLnBrbEZCAgBaWoACKS5QSwcIbS8JVwQAAAAEAAAAUEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAATADsAc2ltcGxlbW" + + "9kZWwvdmVyc2lvbkZCNwBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaMwpQSwcI0" + + "Z5nVQIAAAACAAAAUEsBAgAAAAAICAAAAAAAAFzqQQQ0AAAANAAAABQAAAAAAAAAAAAAAAAAAAAAAHNpbXBsZW1vZGVsL2RhdGEucGts" + + "UEsBAgAAFAAICAgAAAAAAE0EGH3yAAAAlwEAAB0AAAAAAAAAAAAAAAAAhAAAAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5UEs" + + "BAgAAFAAICAgAAAAAAJ7FExAzAQAABQMAACcAAAAAAAAAAAAAAAAAAgIAAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYn" + + "VnX3BrbFBLAQIAAAAACAgAAAAAAABtLwlXBAAAAAQAAAAZAAAAAAAAAAAAAAAAAMMDAABzaW1wbGVtb2RlbC9jb25zdGFudHMucGtsU" + + "EsBAgAAAAAICAAAAAAAANGeZ1UCAAAAAgAAABMAAAAAAAAAAAAAAAAAFAQAAHNpbXBsZW1vZGVsL3ZlcnNpb25QSwYGLAAAAAAAAAAe" + + "Ay0AAAAAAAAAAAAFAAAAAAAAAAUAAAAAAAAAagEAAAAAAACSBAAAAAAAAFBLBgcAAAAA/AUAAAAAAAABAAAAUEsFBgAAAAAFAAUAagE" + + "AAJIEAAAAAA=="; + static final long RAW_MODEL_SIZE; // size of the model before base64 encoding + static { + RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; + } + + public void testMlAssignmentPlannerUpgrade() throws Exception { + assumeTrue("NLP model deployments added in 8.0", isOriginalClusterVersionAtLeast(Version.V_8_0_0)); + + logger.info("Starting testMlAssignmentPlannerUpgrade, model size {}", RAW_MODEL_SIZE); + + switch (CLUSTER_TYPE) { + case OLD -> { + // setup deployments using old and new memory format + setupDeployments(); + + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + if (isOriginalClusterVersionAtLeast(Version.V_8_11_0)) { + assertNewMemoryFormat("new_memory_format"); + } else { + assertOldMemoryFormat("new_memory_format"); + } + } + case MIXED -> { + ensureHealth(".ml-inference-*,.ml-config*", (request -> { + request.addParameter("wait_for_status", "yellow"); + request.addParameter("timeout", "70s"); + })); + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + if (isOriginalClusterVersionAtLeast(Version.V_8_11_0)) { + assertNewMemoryFormat("new_memory_format"); + } else { + assertOldMemoryFormat("new_memory_format"); + } + + } + case UPGRADED -> { + ensureHealth(".ml-inference-*,.ml-config*", (request -> { + request.addParameter("wait_for_status", "yellow"); + request.addParameter("timeout", "70s"); + })); + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + assertNewMemoryFormat("new_memory_format"); + + cleanupDeployments(); + } + } + } + + @SuppressWarnings("unchecked") + private void waitForDeploymentStarted(String 
modelId) throws Exception { + assertBusy(() -> { + var response = getTrainedModelStats(modelId); + Map map = entityAsMap(response); + List> stats = (List>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + assertThat(stat.toString(), XContentMapValues.extractValue("deployment_stats.state", stat), equalTo("started")); + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void assertOldMemoryFormat(String modelId) throws Exception { + // There was a change in the MEMORY_OVERHEAD value in 8.3.0, see #86416 + long memoryOverheadMb = Version.fromString(UPGRADE_FROM_VERSION).onOrAfter(Version.V_8_2_1) ? 240 : 270; + var response = getTrainedModelStats(modelId); + Map map = entityAsMap(response); + List> stats = (List>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + Long expectedMemoryUsage = ByteSizeValue.ofMb(memoryOverheadMb).getBytes() + RAW_MODEL_SIZE * 2; + Integer actualMemoryUsage = (Integer) XContentMapValues.extractValue("model_size_stats.required_native_memory_bytes", stat); + assertThat( + Strings.format("Memory usage mismatch for the model %s in cluster state %s", modelId, CLUSTER_TYPE.toString()), + actualMemoryUsage, + equalTo(expectedMemoryUsage.intValue()) + ); + } + + @SuppressWarnings("unchecked") + private void assertNewMemoryFormat(String modelId) throws Exception { + var response = getTrainedModelStats(modelId); + Map map = entityAsMap(response); + List> stats = (List>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + Long expectedMemoryUsage = ByteSizeValue.ofMb(300).getBytes() + RAW_MODEL_SIZE + ByteSizeValue.ofMb(10).getBytes(); + Integer actualMemoryUsage = (Integer) XContentMapValues.extractValue("model_size_stats.required_native_memory_bytes", stat); + assertThat(stat.toString(), actualMemoryUsage.toString(), equalTo(expectedMemoryUsage.toString())); + } + + private Response getTrainedModelStats(String modelId) throws IOException { + Request request = new Request("GET", "/_ml/trained_models/" + modelId + "/_stats"); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private Response infer(String input, String modelId) throws IOException { + Request request = new Request("POST", "/_ml/trained_models/" + modelId + "/deployment/_infer"); + request.setJsonEntity(Strings.format(""" + { "docs": [{"input":"%s"}] } + """, input)); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private void putModelDefinition(String modelId) throws IOException { + Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/definition/0"); + request.setJsonEntity(Strings.format(""" + {"total_definition_length":%s,"definition": "%s","total_parts": 1}""", RAW_MODEL_SIZE, BASE_64_ENCODED_MODEL)); + client().performRequest(request); + } + + private void putVocabulary(List vocabulary, String modelId) throws IOException { + List vocabularyWithPad = new ArrayList<>(); + vocabularyWithPad.add("[PAD]"); + vocabularyWithPad.add("[UNK]"); + vocabularyWithPad.addAll(vocabulary); + String quotedWords = vocabularyWithPad.stream().map(s -> "\"" + s + "\"").collect(Collectors.joining(",")); + + Request request = new Request("PUT", "_ml/trained_models/" + 
modelId + "/vocabulary"); + request.setJsonEntity(Strings.format(""" + { "vocabulary": [%s] } + """, quotedWords)); + client().performRequest(request); + } + + private void setupDeployments() throws Exception { + createTrainedModel("old_memory_format", 0, 0); + putModelDefinition("old_memory_format"); + putVocabulary(List.of("these", "are", "my", "words"), "old_memory_format"); + startDeployment("old_memory_format"); + + createTrainedModel("new_memory_format", ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes()); + putModelDefinition("new_memory_format"); + putVocabulary(List.of("these", "are", "my", "words"), "new_memory_format"); + startDeployment("new_memory_format"); + } + + private void cleanupDeployments() throws IOException { + stopDeployment("old_memory_format"); + deleteTrainedModel("old_memory_format"); + stopDeployment("new_memory_format"); + deleteTrainedModel("new_memory_format"); + } + + private void createTrainedModel(String modelId, long perDeploymentMemoryBytes, long perAllocationMemoryBytes) throws IOException { + Request request = new Request("PUT", "/_ml/trained_models/" + modelId); + if (perAllocationMemoryBytes > 0 && perDeploymentMemoryBytes > 0) { + request.setJsonEntity(Strings.format(""" + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "pass_through": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + }, + "metadata": { + "per_deployment_memory_bytes": %s, + "per_allocation_memory_bytes": %s + } + }""", perDeploymentMemoryBytes, perAllocationMemoryBytes)); + } else { + request.setJsonEntity(""" + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "pass_through": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + } + }"""); + } + client().performRequest(request); + } + + private void deleteTrainedModel(String modelId) throws IOException { + Request request = new Request("DELETE", "_ml/trained_models/" + modelId); + client().performRequest(request); + } + + private Response startDeployment(String modelId) throws IOException { + return startDeployment(modelId, "started"); + } + + private Response startDeployment(String modelId, String waitForState) throws IOException { + Request request = new Request( + "POST", + "/_ml/trained_models/" + + modelId + + "/deployment/_start?timeout=40s&wait_for=" + + waitForState + + "&inference_threads=1&model_threads=1" + ); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private void stopDeployment(String modelId) throws IOException { + String endpoint = "/_ml/trained_models/" + modelId + "/deployment/_stop"; + Request request = new Request("POST", endpoint); + client().performRequest(request); + } +} From c7135b659312d2f3e12b89b9c2f819f047c84b71 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 7 Nov 2023 14:34:50 +0100 Subject: [PATCH 14/30] Last load cache for native role mapping store (#101770) To increase resilience to security index unavailability this PR adds a fallback cache for native role mappings. Role mappings are cached when successfully loaded from the security index. The "cache" is _only_ used as a fallback, when the security index is not available. 
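As an illustrative aside, not part of this patch: the fallback described here reduces to a small pattern — a reference that is written only on a successful load and read only when the index cannot be searched. A minimal sketch, with invented names and plain strings standing in for role mappings:

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

final class LastLoadCachingStore {
    private final boolean lastLoadCacheEnabled;
    // Most recently loaded mappings; stays null until the first successful load.
    private final AtomicReference<List<String>> lastLoadRef = new AtomicReference<>(null);

    LastLoadCachingStore(boolean lastLoadCacheEnabled) {
        this.lastLoadCacheEnabled = lastLoadCacheEnabled;
    }

    List<String> getMappings(boolean indexSearchable) {
        if (indexSearchable) {
            List<String> loaded = loadFromIndex();
            if (lastLoadCacheEnabled) {
                lastLoadRef.set(List.copyOf(loaded)); // refreshed on every successful load
            }
            return loaded;
        }
        // Fallback path: use the cached copy only when the index cannot be searched.
        List<String> lastLoad = lastLoadRef.get();
        return lastLoad != null ? lastLoad : List.of();
    }

    private List<String> loadFromIndex() {
        return List.of("mapping-1"); // stand-in for a search against the security index
    }
}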
It's cleared when role mappings are modified, but not on security index state changes (e.g., when the index is closed, the reference is not cleared since we want it to work as a fallback for exactly this case). This is a best-effort approach targeting Serverless. An alternative would be to toggle behavior via an `isStateful` check; however, I prefer the flexibility of using a setting and the ability this gives us to centralize the test suite. A system property is also possible; a setting felt more straightforward. The PR uses a new "hidden" setting to toggle this behavior, which is not meant to be exposed or documented. Relates: ES-7057 --- .../xpack/security/authc/jwt/JwtRestIT.java | 10 +- ...JwtWithUnavailableSecurityIndexRestIT.java | 310 ++++++++++++++++++ .../xpack/security/Security.java | 1 + .../mapper/NativeRoleMappingStore.java | 60 +++- .../mapper/NativeRoleMappingStoreTests.java | 259 ++++++++++++++- 5 files changed, 616 insertions(+), 24 deletions(-) create mode 100644 x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java index a3d4c94e91882..db59bea999852 100644 --- a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java @@ -709,11 +709,11 @@ private SignedJWT signHmacJwt(JWTClaimsSet claimsSet, String hmacPassphrase) thr } // JWT construction - private JWTClaimsSet buildJwt(Map<String, Object> claims, Instant issueTime) { + static JWTClaimsSet buildJwt(Map<String, Object> claims, Instant issueTime) { return buildJwt(claims, issueTime, true, true); } - private JWTClaimsSet buildJwt(Map<String, Object> claims, Instant issueTime, boolean includeSub, boolean includeAud) { + static JWTClaimsSet buildJwt(Map<String, Object> claims, Instant issueTime, boolean includeSub, boolean includeAud) { final JWTClaimsSet.Builder builder = new JWTClaimsSet.Builder(); builder.issuer(randomAlphaOfLengthBetween(4, 24)); if (includeSub) { @@ -743,7 +743,7 @@ private JWTClaimsSet buildJwt(Map<String, Object> claims, Instant issueTime, boo return builder.build(); } - private SignedJWT signJWT(JWSSigner signer, String algorithm, JWTClaimsSet claimsSet) throws JOSEException { + static SignedJWT signJWT(JWSSigner signer, String algorithm, JWTClaimsSet claimsSet) throws JOSEException { final JWSHeader.Builder builder = new JWSHeader.Builder(JWSAlgorithm.parse(algorithm)); if (randomBoolean()) { builder.type(JOSEObjectType.JWT); @@ -775,13 +775,13 @@ private TestSecurityClient getSecurityClient(Consumer co } // Utility methods - private Map<String, Object> assertMap(Map<String, Object> response, ParseField field) { + static Map<String, Object> assertMap(Map<String, Object> response, ParseField field) { assertThat(response, hasKey(field.getPreferredName())); assertThat(response, hasEntry(is(field.getPreferredName()), instanceOf(Map.class))); return (Map<String, Object>) response.get(field.getPreferredName()); } - private List<?> assertList(Map<String, Object> response, ParseField field) { + static List<?> assertList(Map<String, Object> response, ParseField field) { assertThat(response, hasKey(field.getPreferredName())); assertThat(response, hasEntry(is(field.getPreferredName()), instanceOf(List.class))); return (List<?>) response.get(field.getPreferredName()); diff --git
a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java new file mode 100644 index 0000000000000..015c66aea6164 --- /dev/null +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java @@ -0,0 +1,310 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authc.jwt; + +import com.nimbusds.jose.JOSEException; +import com.nimbusds.jose.crypto.RSASSASigner; +import com.nimbusds.jose.jwk.JWK; +import com.nimbusds.jose.jwk.JWKSet; +import com.nimbusds.jose.jwk.RSAKey; +import com.nimbusds.jwt.JWTClaimsSet; +import com.nimbusds.jwt.SignedJWT; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.MutableSettingsProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.user.User; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; +import org.junit.ClassRule; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Path; +import java.text.ParseException; +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.instanceOf; + +public class JwtWithUnavailableSecurityIndexRestIT extends ESRestTestCase { + + // Using this to first test without, then with caching. 
Since caching is controlled by a static setting, we need a + // MutableSettingsProvider instance + private static final MutableSettingsProvider mutableSettingsForLastLoadCache = new MutableSettingsProvider(); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.ml.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .setting("xpack.security.http.ssl.enabled", "true") + .setting("xpack.security.http.ssl.certificate", "http.crt") + .setting("xpack.security.http.ssl.key", "http.key") + .setting("xpack.security.http.ssl.key_passphrase", "http-password") + .setting("xpack.security.http.ssl.certificate_authorities", "ca.crt") + .setting("xpack.security.http.ssl.client_authentication", "optional") + .setting("xpack.security.authc.realms.jwt.jwt1.order", "1") + .setting("xpack.security.authc.realms.jwt.jwt1.allowed_issuer", "https://issuer.example.com/") + .setting("xpack.security.authc.realms.jwt.jwt1.allowed_audiences", "https://audience.example.com/") + .setting("xpack.security.authc.realms.jwt.jwt1.claims.principal", "sub") + .setting("xpack.security.authc.realms.jwt.jwt1.claims.dn", "dn") + .setting("xpack.security.authc.realms.jwt.jwt1.required_claims.token_use", "id") + .setting("xpack.security.authc.realms.jwt.jwt1.required_claims.version", "2.0") + .setting("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "NONE") + .setting("xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path", "rsa.jwkset") + .settings(mutableSettingsForLastLoadCache) + .configFile("http.key", Resource.fromClasspath("ssl/http.key")) + .configFile("http.crt", Resource.fromClasspath("ssl/http.crt")) + .configFile("ca.crt", Resource.fromClasspath("ssl/ca.crt")) + .configFile("rsa.jwkset", Resource.fromClasspath("jwk/rsa-public-jwkset.json")) + .user("admin_user", "admin-password") + .build(); + + private static Path httpCertificateAuthority; + private TestSecurityClient adminSecurityClient; + + @BeforeClass + public static void findTrustStore() throws Exception { + httpCertificateAuthority = findResource("/ssl/ca.crt"); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + private static Path findResource(String name) throws FileNotFoundException, URISyntaxException { + final URL resource = JwtWithUnavailableSecurityIndexRestIT.class.getResource(name); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource " + name); + } + return PathUtils.get(resource.toURI()); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getProtocol() { + return "https"; + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).put(restSslSettings()).build(); + } + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(restSslSettings()).build(); + } + + private Settings restSslSettings() { + return Settings.builder().put(CERTIFICATE_AUTHORITIES, 
httpCertificateAuthority).build(); + } + + protected TestSecurityClient getAdminSecurityClient() { + if (adminSecurityClient == null) { + adminSecurityClient = new TestSecurityClient(adminClient()); + } + return adminSecurityClient; + } + + public void testRoleMappingWithoutCacheFailsWithoutAccessToSecurityIndex() throws Exception { + final String dn = randomDn(); + + final String rules = Strings.format(""" + { "all": [ + { "field": { "realm.name": "jwt1" } }, + { "field": { "dn": "%s" } } + ] } + """, dn); + + final List roles = randomRoles(); + final String roleMappingName = createRoleMapping(roles, rules); + final String principal = randomPrincipal(); + + try { + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + + makeSecurityIndexUnavailable(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, List.of()); + } + + // Now enable caching (since the setting is not dynamic, this requires a cluster restart), and test caching + makeSecurityIndexAvailable(); + mutableSettingsForLastLoadCache.put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true"); + restartClusterAndResetClients(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + + makeSecurityIndexUnavailable(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + } finally { + makeSecurityIndexAvailable(); + deleteRoleMapping(roleMappingName); + } + } + + private void restartClusterAndResetClients() throws IOException { + cluster.restart(false); + adminSecurityClient = null; + closeClients(); + initClient(); + } + + private void assertAuthenticationHasUsernameAndRoles( + Map response, + String expectedUsername, + List expectedRoles + ) { + final String description = "Authentication response [" + response + "]"; + assertThat(description, response, hasEntry(User.Fields.USERNAME.getPreferredName(), expectedUsername)); + assertThat( + description, + JwtRestIT.assertList(response, User.Fields.ROLES), + Matchers.containsInAnyOrder(expectedRoles.toArray(String[]::new)) + ); + } + + private void makeSecurityIndexUnavailable() throws IOException { + Request closeRequest = new Request("POST", "/.security/_close"); + closeRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); + assertOK(adminClient().performRequest(closeRequest)); + } + + private void makeSecurityIndexAvailable() throws IOException { + Request openRequest = new Request("POST", "/.security/_open"); + openRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); + assertOK(adminClient().performRequest(openRequest)); + } + + private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { + return RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler( + w -> w.size() > 0 + && w.contains( + "this request accesses system indices: [" + + index + + "], but in a future major " + + "version, direct access to system indices will be prevented by default" + ) == false + ); + } + + 
private String randomPrincipal() { + // We append _test so that it cannot randomly conflict with builtin user + return randomAlphaOfLengthBetween(4, 12) + "_test"; + } + + private String randomDn() { + return "CN=" + randomPrincipal(); + } + + private List randomRoles() { + // We append _test so that it cannot randomly conflict with builtin roles + return randomList(1, 3, () -> randomAlphaOfLengthBetween(4, 12) + "_test"); + } + + private SignedJWT buildAndSignJwt(String principal, String dn, Instant issueTime) throws JOSEException, ParseException, IOException { + final JWTClaimsSet claimsSet = JwtRestIT.buildJwt( + Map.ofEntries( + Map.entry("iss", "https://issuer.example.com/"), + Map.entry("aud", "https://audience.example.com/"), + Map.entry("sub", principal), + Map.entry("dn", dn), + Map.entry("token_use", "id"), + Map.entry("version", "2.0") + ), + issueTime + ); + final RSASSASigner signer = loadRsaSigner(); + return JwtRestIT.signJWT(signer, "RS256", claimsSet); + } + + private RSASSASigner loadRsaSigner() throws IOException, ParseException, JOSEException { + try (var in = getDataInputStream("/jwk/rsa-private-jwkset.json")) { + final JWKSet jwkSet = JWKSet.load(in); + final JWK key = jwkSet.getKeyByKeyId("test-rsa-key"); + assertThat(key, instanceOf(RSAKey.class)); + return new RSASSASigner((RSAKey) key); + } + } + + private TestSecurityClient getSecurityClient(SignedJWT jwt) { + final String bearerHeader = "Bearer " + jwt.serialize(); + final RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.addHeader("Authorization", bearerHeader); + return new TestSecurityClient(client(), options.build()); + } + + private String createRoleMapping(List roles, String rules) throws IOException { + Map mapping = new HashMap<>(); + mapping.put("enabled", true); + mapping.put("roles", roles); + mapping.put("rules", XContentHelper.convertToMap(XContentType.JSON.xContent(), rules, true)); + final String mappingName = "test-" + getTestName() + "-" + randomAlphaOfLength(8); + getAdminSecurityClient().putRoleMapping(mappingName, mapping); + return mappingName; + } + + private void deleteRoleMapping(String name) throws IOException { + getAdminSecurityClient().deleteRoleMapping(name); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 42b4c8c459eb0..02de32078469e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1198,6 +1198,7 @@ public static List> getSettings(List securityExten settingsList.add(CachingServiceAccountTokenStore.CACHE_HASH_ALGO_SETTING); settingsList.add(CachingServiceAccountTokenStore.CACHE_MAX_TOKENS_SETTING); settingsList.add(SimpleRole.CACHE_SIZE_SETTING); + settingsList.add(NativeRoleMappingStore.LAST_LOAD_CACHE_ENABLED_SETTING); // hide settings settingsList.add(Setting.stringListSetting(SecurityField.setting("hide_settings"), Property.NodeScope, Property.Filtered)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 218e120e30941..ba28e2a9952cc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java 
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -17,9 +18,11 @@ import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.script.ScriptService; @@ -52,6 +55,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -87,17 +91,27 @@ public class NativeRoleMappingStore implements UserRoleMapper { private static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_"; + public static final Setting<Boolean> LAST_LOAD_CACHE_ENABLED_SETTING = Setting.boolSetting( + "xpack.security.authz.store.role_mappings.last_load_cache.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Filtered + ); + private final Settings settings; private final Client client; private final SecurityIndexManager securityIndex; private final ScriptService scriptService; private final List<String> realmsToRefresh = new CopyOnWriteArrayList<>(); + private final boolean lastLoadCacheEnabled; + private final AtomicReference<List<ExpressionRoleMapping>> lastLoadRef = new AtomicReference<>(null); public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex, ScriptService scriptService) { this.settings = settings; this.client = client; this.securityIndex = securityIndex; this.scriptService = scriptService; + this.lastLoadCacheEnabled = LAST_LOAD_CACHE_ENABLED_SETTING.get(settings); } private static String getNameFromId(String id) { @@ -105,7 +119,8 @@ private static String getNameFromId(String id) { return id.substring(ID_PREFIX.length()); } - private static String getIdForName(String name) { + // package-private for testing + static String getIdForName(String name) { return ID_PREFIX + name; } @@ -139,6 +154,10 @@ protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection<ExpressionRoleMapping> mappings) -> { final List<ExpressionRoleMapping> mappingList = mappings.stream().filter(Objects::nonNull).toList(); logger.debug("successfully loaded [{}] role-mapping(s) from [{}]", mappingList.size(), securityIndex.aliasName()); + if (lastLoadCacheEnabled) { + logger.debug("caching loaded role-mapping(s)"); + lastLoadRef.set(List.copyOf(mappingList)); + } listener.onResponse(mappingList); }, ex -> { logger.error( @@ -294,19 +313,44 @@ public void getRoleMappings(Set<String> names, ActionListener<List<ExpressionRoleMapping>> listener) { final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { - logger.debug("The security does not index exist - no role mappings can be loaded"); - listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.indexIsClosed()) { - logger.debug("The security index exists but is closed - no role mappings can be loaded"); + logger.debug("The security index does not exist - no role mappings can be loaded"); listener.onResponse(Collections.emptyList()); + return; + } + final List<ExpressionRoleMapping> lastLoad = lastLoadRef.get(); + if (frozenSecurityIndex.indexIsClosed()) { + if (lastLoad != null) { + assert lastLoadCacheEnabled; + logger.debug("The security index exists but is closed - returning previously cached role mappings"); + listener.onResponse(lastLoad); + } else { + logger.debug("The security index exists but is closed - no role mappings can be loaded"); + listener.onResponse(Collections.emptyList()); + } } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { - logger.debug("The security index exists but is not available - no role mappings can be loaded"); - listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + final ElasticsearchException unavailableReason = frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS); + if (lastLoad != null) { + assert lastLoadCacheEnabled; + logger.debug( + "The security index exists but is not available - returning previously cached role mappings", + unavailableReason + ); + listener.onResponse(lastLoad); + } else { + logger.debug("The security index exists but is not available - no role mappings can be loaded"); + listener.onFailure(unavailableReason); + } } else { loadMappings(listener); } } + // package-private for testing + @Nullable + List<ExpressionRoleMapping> getLastLoad() { + return lastLoadRef.get(); + } + /** * Provides usage statistics for this store. * The resulting map contains the keys @@ -317,7 +361,7 @@ private void getMappings(ActionListener<List<ExpressionRoleMapping>> listener) { * */ public void usageStats(ActionListener<Map<String, Object>> listener) { - if (securityIndex.isAvailable(SEARCH_SHARDS) == false) { + if (securityIndex.indexIsClosed() || securityIndex.isAvailable(SEARCH_SHARDS) == false) { reportStats(listener, Collections.emptyList()); } else { getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 16ef229ed5436..efc97ca30cd1a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -6,13 +6,19 @@ */ package org.elasticsearch.xpack.security.authc.support.mapper; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -20,8 +26,14 @@ import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; @@ -40,6 +52,8 @@ import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.Mockito; import java.time.Instant; import java.util.Arrays; @@ -48,14 +62,22 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class NativeRoleMappingStoreTests extends ESTestCase { @@ -64,6 +86,20 @@ public class NativeRoleMappingStoreTests extends ESTestCase { TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7 ); + private ScriptService scriptService; + private SecurityIndexManager securityIndex; + + @Before + public void setup() { + scriptService = new ScriptService( + Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), + ScriptModule.CORE_CONTEXTS, + () -> 1L + ); + securityIndex = mockHealthySecurityIndex(); + } + public void testResolveRoles() throws Exception { // Does match DN final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping( @@ -118,17 +154,6 @@ public void testResolveRoles() throws Exception { ); final Client client = mock(Client.class); - SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - ScriptService scriptService = new ScriptService( - Settings.EMPTY, - Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), - ScriptModule.CORE_CONTEXTS, - () -> 1L - ); - when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); - when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); - when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final NativeRoleMappingStore store = new 
NativeRoleMappingStore(Settings.EMPTY, client, securityIndex, scriptService) { @Override @@ -161,6 +186,218 @@ protected void loadMappings(ActionListener> listener store.resolveRoles(user, future); final Set roles = future.get(); assertThat(roles, Matchers.containsInAnyOrder("dept_h", "defence", "flight")); + assertThat(store.getLastLoad(), is(nullValue())); + } + + public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexAvailable() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + // when security index is available, we still run a search + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(2)).search(any(SearchRequest.class), anyActionListener()); + } + + public void testResolveRolesUsesLastLoadCacheWhenSecurityIndexUnavailable() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + 
final boolean indexAvailable = randomBoolean(); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(indexAvailable); + final boolean indexClosed = indexAvailable || randomBoolean(); + when(securityIndex.indexIsClosed()).thenReturn(indexClosed); + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder(mapping.getRoles().toArray())); + assertThat(store.getLastLoad(), contains(mapping)); + // index was unavailable, so we returned result from cache; no new search + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + // new search result from index overwrites previous + when(securityIndex.indexExists()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.indexIsClosed()).thenReturn(false); + final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping( + "mapping2", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role2"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping2); + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder(mapping2.getRoles().toArray())); + assertThat(store.getLastLoad(), contains(mapping2)); + } + + public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexDoesNotExist() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + when(securityIndex.indexExists()).thenReturn(false); + assertThat(resolveRoles(store, user), is(empty())); + assertThat(store.getLastLoad(), contains(mapping)); + } + + private SecurityIndexManager mockHealthySecurityIndex() { + final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.indexExists()).thenReturn(true); + when(securityIndex.isIndexUpToDate()).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + return securityIndex; + } + + private void doAnswerWithSearchResult(Client client, 
ExpressionRoleMapping mapping) { + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + final var listener = (ActionListener<SearchResponse>) invocation.getArguments()[1]; + final var searchHit = new SearchHit( + randomIntBetween(0, Integer.MAX_VALUE), + NativeRoleMappingStore.getIdForName(mapping.getName()) + ); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + mapping.toXContent(builder, ToXContent.EMPTY_PARAMS); + searchHit.sourceRef(BytesReference.bytes(builder)); + } + final var internalSearchResponse = new InternalSearchResponse( + new SearchHits( + new SearchHit[] { searchHit }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + null, + false, + null, + 0 + ); + final var searchResponse = new SearchResponse( + internalSearchResponse, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, + null + ); + listener.onResponse(searchResponse); + return null; + }).when(client).search(any(SearchRequest.class), anyActionListener()); + } + + private Set<String> resolveRoles(NativeRoleMappingStore store, UserRoleMapper.UserData user) throws InterruptedException, + ExecutionException { + final PlainActionFuture<Set<String>> future = new PlainActionFuture<>(); + store.resolveRoles(user, future); + return future.get(); } private String randomiseDn(String dn) { From e44eea3ab37bbcb1239a63ce57de2845fa4751cc Mon Sep 17 00:00:00 2001 From: William Brafford Date: Tue, 7 Nov 2023 08:40:53 -0500 Subject: [PATCH 15/30] Return String from PluginDescriptor#getElasticsearchVersion (#100735) We want plugin descriptors to be able to return opaque strings from getElasticsearchVersion. The one problem here is that the stable plugin API assumes semantic versioning for determining compatibility: a plugin from an earlier release of the same major version can be loaded, but not one from a future release. So we check to see if the current build version can be parsed as a semantic version, and, if so, we apply this logic. For non-stable plugins on a semantically-versioned build, we check that the plugin's version matches the build version. If the build version is not semantic, we assume that the plugins come from the current build, since no one should be installing or modifying plugins on serverless.
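A minimal sketch of that version gate, with invented names (the real logic lives in PluginsUtils#verifyCompatibility in the diff below, which additionally rejects stable plugins built for a later release of the same major version):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class VersionGateSketch {
    private static final Pattern SEMANTIC = Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)$");

    static void verify(String buildVersion, String pluginVersion, boolean stablePlugin) {
        Matcher build = SEMANTIC.matcher(buildVersion);
        if (build.matches() == false) {
            // Non-semantic build (e.g. serverless): assume plugins ship with the build.
            return;
        }
        if (stablePlugin == false) {
            // Non-stable plugins must match the build version exactly.
            if (buildVersion.equals(pluginVersion) == false) {
                throw new IllegalArgumentException("plugin was built for " + pluginVersion);
            }
            return;
        }
        Matcher plugin = SEMANTIC.matcher(pluginVersion);
        if (plugin.matches() == false) {
            throw new IllegalArgumentException("expected a semantic version but was " + pluginVersion);
        }
        // Stable plugins may only differ within the same major version.
        if (Integer.parseInt(plugin.group(1)) != Integer.parseInt(build.group(1))) {
            throw new IllegalArgumentException("plugin was built for major version " + plugin.group(1));
        }
    }
}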
* getElasticsearchVersion returns string * Fix utils for snapshot versions * Remove Version.CURRENT from error messages * Only check plugin version compatibility for semantically versioned builds --- .../plugins/PluginDescriptor.java | 4 +- .../elasticsearch/plugins/PluginsUtils.java | 120 ++++++++++++++---- .../plugins/PluginDescriptorTests.java | 4 +- .../plugins/PluginsUtilsTests.java | 12 ++ 4 files changed, 111 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index 13baae5950d6c..e0ee229fe1f98 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -399,8 +399,8 @@ public String getVersion() { * * @return an Elasticsearch version */ - public Version getElasticsearchVersion() { - return Version.fromString(elasticsearchVersion); + public String getElasticsearchVersion() { + return elasticsearchVersion; } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java index 4d30ca1f1a261..0533f535a19f1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Build; -import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.jdk.JarHell; @@ -30,6 +30,8 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -77,43 +79,111 @@ public static List findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current Elasticsearch installation. 
*/ public static void verifyCompatibility(PluginDescriptor info) { - if (info.isStable()) { - if (info.getElasticsearchVersion().major != Version.CURRENT.major) { - throw new IllegalArgumentException( - "Stable Plugin [" - + info.getName() - + "] was built for Elasticsearch major version " - + info.getElasticsearchVersion().major - + " but version " - + Version.CURRENT - + " is running" + final String currentVersion = Build.current().version(); + Matcher buildVersionMatcher = SemanticVersion.semanticPattern.matcher(currentVersion); + // If we're not on a semantic version, assume plugins are compatible + if (buildVersionMatcher.matches()) { + SemanticVersion currentElasticsearchSemanticVersion; + try { + currentElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(buildVersionMatcher.group(1)), + Integer.parseInt(buildVersionMatcher.group(2)), + Integer.parseInt(buildVersionMatcher.group(3)) ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Couldn't parse integers from build version [" + currentVersion + "]", e); } - if (info.getElasticsearchVersion().after(Version.CURRENT)) { + if (info.isStable()) { + Matcher pluginEsVersionMatcher = SemanticVersion.semanticPattern.matcher(info.getElasticsearchVersion()); + if (pluginEsVersionMatcher.matches() == false) { + throw new IllegalArgumentException( + "Expected semantic version for plugin [" + info.getName() + "] but was [" + info.getElasticsearchVersion() + "]" + ); + } + SemanticVersion pluginElasticsearchSemanticVersion; + try { + pluginElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(pluginEsVersionMatcher.group(1)), + Integer.parseInt(pluginEsVersionMatcher.group(2)), + Integer.parseInt(pluginEsVersionMatcher.group(3)) + ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "Expected integer version for plugin [" + info.getName() + "] but found [" + info.getElasticsearchVersion() + "]", + e + ); + } + + // case: Major version mismatch + if (pluginElasticsearchSemanticVersion.major != currentElasticsearchSemanticVersion.major) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch major version " + + pluginElasticsearchSemanticVersion.major + + " but version " + + currentVersion + + " is running" + ); + } + + // case: stable plugin from the future + if (pluginElasticsearchSemanticVersion.after(currentElasticsearchSemanticVersion)) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch version " + + info.getElasticsearchVersion() + + " but earlier version " + + currentVersion + + " is running" + ); + } + } else if (info.getElasticsearchVersion().equals(currentVersion) == false) { throw new IllegalArgumentException( - "Stable Plugin [" + "Plugin [" + info.getName() + "] was built for Elasticsearch version " + info.getElasticsearchVersion() - + " but earlier version " - + Version.CURRENT + + " but version " + + currentVersion + " is running" ); } - } else if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) { - throw new IllegalArgumentException( - "Plugin [" - + info.getName() - + "] was built for Elasticsearch version " - + info.getElasticsearchVersion() - + " but version " - + Version.CURRENT - + " is running" - ); } JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + private record SemanticVersion(int major, int minor, int bugfix) { + + static final Pattern semanticPattern = 
Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)$"); + + // does not compare anything after the semantic version + boolean after(SemanticVersion other) { + // major + if (this.major < other.major) { + return false; + } + if (this.major > other.major) { + return true; + } + // minor + if (this.minor < other.minor) { + return false; + } + if (this.minor > other.minor) { + return true; + } + // bugfix + return this.bugfix > other.bugfix; + } + + @Override + public String toString() { + return Strings.format("%d.%d.%d", this.major, this.minor, this.bugfix); + } + } + /** * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the * plugin. diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index 5ed02bd4b35c9..000dc1a33ed91 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -335,7 +335,7 @@ public void testPluginEqualityAndHash() { descriptor1.getName(), randomValueOtherThan(descriptor1.getDescription(), () -> randomAlphaOfLengthBetween(4, 12)), randomValueOtherThan(descriptor1.getVersion(), () -> randomAlphaOfLengthBetween(4, 12)), - descriptor1.getElasticsearchVersion().previousMajor().toString(), + "8.0.0", randomValueOtherThan(descriptor1.getJavaVersion(), () -> randomAlphaOfLengthBetween(4, 12)), descriptor1.isStable() ? randomAlphaOfLengthBetween(4, 12) : null, descriptor1.isStable() ? randomAlphaOfLength(6) : null, @@ -352,7 +352,7 @@ public void testPluginEqualityAndHash() { randomValueOtherThan(descriptor1.getName(), () -> randomAlphaOfLengthBetween(4, 12)), descriptor1.getDescription(), descriptor1.getVersion(), - descriptor1.getElasticsearchVersion().toString(), + descriptor1.getElasticsearchVersion(), descriptor1.getJavaVersion(), classname, descriptor1.getModuleName().orElse(null), diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index 3556c94980773..a7cc74582afdc 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -382,6 +382,18 @@ public void testJarHellSpiConflict() throws Exception { assertThat(e.getCause().getMessage(), containsString("DummyClass1")); } + public void testInternalNonSemanticVersions() throws Exception { + PluginDescriptor info = getPluginDescriptorForVersion(randomAlphaOfLengthBetween(6, 32), "1.8", false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("Plugin [my_plugin] was built for Elasticsearch version")); + } + + public void testStableNonSemanticVersions() throws Exception { + PluginDescriptor info = getPluginDescriptorForVersion(randomAlphaOfLengthBetween(6, 32), "1.8", true); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("Expected semantic version for plugin [my_plugin] but was")); + } + public void testStableEarlierElasticsearchVersion() throws Exception { PluginDescriptor info = getPluginDescriptorForVersion(Version.fromId(Version.CURRENT.id + 1).toString(), "1.8", true); IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); From d25435e1855733282413bab0a5ac53d2c0cac11a Mon Sep 17 00:00:00 2001 From: amyjtechwriter <61687663+amyjtechwriter@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:43:28 +0000 Subject: [PATCH 16/30] disabling source (#101839) --- docs/reference/mapping/fields/source-field.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index f905be3d452ba..ec824e421e015 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -43,6 +43,8 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. +* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. + * On the fly <>. * The ability to reindex from one Elasticsearch index to another, either From 99b651898d995d0245425adc2ce3920ba7e2a30f Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 7 Nov 2023 07:58:18 -0800 Subject: [PATCH 17/30] Add an additional tiebreaker to RRF (#101847) This change adds an additional tiebreaker for RRF: when two documents have the same RRF "score", such as identical ranks of (3,4) and (4,3) or (1,-) and (-,1), the ordering falls back to the highest score from query 1, then query 2, and so on. If all scores are equal, the tiebreaker is shard index followed by doc id, but these are not necessarily stable. This should resolve most of the stability issues outlined as part of (#101232). Closes #101232 --- docs/changelog/101847.yaml | 6 + .../rank/rrf/RRFRankCoordinatorContext.java | 15 +- .../xpack/rank/rrf/RRFRankShardContext.java | 14 + .../xpack/rank/rrf/RRFRankContextTests.java | 323 +++++++++++++++++- 4 files changed, 349 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/101847.yaml diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml new file mode 100644 index 0000000000000..91922b9e23ed0 --- /dev/null +++ b/docs/changelog/101847.yaml @@ -0,0 +1,6 @@ +pr: 101847 +summary: Add an additional tiebreaker to RRF +area: Ranking +type: bug +issues: + - 101232 diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java index d7b96ad439501..50f3646264a92 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java @@ -127,12 +127,25 @@ protected boolean lessThan(RRFRankDoc a, RRFRankDoc b) { } } - // sort the results based on rrf score, tiebreaker based on smaller shard then smaller doc id + // sort the results based on rrf score, tiebreaker based on + // larger individual query score from 1 to n, smaller shard then smaller doc id RRFRankDoc[] sortedResults = results.values().toArray(RRFRankDoc[]::new); Arrays.sort(sortedResults, (RRFRankDoc rrf1, RRFRankDoc rrf2) -> { if (rrf1.score != rrf2.score) { return rrf1.score < rrf2.score ? 1 : -1; } + assert rrf1.positions.length == rrf2.positions.length; + for (int qi = 0; qi < rrf1.positions.length; ++qi) { + if (rrf1.positions[qi] != NO_RANK && rrf2.positions[qi] != NO_RANK) { + if (rrf1.scores[qi] != rrf2.scores[qi]) { + return rrf1.scores[qi] < rrf2.scores[qi] ?
1 : -1; + } + } else if (rrf1.positions[qi] != NO_RANK) { + return -1; + } else if (rrf2.positions[qi] != NO_RANK) { + return 1; + } + } if (rrf1.shardIndex != rrf2.shardIndex) { return rrf1.shardIndex < rrf2.shardIndex ? -1 : 1; } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java index e251207bdcb2a..e22e328193700 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java @@ -17,6 +17,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.search.rank.RankDoc.NO_RANK; + /** * Executes queries and generates results on the shard for RRF. */ @@ -74,6 +76,18 @@ public RRFRankShardResult combine(List rankResults) { if (rrf1.score != rrf2.score) { return rrf1.score < rrf2.score ? 1 : -1; } + assert rrf1.positions.length == rrf2.positions.length; + for (int qi = 0; qi < rrf1.positions.length; ++qi) { + if (rrf1.positions[qi] != NO_RANK && rrf2.positions[qi] != NO_RANK) { + if (rrf1.scores[qi] != rrf2.scores[qi]) { + return rrf1.scores[qi] < rrf2.scores[qi] ? 1 : -1; + } + } else if (rrf1.positions[qi] != NO_RANK) { + return -1; + } else if (rrf2.positions[qi] != NO_RANK) { + return 1; + } + } return rrf1.doc < rrf2.doc ? -1 : 1; }); // trim the results to window size diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java index f1f19a371ed07..5cb89c071c767 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java @@ -239,31 +239,338 @@ public void testCoordinatorRank() { expected.score = 0.6666667f; assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]); - expected = new RRFRankDoc(1, 1, 2); + expected = new RRFRankDoc(3, 1, 2); expected.rank = 2; + expected.positions[0] = 0; + expected.positions[1] = NO_RANK; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]); + + expected = new RRFRankDoc(1, 1, 2); + expected.rank = 3; expected.positions[0] = NO_RANK; expected.positions[1] = 0; expected.scores[0] = 0.0f; expected.scores[1] = 8.0f; expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]); + + expected = new RRFRankDoc(2, 2, 2); + expected.rank = 4; + expected.positions[0] = 3; + expected.positions[1] = 3; + expected.scores[0] = 8.5f; + expected.scores[1] = 6.5f; + expected.score = 0.4f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]); + } + + public void testShardTieBreaker() { + RRFRankShardContext context = new RRFRankShardContext(null, 0, 10, 1); + + List topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, -1), new ScoreDoc(2, 9.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(2, 8.0f, -1), new ScoreDoc(1, 7.0f, -1) }) + ); + + RRFRankShardResult result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(2, result.rrfRankDocs.length); + + RRFRankDoc expected = new RRFRankDoc(1, -1, 2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = 1; + 
expected.scores[0] = 10.0f; + expected.scores[1] = 7.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 0; + expected.scores[0] = 9.0f; + expected.scores[1] = 8.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, -1), new ScoreDoc(2, 9.0f, -1), new ScoreDoc(3, 9.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(4, 11.0f, -1), new ScoreDoc(3, 9.0f, -1), new ScoreDoc(2, 7.0f, -1) }) + ); + + result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(4, result.rrfRankDocs.length); + + expected = new RRFRankDoc(3, -1, 2); + expected.rank = 1; + expected.positions[0] = 2; + expected.positions[1] = 1; + expected.scores[0] = 9.0f; + expected.scores[1] = 9.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 2; + expected.scores[0] = 9.0f; + expected.scores[1] = 7.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + expected = new RRFRankDoc(1, -1, 2); + expected.rank = 3; + expected.positions[0] = 0; + expected.positions[1] = -1; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[2]); + + expected = new RRFRankDoc(4, -1, 2); + expected.rank = 4; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 11.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[3]); + + topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, -1), new ScoreDoc(3, 3.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(2, 8.0f, -1), new ScoreDoc(4, 5.0f, -1) }) + ); + + result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(4, result.rrfRankDocs.length); + + expected = new RRFRankDoc(1, -1, 2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = -1; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 8.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + expected = new RRFRankDoc(3, -1, 2); + expected.rank = 3; + expected.positions[0] = 1; + expected.positions[1] = -1; + expected.scores[0] = 3.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[2]); + + expected = new RRFRankDoc(4, -1, 2); + expected.rank = 4; + expected.positions[0] = -1; + expected.positions[1] = 1; + expected.scores[0] = 0.0f; + expected.scores[1] = 5.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[3]); + } + + public void testCoordinatorRankTieBreaker() { + RRFRankCoordinatorContext context = new RRFRankCoordinatorContext(4, 0, 5, 1); + + QuerySearchResult qsr0 = new QuerySearchResult(); + qsr0.setShardIndex(1); + RRFRankDoc rd11 = new RRFRankDoc(1, -1, 2); + rd11.positions[0] = 0; + rd11.positions[1] = 0; + rd11.scores[0] = 10.0f; + 
+    public void testCoordinatorRankTieBreaker() {
+        RRFRankCoordinatorContext context = new RRFRankCoordinatorContext(4, 0, 5, 1);
+
+        QuerySearchResult qsr0 = new QuerySearchResult();
+        qsr0.setShardIndex(1);
+        RRFRankDoc rd11 = new RRFRankDoc(1, -1, 2);
+        rd11.positions[0] = 0;
+        rd11.positions[1] = 0;
+        rd11.scores[0] = 10.0f;
+        rd11.scores[1] = 7.0f;
+        qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11 }));
+
+        QuerySearchResult qsr1 = new QuerySearchResult();
+        qsr1.setShardIndex(2);
+        RRFRankDoc rd21 = new RRFRankDoc(1, -1, 2);
+        rd21.positions[0] = 0;
+        rd21.positions[1] = 0;
+        rd21.scores[0] = 9.0f;
+        rd21.scores[1] = 8.0f;
+        qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21 }));
+
+        TopDocsStats tds = new TopDocsStats(0);
+        SortedTopDocs std = context.rank(List.of(qsr0, qsr1), tds);
+
+        assertEquals(2, tds.fetchHits);
+        assertEquals(2, std.scoreDocs().length);
+
+        RRFRankDoc expected = new RRFRankDoc(1, 1, 2);
+        expected.rank = 1;
+        expected.positions[0] = 0;
+        expected.positions[1] = 1;
+        expected.scores[0] = 10.0f;
+        expected.scores[1] = 7.0f;
+        expected.score = 0.8333333730697632f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]);
+
+        expected = new RRFRankDoc(1, 2, 2);
+        expected.rank = 2;
+        expected.positions[0] = 1;
+        expected.positions[1] = 0;
+        expected.scores[0] = 9.0f;
+        expected.scores[1] = 8.0f;
+        expected.score = 0.8333333730697632f;
         assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]);
 
-        expected = new RRFRankDoc(3, 1, 2);
+        qsr0 = new QuerySearchResult();
+        qsr0.setShardIndex(1);
+        rd11 = new RRFRankDoc(1, -1, 2);
+        rd11.positions[0] = 0;
+        rd11.positions[1] = -1;
+        rd11.scores[0] = 10.0f;
+        rd11.scores[1] = 0.0f;
+        RRFRankDoc rd12 = new RRFRankDoc(2, -1, 2);
+        rd12.positions[0] = 0;
+        rd12.positions[1] = 1;
+        rd12.scores[0] = 9.0f;
+        rd12.scores[1] = 7.0f;
+        qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 }));
+
+        qsr1 = new QuerySearchResult();
+        qsr1.setShardIndex(2);
+        rd21 = new RRFRankDoc(1, -1, 2);
+        rd21.positions[0] = -1;
+        rd21.positions[1] = 0;
+        rd21.scores[0] = 0.0f;
+        rd21.scores[1] = 11.0f;
+        RRFRankDoc rd22 = new RRFRankDoc(2, -1, 2);
+        rd22.positions[0] = 0;
+        rd22.positions[1] = 1;
+        rd22.scores[0] = 9.0f;
+        rd22.scores[1] = 9.0f;
+        qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 }));
+
+        tds = new TopDocsStats(0);
+        std = context.rank(List.of(qsr0, qsr1), tds);
+
+        assertEquals(4, tds.fetchHits);
+        assertEquals(4, std.scoreDocs().length);
+
+        expected = new RRFRankDoc(2, 2, 2);
+        expected.rank = 1;
+        expected.positions[0] = 2;
+        expected.positions[1] = 1;
+        expected.scores[0] = 9.0f;
+        expected.scores[1] = 9.0f;
+        expected.score = 0.5833333730697632f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]);
+
+        expected = new RRFRankDoc(2, 1, 2);
+        expected.rank = 2;
+        expected.positions[0] = 1;
+        expected.positions[1] = 2;
+        expected.scores[0] = 9.0f;
+        expected.scores[1] = 7.0f;
+        expected.score = 0.5833333730697632f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]);
+
+        expected = new RRFRankDoc(1, 1, 2);
         expected.rank = 3;
         expected.positions[0] = 0;
-        expected.positions[1] = NO_RANK;
+        expected.positions[1] = -1;
+        expected.scores[0] = 10.0f;
+        expected.scores[1] = 0.0f;
+        expected.score = 0.5f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]);
+
+        expected = new RRFRankDoc(1, 2, 2);
+        expected.rank = 4;
+        expected.positions[0] = -1;
+        expected.positions[1] = 0;
+        expected.scores[0] = 0.0f;
+        expected.scores[1] = 11.0f;
+        expected.score = 0.5f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]);
+
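Editor's note: the float literals asserted above (0.8333333730697632f and 0.5833333730697632f) are consistent with summing 1/(rankConstant + position + 1) per query in float arithmetic with rank constant 1. A small check under that assumption; CoordinatorScoreSketch is hypothetical, not code from RRFRankCoordinatorContext.

```java
public class CoordinatorScoreSketch {
    public static void main(String[] args) {
        // Top doc of the first scenario: positions {0, 1}, i.e. ranks 1 and 2.
        float rank1 = 1.0f / 2 + 1.0f / 3;
        // Top doc of the second scenario: positions {2, 1}, i.e. ranks 3 and 2.
        float rank1Scenario2 = 1.0f / 4 + 1.0f / 3;
        System.out.println(rank1 == 0.8333333730697632f);          // true
        System.out.println(rank1Scenario2 == 0.5833333730697632f); // true
    }
}
```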
+        qsr0 = new QuerySearchResult();
+        qsr0.setShardIndex(1);
+        rd11 = new RRFRankDoc(1, -1, 2);
+        rd11.positions[0] = 0;
+        rd11.positions[1] = -1;
+        rd11.scores[0] = 10.0f;
+        rd11.scores[1] = 0.0f;
+        rd12 = new RRFRankDoc(2, -1, 2);
+        rd12.positions[0] = -1;
+        rd12.positions[1] = 0;
+        rd12.scores[0] = 0.0f;
+        rd12.scores[1] = 12.0f;
+        qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 }));
+
+        qsr1 = new QuerySearchResult();
+        qsr1.setShardIndex(2);
+        rd21 = new RRFRankDoc(1, -1, 2);
+        rd21.positions[0] = 0;
+        rd21.positions[1] = -1;
+        rd21.scores[0] = 3.0f;
+        rd21.scores[1] = 0.0f;
+        rd22 = new RRFRankDoc(2, -1, 2);
+        rd22.positions[0] = -1;
+        rd22.positions[1] = 0;
+        rd22.scores[0] = 0.0f;
+        rd22.scores[1] = 5.0f;
+        qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 }));
+
+        tds = new TopDocsStats(0);
+        std = context.rank(List.of(qsr0, qsr1), tds);
+
+        assertEquals(4, tds.fetchHits);
+        assertEquals(4, std.scoreDocs().length);
+
+        expected = new RRFRankDoc(1, 1, 2);
+        expected.rank = 1;
+        expected.positions[0] = 0;
+        expected.positions[1] = -1;
         expected.scores[0] = 10.0f;
         expected.scores[1] = 0.0f;
         expected.score = 0.5f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]);
+
+        expected = new RRFRankDoc(2, 1, 2);
+        expected.rank = 2;
+        expected.positions[0] = -1;
+        expected.positions[1] = 0;
+        expected.scores[0] = 0.0f;
+        expected.scores[1] = 12.0f;
+        expected.score = 0.5f;
+        assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]);
+
+        expected = new RRFRankDoc(1, 2, 2);
+        expected.rank = 3;
+        expected.positions[0] = 1;
+        expected.positions[1] = -1;
+        expected.scores[0] = 3.0f;
+        expected.scores[1] = 0.0f;
+        expected.score = 0.3333333333333333f;
         assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]);
 
         expected = new RRFRankDoc(2, 2, 2);
         expected.rank = 4;
-        expected.positions[0] = 3;
-        expected.positions[1] = 3;
-        expected.scores[0] = 8.5f;
-        expected.scores[1] = 6.5f;
-        expected.score = 0.4f;
+        expected.positions[0] = -1;
+        expected.positions[1] = 1;
+        expected.scores[0] = 0.0f;
+        expected.scores[1] = 5.0f;
+        expected.score = 0.3333333333333333f;
         assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]);
     }
 }
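Editor's note: the next patch in the series bumps version constants after the 8.11.0 release. The integer ids it touches (for example, 8_11_01_99 for the new V_8_11_1) appear to pack major, minor, and revision into decimal digit groups with a trailing 99 on release builds; the decoder below is a sketch inferred from the constants visible in the diff, not taken from Version.java itself.

```java
public class VersionIdSketch {
    public static void main(String[] args) {
        int id = 8_11_01_99; // the new V_8_11_1 constant in the diff below
        int major = id / 1_000_000;      // 8
        int minor = (id / 10_000) % 100; // 11
        int revision = (id / 100) % 100; // 1
        int build = id % 100;            // 99, assumed to mark a release build
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);
    }
}
```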
From e2fb4515bbb50dc1815398916cd7ef4bccd4bb4a Mon Sep 17 00:00:00 2001
From: Matt Culbreth
Date: Tue, 7 Nov 2023 11:17:54 -0500
Subject: [PATCH 18/30] Bump versions after 8.11.0 release

---
 .buildkite/pipelines/intake.yml                     |  2 +-
 .buildkite/pipelines/periodic-packaging.yml         | 12 ++++++------
 .buildkite/pipelines/periodic.yml                   | 12 ++++++------
 .ci/bwcVersions                                     |  2 +-
 .ci/snapshotBwcVersions                             |  3 +--
 server/src/main/java/org/elasticsearch/Version.java |  2 +-
 6 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 7a50745a933ae..fd0684d666d64 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -40,7 +40,7 @@ steps:
   timeout_in_minutes: 300
   matrix:
     setup:
-      BWC_VERSION: ["7.17.15", "8.10.5", "8.11.0", "8.12.0"]
+      BWC_VERSION: ["7.17.15", "8.11.1", "8.12.0"]
   agents:
     provider: gcp
     image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 476a59eca58e5..3043872845779 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -1681,8 +1681,8 @@ steps:
     env:
       BWC_VERSION: 8.10.4
 
-  - label: "{{matrix.image}} / 8.10.5 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.5
+  - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -1695,10 +1695,10 @@
       machineType: custom-16-32768
     buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.10.5
+      BWC_VERSION: 8.11.0
 
-  - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0
+  - label: "{{matrix.image}} / 8.11.1 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.1
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -1711,7 +1711,7 @@
       machineType: custom-16-32768
     buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.11.0
+      BWC_VERSION: 8.11.1
 
   - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 174a8a3b8c3ec..e1ea27c2468e3 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -1032,8 +1032,8 @@ steps:
     buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.10.4
-  - label: 8.10.5 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.5#bwcTest
+  - label: 8.11.0 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
      image: family/elasticsearch-ubuntu-2004
      machineType: custom-32-98304
    buildDirectory: /dev/shm/bk
    env:
-      BWC_VERSION: 8.10.5
-  - label: 8.11.0 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest
+      BWC_VERSION: 8.11.0
+  - label: 8.11.1 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.1#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
       machineType: custom-32-98304
     buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.11.0
+      BWC_VERSION: 8.11.1
   - label: 8.12.0 / bwc
     command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest
     timeout_in_minutes: 300
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 988e7d1e0b453..688d84e1c49c8 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -102,6 +102,6 @@ BWC_VERSION:
   - "8.10.2"
   - "8.10.3"
   - "8.10.4"
-  - "8.10.5"
   - "8.11.0"
+  - "8.11.1"
   - "8.12.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 4246f34222b21..fe40ec8fd1d29 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,5 +1,4 @@
 BWC_VERSION:
   - "7.17.15"
-  - "8.10.5"
-  - "8.11.0"
+  - "8.11.1"
   - "8.12.0"
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 928297397f15c..56a00e25022d4 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -153,8 +153,8 @@ public class Version implements VersionId<Version>, ToXContentFragment {
     public static final Version V_8_10_2 = new Version(8_10_02_99);
     public static final Version V_8_10_3 = new Version(8_10_03_99);
     public static final Version V_8_10_4 = new Version(8_10_04_99);
-    public static final Version V_8_10_5 = new Version(8_10_05_99);
     public static final Version V_8_11_0 = new Version(8_11_00_99);
+    public static final Version V_8_11_1 = new Version(8_11_01_99);
     public static final Version V_8_12_0 = new Version(8_12_00_99);
     public static final Version CURRENT = V_8_12_0;

From 0e3cf7cf64e2cac66b73218fc33fa2b7f0d933c3 Mon Sep 17 00:00:00 2001
From: Matt Culbreth
Date: Tue, 7 Nov
2023 11:19:31 -0500 Subject: [PATCH 19/30] Prune changelogs after 8.11.0 release --- docs/changelog/100018.yaml | 5 ----- docs/changelog/100020.yaml | 6 ------ docs/changelog/100064.yaml | 5 ----- docs/changelog/100092.yaml | 6 ------ docs/changelog/100129.yaml | 6 ------ docs/changelog/100138.yaml | 5 ----- docs/changelog/100143.yaml | 5 ----- docs/changelog/100154.yaml | 5 ----- docs/changelog/100187.yaml | 17 ----------------- docs/changelog/100199.yaml | 5 ----- docs/changelog/100205.yaml | 5 ----- docs/changelog/100232.yaml | 5 ----- docs/changelog/100238.yaml | 6 ------ docs/changelog/100253.yaml | 5 ----- docs/changelog/100273.yaml | 5 ----- docs/changelog/100323.yaml | 5 ----- docs/changelog/100351.yaml | 6 ------ docs/changelog/100360.yaml | 5 ----- docs/changelog/100370.yaml | 7 ------- docs/changelog/100377.yaml | 6 ------ docs/changelog/100388.yaml | 6 ------ docs/changelog/100447.yaml | 5 ----- docs/changelog/100470.yaml | 6 ------ docs/changelog/100594.yaml | 5 ----- docs/changelog/100610.yaml | 7 ------- docs/changelog/100624.yaml | 5 ----- docs/changelog/100645.yaml | 7 ------- docs/changelog/100647.yaml | 6 ------ docs/changelog/100650.yaml | 6 ------ docs/changelog/100656.yaml | 6 ------ docs/changelog/100707.yaml | 5 ----- docs/changelog/100760.yaml | 5 ----- docs/changelog/100766.yaml | 6 ------ docs/changelog/100779.yaml | 6 ------ docs/changelog/100782.yaml | 8 -------- docs/changelog/100808.yaml | 5 ----- docs/changelog/100846.yaml | 6 ------ docs/changelog/100866.yaml | 6 ------ docs/changelog/100872.yaml | 5 ----- docs/changelog/100875.yaml | 6 ------ docs/changelog/100886.yaml | 5 ----- docs/changelog/100911.yaml | 6 ------ docs/changelog/101001.yaml | 6 ------ docs/changelog/101012.yaml | 5 ----- docs/changelog/101051.yaml | 6 ------ docs/changelog/101120.yaml | 6 ------ docs/changelog/101133.yaml | 5 ----- docs/changelog/101184.yaml | 6 ------ docs/changelog/101205.yaml | 5 ----- docs/changelog/101212.yaml | 6 ------ docs/changelog/101245.yaml | 5 ----- docs/changelog/101255.yaml | 5 ----- docs/changelog/101264.yaml | 5 ----- docs/changelog/101265.yaml | 13 ------------- docs/changelog/101344.yaml | 5 ----- docs/changelog/101358.yaml | 6 ------ docs/changelog/101362.yaml | 6 ------ docs/changelog/101438.yaml | 6 ------ docs/changelog/101456.yaml | 6 ------ docs/changelog/101486.yaml | 5 ----- docs/changelog/101492.yaml | 6 ------ docs/changelog/101495.yaml | 5 ----- docs/changelog/101497.yaml | 5 ----- docs/changelog/101516.yaml | 5 ----- docs/changelog/101627.yaml | 5 ----- docs/changelog/101629.yaml | 5 ----- docs/changelog/101648.yaml | 6 ------ docs/changelog/101652.yaml | 5 ----- docs/changelog/101713.yaml | 5 ----- docs/changelog/101778.yaml | 7 ------- docs/changelog/94607.yaml | 18 ------------------ docs/changelog/97317.yaml | 6 ------ docs/changelog/97397.yaml | 5 ----- docs/changelog/97409.yaml | 5 ----- docs/changelog/97450.yaml | 5 ----- docs/changelog/97642.yaml | 5 ----- docs/changelog/97729.yaml | 5 ----- docs/changelog/97972.yaml | 6 ------ docs/changelog/98038.yaml | 6 ------ docs/changelog/98061.yaml | 6 ------ docs/changelog/98268.yaml | 5 ----- docs/changelog/98309.yaml | 5 ----- docs/changelog/98332.yaml | 6 ------ docs/changelog/98337.yaml | 5 ----- docs/changelog/98360.yaml | 6 ------ docs/changelog/98406.yaml | 5 ----- docs/changelog/98457.yaml | 5 ----- docs/changelog/98470.yaml | 5 ----- docs/changelog/98512.yaml | 6 ------ docs/changelog/98518.yaml | 6 ------ docs/changelog/98528.yaml | 6 ------ docs/changelog/98550.yaml | 5 ----- 
docs/changelog/98574.yaml | 6 ------ docs/changelog/98590.yaml | 5 ----- docs/changelog/98622.yaml | 6 ------ docs/changelog/98628.yaml | 5 ----- docs/changelog/98630.yaml | 5 ----- docs/changelog/98635.yaml | 5 ----- docs/changelog/98653.yaml | 6 ------ docs/changelog/98654.yaml | 5 ----- docs/changelog/98684.yaml | 6 ------ docs/changelog/98711.yaml | 5 ----- docs/changelog/98759.yaml | 6 ------ docs/changelog/98809.yaml | 7 ------- docs/changelog/98811.yaml | 5 ----- docs/changelog/98824.yaml | 6 ------ docs/changelog/98840.yaml | 6 ------ docs/changelog/98843.yaml | 5 ----- docs/changelog/98844.yaml | 5 ----- docs/changelog/98847.yaml | 5 ----- docs/changelog/98870.yaml | 6 ------ docs/changelog/98878.yaml | 5 ----- docs/changelog/98888.yaml | 5 ----- docs/changelog/98915.yaml | 5 ----- docs/changelog/98930.yaml | 5 ----- docs/changelog/98942.yaml | 5 ----- docs/changelog/98972.yaml | 6 ------ docs/changelog/98974.yaml | 5 ----- docs/changelog/98996.yaml | 5 ----- docs/changelog/99054.yaml | 5 ----- docs/changelog/99058.yaml | 5 ----- docs/changelog/99091.yaml | 5 ----- docs/changelog/99106.yaml | 6 ------ docs/changelog/99107.yaml | 5 ----- docs/changelog/99117.yaml | 5 ----- docs/changelog/99163.yaml | 6 ------ docs/changelog/99188.yaml | 6 ------ docs/changelog/99193.yaml | 5 ----- docs/changelog/99215.yaml | 6 ------ docs/changelog/99219.yaml | 5 ----- docs/changelog/99222.yaml | 5 ----- docs/changelog/99223.yaml | 11 ----------- docs/changelog/99224.yaml | 5 ----- docs/changelog/99278.yaml | 5 ----- docs/changelog/99286.yaml | 5 ----- docs/changelog/99300.yaml | 5 ----- docs/changelog/99303.yaml | 5 ----- docs/changelog/99310.yaml | 6 ------ docs/changelog/99316.yaml | 5 ----- docs/changelog/99346.yaml | 5 ----- docs/changelog/99382.yaml | 6 ------ docs/changelog/99417.yaml | 6 ------ docs/changelog/99432.yaml | 5 ----- docs/changelog/99470.yaml | 5 ----- docs/changelog/99474.yaml | 5 ----- docs/changelog/99515.yaml | 5 ----- docs/changelog/99527.yaml | 5 ----- docs/changelog/99532.yaml | 5 ----- docs/changelog/99555.yaml | 5 ----- docs/changelog/99566.yaml | 6 ------ docs/changelog/99567.yaml | 6 ------ docs/changelog/99584.yaml | 5 ----- docs/changelog/99588.yaml | 6 ------ docs/changelog/99601.yaml | 6 ------ docs/changelog/99627.yaml | 5 ----- docs/changelog/99631.yaml | 5 ----- docs/changelog/99641.yaml | 5 ----- docs/changelog/99644.yaml | 6 ------ docs/changelog/99655.yaml | 5 ----- docs/changelog/99682.yaml | 5 ----- docs/changelog/99685.yaml | 5 ----- docs/changelog/99694.yaml | 5 ----- docs/changelog/99695.yaml | 5 ----- docs/changelog/99711.yaml | 5 ----- docs/changelog/99712.yaml | 5 ----- docs/changelog/99717.yaml | 5 ----- docs/changelog/99726.yaml | 6 ------ docs/changelog/99736.yaml | 6 ------ docs/changelog/99746.yaml | 5 ----- docs/changelog/99775.yaml | 6 ------ docs/changelog/99796.yaml | 6 ------ docs/changelog/99797.yaml | 5 ----- docs/changelog/99798.yaml | 7 ------- docs/changelog/99804.yaml | 6 ------ docs/changelog/99816.yaml | 6 ------ docs/changelog/99827.yaml | 5 ----- docs/changelog/99832.yaml | 5 ----- docs/changelog/99873.yaml | 5 ----- docs/changelog/99874.yaml | 6 ------ docs/changelog/99909.yaml | 5 ----- docs/changelog/99912.yaml | 6 ------ docs/changelog/99938.yaml | 5 ----- docs/changelog/99947.yaml | 5 ----- docs/changelog/99956.yaml | 5 ----- docs/changelog/99995.yaml | 6 ------ 185 files changed, 1046 deletions(-) delete mode 100644 docs/changelog/100018.yaml delete mode 100644 docs/changelog/100020.yaml delete mode 100644 
docs/changelog/100064.yaml delete mode 100644 docs/changelog/100092.yaml delete mode 100644 docs/changelog/100129.yaml delete mode 100644 docs/changelog/100138.yaml delete mode 100644 docs/changelog/100143.yaml delete mode 100644 docs/changelog/100154.yaml delete mode 100644 docs/changelog/100187.yaml delete mode 100644 docs/changelog/100199.yaml delete mode 100644 docs/changelog/100205.yaml delete mode 100644 docs/changelog/100232.yaml delete mode 100644 docs/changelog/100238.yaml delete mode 100644 docs/changelog/100253.yaml delete mode 100644 docs/changelog/100273.yaml delete mode 100644 docs/changelog/100323.yaml delete mode 100644 docs/changelog/100351.yaml delete mode 100644 docs/changelog/100360.yaml delete mode 100644 docs/changelog/100370.yaml delete mode 100644 docs/changelog/100377.yaml delete mode 100644 docs/changelog/100388.yaml delete mode 100644 docs/changelog/100447.yaml delete mode 100644 docs/changelog/100470.yaml delete mode 100644 docs/changelog/100594.yaml delete mode 100644 docs/changelog/100610.yaml delete mode 100644 docs/changelog/100624.yaml delete mode 100644 docs/changelog/100645.yaml delete mode 100644 docs/changelog/100647.yaml delete mode 100644 docs/changelog/100650.yaml delete mode 100644 docs/changelog/100656.yaml delete mode 100644 docs/changelog/100707.yaml delete mode 100644 docs/changelog/100760.yaml delete mode 100644 docs/changelog/100766.yaml delete mode 100644 docs/changelog/100779.yaml delete mode 100644 docs/changelog/100782.yaml delete mode 100644 docs/changelog/100808.yaml delete mode 100644 docs/changelog/100846.yaml delete mode 100644 docs/changelog/100866.yaml delete mode 100644 docs/changelog/100872.yaml delete mode 100644 docs/changelog/100875.yaml delete mode 100644 docs/changelog/100886.yaml delete mode 100644 docs/changelog/100911.yaml delete mode 100644 docs/changelog/101001.yaml delete mode 100644 docs/changelog/101012.yaml delete mode 100644 docs/changelog/101051.yaml delete mode 100644 docs/changelog/101120.yaml delete mode 100644 docs/changelog/101133.yaml delete mode 100644 docs/changelog/101184.yaml delete mode 100644 docs/changelog/101205.yaml delete mode 100644 docs/changelog/101212.yaml delete mode 100644 docs/changelog/101245.yaml delete mode 100644 docs/changelog/101255.yaml delete mode 100644 docs/changelog/101264.yaml delete mode 100644 docs/changelog/101265.yaml delete mode 100644 docs/changelog/101344.yaml delete mode 100644 docs/changelog/101358.yaml delete mode 100644 docs/changelog/101362.yaml delete mode 100644 docs/changelog/101438.yaml delete mode 100644 docs/changelog/101456.yaml delete mode 100644 docs/changelog/101486.yaml delete mode 100644 docs/changelog/101492.yaml delete mode 100644 docs/changelog/101495.yaml delete mode 100644 docs/changelog/101497.yaml delete mode 100644 docs/changelog/101516.yaml delete mode 100644 docs/changelog/101627.yaml delete mode 100644 docs/changelog/101629.yaml delete mode 100644 docs/changelog/101648.yaml delete mode 100644 docs/changelog/101652.yaml delete mode 100644 docs/changelog/101713.yaml delete mode 100644 docs/changelog/101778.yaml delete mode 100644 docs/changelog/94607.yaml delete mode 100644 docs/changelog/97317.yaml delete mode 100644 docs/changelog/97397.yaml delete mode 100644 docs/changelog/97409.yaml delete mode 100644 docs/changelog/97450.yaml delete mode 100644 docs/changelog/97642.yaml delete mode 100644 docs/changelog/97729.yaml delete mode 100644 docs/changelog/97972.yaml delete mode 100644 docs/changelog/98038.yaml delete mode 100644 
docs/changelog/98061.yaml delete mode 100644 docs/changelog/98268.yaml delete mode 100644 docs/changelog/98309.yaml delete mode 100644 docs/changelog/98332.yaml delete mode 100644 docs/changelog/98337.yaml delete mode 100644 docs/changelog/98360.yaml delete mode 100644 docs/changelog/98406.yaml delete mode 100644 docs/changelog/98457.yaml delete mode 100644 docs/changelog/98470.yaml delete mode 100644 docs/changelog/98512.yaml delete mode 100644 docs/changelog/98518.yaml delete mode 100644 docs/changelog/98528.yaml delete mode 100644 docs/changelog/98550.yaml delete mode 100644 docs/changelog/98574.yaml delete mode 100644 docs/changelog/98590.yaml delete mode 100644 docs/changelog/98622.yaml delete mode 100644 docs/changelog/98628.yaml delete mode 100644 docs/changelog/98630.yaml delete mode 100644 docs/changelog/98635.yaml delete mode 100644 docs/changelog/98653.yaml delete mode 100644 docs/changelog/98654.yaml delete mode 100644 docs/changelog/98684.yaml delete mode 100644 docs/changelog/98711.yaml delete mode 100644 docs/changelog/98759.yaml delete mode 100644 docs/changelog/98809.yaml delete mode 100644 docs/changelog/98811.yaml delete mode 100644 docs/changelog/98824.yaml delete mode 100644 docs/changelog/98840.yaml delete mode 100644 docs/changelog/98843.yaml delete mode 100644 docs/changelog/98844.yaml delete mode 100644 docs/changelog/98847.yaml delete mode 100644 docs/changelog/98870.yaml delete mode 100644 docs/changelog/98878.yaml delete mode 100644 docs/changelog/98888.yaml delete mode 100644 docs/changelog/98915.yaml delete mode 100644 docs/changelog/98930.yaml delete mode 100644 docs/changelog/98942.yaml delete mode 100644 docs/changelog/98972.yaml delete mode 100644 docs/changelog/98974.yaml delete mode 100644 docs/changelog/98996.yaml delete mode 100644 docs/changelog/99054.yaml delete mode 100644 docs/changelog/99058.yaml delete mode 100644 docs/changelog/99091.yaml delete mode 100644 docs/changelog/99106.yaml delete mode 100644 docs/changelog/99107.yaml delete mode 100644 docs/changelog/99117.yaml delete mode 100644 docs/changelog/99163.yaml delete mode 100644 docs/changelog/99188.yaml delete mode 100644 docs/changelog/99193.yaml delete mode 100644 docs/changelog/99215.yaml delete mode 100644 docs/changelog/99219.yaml delete mode 100644 docs/changelog/99222.yaml delete mode 100644 docs/changelog/99223.yaml delete mode 100644 docs/changelog/99224.yaml delete mode 100644 docs/changelog/99278.yaml delete mode 100644 docs/changelog/99286.yaml delete mode 100644 docs/changelog/99300.yaml delete mode 100644 docs/changelog/99303.yaml delete mode 100644 docs/changelog/99310.yaml delete mode 100644 docs/changelog/99316.yaml delete mode 100644 docs/changelog/99346.yaml delete mode 100644 docs/changelog/99382.yaml delete mode 100644 docs/changelog/99417.yaml delete mode 100644 docs/changelog/99432.yaml delete mode 100644 docs/changelog/99470.yaml delete mode 100644 docs/changelog/99474.yaml delete mode 100644 docs/changelog/99515.yaml delete mode 100644 docs/changelog/99527.yaml delete mode 100644 docs/changelog/99532.yaml delete mode 100644 docs/changelog/99555.yaml delete mode 100644 docs/changelog/99566.yaml delete mode 100644 docs/changelog/99567.yaml delete mode 100644 docs/changelog/99584.yaml delete mode 100644 docs/changelog/99588.yaml delete mode 100644 docs/changelog/99601.yaml delete mode 100644 docs/changelog/99627.yaml delete mode 100644 docs/changelog/99631.yaml delete mode 100644 docs/changelog/99641.yaml delete mode 100644 docs/changelog/99644.yaml delete mode 100644 
docs/changelog/99655.yaml delete mode 100644 docs/changelog/99682.yaml delete mode 100644 docs/changelog/99685.yaml delete mode 100644 docs/changelog/99694.yaml delete mode 100644 docs/changelog/99695.yaml delete mode 100644 docs/changelog/99711.yaml delete mode 100644 docs/changelog/99712.yaml delete mode 100644 docs/changelog/99717.yaml delete mode 100644 docs/changelog/99726.yaml delete mode 100644 docs/changelog/99736.yaml delete mode 100644 docs/changelog/99746.yaml delete mode 100644 docs/changelog/99775.yaml delete mode 100644 docs/changelog/99796.yaml delete mode 100644 docs/changelog/99797.yaml delete mode 100644 docs/changelog/99798.yaml delete mode 100644 docs/changelog/99804.yaml delete mode 100644 docs/changelog/99816.yaml delete mode 100644 docs/changelog/99827.yaml delete mode 100644 docs/changelog/99832.yaml delete mode 100644 docs/changelog/99873.yaml delete mode 100644 docs/changelog/99874.yaml delete mode 100644 docs/changelog/99909.yaml delete mode 100644 docs/changelog/99912.yaml delete mode 100644 docs/changelog/99938.yaml delete mode 100644 docs/changelog/99947.yaml delete mode 100644 docs/changelog/99956.yaml delete mode 100644 docs/changelog/99995.yaml diff --git a/docs/changelog/100018.yaml b/docs/changelog/100018.yaml deleted file mode 100644 index b39089db568c0..0000000000000 --- a/docs/changelog/100018.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100018 -summary: Improve time-series error and documentation -area: "TSDB" -type: enhancement -issues: [] diff --git a/docs/changelog/100020.yaml b/docs/changelog/100020.yaml deleted file mode 100644 index 9f97778860eef..0000000000000 --- a/docs/changelog/100020.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100020 -summary: "[CI] `SearchResponseTests#testSerialization` failing resolved" -area: Search -type: bug -issues: - - 100005 diff --git a/docs/changelog/100064.yaml b/docs/changelog/100064.yaml deleted file mode 100644 index f595b7e8e0705..0000000000000 --- a/docs/changelog/100064.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100064 -summary: Update the elastic-apm-agent version -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/100092.yaml b/docs/changelog/100092.yaml deleted file mode 100644 index e86b856caf3ad..0000000000000 --- a/docs/changelog/100092.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100092 -summary: Compute SLM retention from `RepositoryData` -area: ILM+SLM -type: bug -issues: - - 99953 diff --git a/docs/changelog/100129.yaml b/docs/changelog/100129.yaml deleted file mode 100644 index aa2c6961b6681..0000000000000 --- a/docs/changelog/100129.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100129 -summary: Refactor `SearchResponseClusters` to use CHM -area: Search -type: enhancement -issues: - - 99101 diff --git a/docs/changelog/100138.yaml b/docs/changelog/100138.yaml deleted file mode 100644 index 0df2004f8539d..0000000000000 --- a/docs/changelog/100138.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100138 -summary: Upgrade main to Lucene 9.8.0 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/100143.yaml b/docs/changelog/100143.yaml deleted file mode 100644 index c61a2a8bc7a13..0000000000000 --- a/docs/changelog/100143.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100143 -summary: Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation the output results order was not guaranteed to match the input order. The fix ensures the output order matches the input order. 
-area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100154.yaml b/docs/changelog/100154.yaml deleted file mode 100644 index 5e75102390c61..0000000000000 --- a/docs/changelog/100154.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100154 -summary: Log warnings for jobs unassigned for a long time -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100187.yaml b/docs/changelog/100187.yaml deleted file mode 100644 index f0ab9257e7127..0000000000000 --- a/docs/changelog/100187.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 100187 -summary: GA the data stream lifecycle -area: Data streams -type: "feature" -issues: [] -highlight: - title: The data stream lifecycle is now in Technical Preview - body: "This marks the data stream lifecycle as available in Technical Preview. - Data streams will be able to take advantage of a built-in simplified and - resilient lifecycle implementation. Data streams with a configured lifecycle will - be automatically rolled over and tail merged (a forcemerge implementation that's - lightweight and only merges the long tail of small segments instead of the - whole shard). With the shard and index maintenance tasks being handled automatically - to ensure optimum performance, and trade-off between indexing and searching, - you'll be able to focus on the business related lifecycle aspects like data - retention." - notable: true diff --git a/docs/changelog/100199.yaml b/docs/changelog/100199.yaml deleted file mode 100644 index 0f609194813c5..0000000000000 --- a/docs/changelog/100199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100199 -summary: "ESQL: Simple check if all blocks get released" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/100205.yaml b/docs/changelog/100205.yaml deleted file mode 100644 index 41b16465ef4c5..0000000000000 --- a/docs/changelog/100205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100205 -summary: Simplify the Inference Ingest Processor configuration -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100232.yaml b/docs/changelog/100232.yaml deleted file mode 100644 index 3f8336b6c241c..0000000000000 --- a/docs/changelog/100232.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100232 -summary: "Tracing: Use `doPriv` when working with spans, use `SpanId`" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/100238.yaml b/docs/changelog/100238.yaml deleted file mode 100644 index 70e3f5340e223..0000000000000 --- a/docs/changelog/100238.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100238 -summary: "ESQL: Remove aliasing inside Eval" -area: ES|QL -type: bug -issues: - - 100174 diff --git a/docs/changelog/100253.yaml b/docs/changelog/100253.yaml deleted file mode 100644 index 7a9d3f3fb13d7..0000000000000 --- a/docs/changelog/100253.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100253 -summary: Propagate cancellation in `DataTiersUsageTransportAction` -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/100273.yaml b/docs/changelog/100273.yaml deleted file mode 100644 index 4ccd52d033aa7..0000000000000 --- a/docs/changelog/100273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100273 -summary: Propagate cancellation in `GetHealthAction` -area: Health -type: bug -issues: [] diff --git a/docs/changelog/100323.yaml b/docs/changelog/100323.yaml deleted file mode 100644 index de50da6ec8cf9..0000000000000 --- a/docs/changelog/100323.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100323 -summary: "CCR: Use local cluster state request" -area: CCR -type: bug -issues: [] diff --git 
a/docs/changelog/100351.yaml b/docs/changelog/100351.yaml deleted file mode 100644 index d8ba19b70cbed..0000000000000 --- a/docs/changelog/100351.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100351 -summary: "ESQL: support metric tsdb fields while querying index patterns" -area: ES|QL -type: bug -issues: - - 100144 diff --git a/docs/changelog/100360.yaml b/docs/changelog/100360.yaml deleted file mode 100644 index 6d0dcafe16a8f..0000000000000 --- a/docs/changelog/100360.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100360 -summary: "ESQL: Limit how many bytes `concat()` can process" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/100370.yaml b/docs/changelog/100370.yaml deleted file mode 100644 index 3e2e1b762c654..0000000000000 --- a/docs/changelog/100370.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100370 -summary: "ESQL: Page shouldn't close a block twice" -area: ES|QL -type: bug -issues: - - 100356 - - 100365 diff --git a/docs/changelog/100377.yaml b/docs/changelog/100377.yaml deleted file mode 100644 index a4cbb0ba46a61..0000000000000 --- a/docs/changelog/100377.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100377 -summary: "ESQL: Add identity check in Block equality" -area: ES|QL -type: bug -issues: - - 100374 diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml deleted file mode 100644 index 4b596b6ea23b6..0000000000000 --- a/docs/changelog/100388.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100388 -summary: Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent. -area: Machine Learning -type: bug -issues: - - 100180 diff --git a/docs/changelog/100447.yaml b/docs/changelog/100447.yaml deleted file mode 100644 index c20eb1599cf41..0000000000000 --- a/docs/changelog/100447.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100447 -summary: Reinstate `RepositoryData` BwC -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/100470.yaml b/docs/changelog/100470.yaml deleted file mode 100644 index 3408ae06f7fe9..0000000000000 --- a/docs/changelog/100470.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100470 -summary: DSL waits for the tsdb time boundaries to lapse -area: Data streams -type: bug -issues: - - 99696 diff --git a/docs/changelog/100594.yaml b/docs/changelog/100594.yaml deleted file mode 100644 index 62d2a8933b9ad..0000000000000 --- a/docs/changelog/100594.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100594 -summary: Grant editor and viewer access to profiling -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100610.yaml b/docs/changelog/100610.yaml deleted file mode 100644 index 7423ce9225868..0000000000000 --- a/docs/changelog/100610.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100610 -summary: Fix interruption of `markAllocationIdAsInSync` -area: Recovery -type: bug -issues: - - 96578 - - 100589 diff --git a/docs/changelog/100624.yaml b/docs/changelog/100624.yaml deleted file mode 100644 index 247343bf03ed8..0000000000000 --- a/docs/changelog/100624.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100624 -summary: Make Transform Feature Reset really wait for all the tasks -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100645.yaml b/docs/changelog/100645.yaml deleted file mode 100644 index e6bb6ab0fd653..0000000000000 --- a/docs/changelog/100645.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100645 -summary: "ESQL: Graceful handling of non-bool condition in the filter" -area: ES|QL -type: bug -issues: - - 100049 - - 100409 diff --git 
a/docs/changelog/100647.yaml b/docs/changelog/100647.yaml deleted file mode 100644 index 399407146af68..0000000000000 --- a/docs/changelog/100647.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100647 -summary: "ESQL: Handle queries with non-existing enrich policies and no field" -area: ES|QL -type: bug -issues: - - 100593 diff --git a/docs/changelog/100650.yaml b/docs/changelog/100650.yaml deleted file mode 100644 index 96d7bc0571403..0000000000000 --- a/docs/changelog/100650.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100650 -summary: "ESQL: Improve verifier error for incorrect agg declaration" -area: ES|QL -type: bug -issues: - - 100641 diff --git a/docs/changelog/100656.yaml b/docs/changelog/100656.yaml deleted file mode 100644 index 1ee9a2ad0e47a..0000000000000 --- a/docs/changelog/100656.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100656 -summary: "ESQL: fix non-null value being returned for unsupported data types in `ValueSources`" -area: ES|QL -type: bug -issues: - - 100048 diff --git a/docs/changelog/100707.yaml b/docs/changelog/100707.yaml deleted file mode 100644 index 6808b781b603a..0000000000000 --- a/docs/changelog/100707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100707 -summary: Allow `enrich_user` to read/view enrich indices -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100760.yaml b/docs/changelog/100760.yaml deleted file mode 100644 index b8d149fff5758..0000000000000 --- a/docs/changelog/100760.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100760 -summary: Remove noisy 'Could not find trained model' message -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100766.yaml b/docs/changelog/100766.yaml deleted file mode 100644 index c7a3d0479afd6..0000000000000 --- a/docs/changelog/100766.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100766 -summary: "ESQL: Properly handle multi-values in fold() and date math" -area: ES|QL -type: bug -issues: - - 100497 diff --git a/docs/changelog/100779.yaml b/docs/changelog/100779.yaml deleted file mode 100644 index 2d7f40f5b34da..0000000000000 --- a/docs/changelog/100779.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100779 -summary: Fix NullPointerException in RotableSecret -area: Security -type: bug -issues: - - 99759 diff --git a/docs/changelog/100782.yaml b/docs/changelog/100782.yaml deleted file mode 100644 index c6007bfb4d9ba..0000000000000 --- a/docs/changelog/100782.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 100782 -summary: "ESQL: `mv_expand` pushes down limit and project and keep the limit after\ - \ it untouched" -area: ES|QL -type: bug -issues: - - 99971 - - 100774 diff --git a/docs/changelog/100808.yaml b/docs/changelog/100808.yaml deleted file mode 100644 index 1abbfdcebf74e..0000000000000 --- a/docs/changelog/100808.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100808 -summary: Make tasks that calculate checkpoints cancellable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100846.yaml b/docs/changelog/100846.yaml deleted file mode 100644 index d13fb78b697a2..0000000000000 --- a/docs/changelog/100846.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100846 -summary: Consistent scores for multi-term `SourceConfirmedTestQuery` -area: Search -type: bug -issues: - - 98712 diff --git a/docs/changelog/100866.yaml b/docs/changelog/100866.yaml deleted file mode 100644 index 67a22cc1e0996..0000000000000 --- a/docs/changelog/100866.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100866 -summary: "ESQL: Preserve intermediate aggregation output in local relation" -area: ES|QL -type: bug -issues: - - 100807 diff --git 
a/docs/changelog/100872.yaml b/docs/changelog/100872.yaml deleted file mode 100644 index 9877afa28982e..0000000000000 --- a/docs/changelog/100872.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100872 -summary: Improve painless error wrapping -area: Infra/Scripting -type: bug -issues: [] diff --git a/docs/changelog/100875.yaml b/docs/changelog/100875.yaml deleted file mode 100644 index bd0ca59e8b8f0..0000000000000 --- a/docs/changelog/100875.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100875 -summary: Preserve subfields for unsupported types -area: "Query Languages" -type: bug -issues: - - 100869 diff --git a/docs/changelog/100886.yaml b/docs/changelog/100886.yaml deleted file mode 100644 index b926f924c7a7c..0000000000000 --- a/docs/changelog/100886.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100886 -summary: Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment] -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100911.yaml b/docs/changelog/100911.yaml deleted file mode 100644 index baab6f2482a76..0000000000000 --- a/docs/changelog/100911.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100911 -summary: '`WaitForSnapshotStep` verifies if the index belongs to the latest snapshot - of that SLM policy' -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/101001.yaml b/docs/changelog/101001.yaml deleted file mode 100644 index 3ebcefc2c8045..0000000000000 --- a/docs/changelog/101001.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101001 -summary: "ESQL: Support date and time intervals as input params" -area: ES|QL -type: bug -issues: - - 99570 diff --git a/docs/changelog/101012.yaml b/docs/changelog/101012.yaml deleted file mode 100644 index 1d5f62bdddba7..0000000000000 --- a/docs/changelog/101012.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101012 -summary: Adjust `DateHistogram's` bucket accounting to be iteratively -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/101051.yaml b/docs/changelog/101051.yaml deleted file mode 100644 index 05e7443dac8b3..0000000000000 --- a/docs/changelog/101051.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101051 -summary: Percolator to support parsing script score query with params -area: Mapping -type: bug -issues: - - 97377 diff --git a/docs/changelog/101120.yaml b/docs/changelog/101120.yaml deleted file mode 100644 index bf359eb21be9f..0000000000000 --- a/docs/changelog/101120.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101120 -summary: "ESQL: Fix escaping of backslash in LIKE operator" -area: ES|QL -type: bug -issues: - - 101106 diff --git a/docs/changelog/101133.yaml b/docs/changelog/101133.yaml deleted file mode 100644 index 546a5392c309a..0000000000000 --- a/docs/changelog/101133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101133 -summary: Update bundled JDK to 21.0.1 -area: Packaging -type: upgrade -issues: [] diff --git a/docs/changelog/101184.yaml b/docs/changelog/101184.yaml deleted file mode 100644 index ac2f5f3ee8af1..0000000000000 --- a/docs/changelog/101184.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101184 -summary: More robust timeout for repo analysis -area: Snapshot/Restore -type: bug -issues: - - 101182 diff --git a/docs/changelog/101205.yaml b/docs/changelog/101205.yaml deleted file mode 100644 index 528f6fb35846e..0000000000000 --- a/docs/changelog/101205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101205 -summary: Increase K/V look-back time interval -area: Application -type: bug -issues: [] diff --git 
a/docs/changelog/101212.yaml b/docs/changelog/101212.yaml deleted file mode 100644 index ed2b433209e8d..0000000000000 --- a/docs/changelog/101212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101212 -summary: Fix painless execute api and tsdb issue -area: TSDB -type: bug -issues: - - 101072 diff --git a/docs/changelog/101245.yaml b/docs/changelog/101245.yaml deleted file mode 100644 index 2f9fef318f31a..0000000000000 --- a/docs/changelog/101245.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101245 -summary: Make S3 anti-contention delay configurable -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101255.yaml b/docs/changelog/101255.yaml deleted file mode 100644 index 37d8f7e3c14fe..0000000000000 --- a/docs/changelog/101255.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101255 -summary: Provide stable resampling -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101264.yaml b/docs/changelog/101264.yaml deleted file mode 100644 index 7160240b2f3a0..0000000000000 --- a/docs/changelog/101264.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101264 -summary: Align look-back with client-side cache -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101265.yaml b/docs/changelog/101265.yaml deleted file mode 100644 index f39b57fa9a75e..0000000000000 --- a/docs/changelog/101265.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 101265 -summary: Rollup functionality is now deprecated -area: Rollup -type: deprecation -issues: [] -deprecation: - title: >- - Rollup functionality is now deprecated - area: Rollup - details: |- - {ref}/xpack-rollup[Rollup functionality] has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. - impact: |- - Use {ref}/downsampling.html[downsampling] to reduce storage costs for time series data by by storing it at reduced granularity. 
diff --git a/docs/changelog/101344.yaml b/docs/changelog/101344.yaml deleted file mode 100644 index b546e743301f6..0000000000000 --- a/docs/changelog/101344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101344 -summary: Register `repository_s3` settings -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101358.yaml b/docs/changelog/101358.yaml deleted file mode 100644 index 3ae2a44e15e5e..0000000000000 --- a/docs/changelog/101358.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101358 -summary: Make DISSECT parameter `append_separator` case insensitive -area: ES|QL -type: bug -issues: - - 101138 diff --git a/docs/changelog/101362.yaml b/docs/changelog/101362.yaml deleted file mode 100644 index e1d763cd416fa..0000000000000 --- a/docs/changelog/101362.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101362 -summary: "ESQL: Remove the swapped-args check for date_xxx()" -area: ES|QL -type: enhancement -issues: - - 99562 diff --git a/docs/changelog/101438.yaml b/docs/changelog/101438.yaml deleted file mode 100644 index 8189ee96b6576..0000000000000 --- a/docs/changelog/101438.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101438 -summary: "ESQL: Fix eval of functions on foldable literals" -area: ES|QL -type: bug -issues: - - 101425 diff --git a/docs/changelog/101456.yaml b/docs/changelog/101456.yaml deleted file mode 100644 index db55dfbde1c64..0000000000000 --- a/docs/changelog/101456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101456 -summary: "ESQL: adds Enrich implicit `match_fields` to `field_caps` call" -area: ES|QL -type: bug -issues: - - 101328 diff --git a/docs/changelog/101486.yaml b/docs/changelog/101486.yaml deleted file mode 100644 index 99795feda328f..0000000000000 --- a/docs/changelog/101486.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101486 -summary: Improving tika handling -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/101492.yaml b/docs/changelog/101492.yaml deleted file mode 100644 index 2c3cdeee21bbb..0000000000000 --- a/docs/changelog/101492.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101492 -summary: "ESQL: check type before casting" -area: ES|QL -type: bug -issues: - - 101489 diff --git a/docs/changelog/101495.yaml b/docs/changelog/101495.yaml deleted file mode 100644 index f61c9b824b77c..0000000000000 --- a/docs/changelog/101495.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101495 -summary: "[DSL] skip deleting indices that have in-progress downsampling operations" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/101497.yaml b/docs/changelog/101497.yaml deleted file mode 100644 index 7909cb1ecdc0d..0000000000000 --- a/docs/changelog/101497.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101497 -summary: Fix snapshot double finalization -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101516.yaml b/docs/changelog/101516.yaml deleted file mode 100644 index a5445102c33c6..0000000000000 --- a/docs/changelog/101516.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101516 -summary: "Make settings dynamic" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101627.yaml b/docs/changelog/101627.yaml deleted file mode 100644 index 07992efd8bb3c..0000000000000 --- a/docs/changelog/101627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101627 -summary: Ignore `IndexNotFound` error when refreshing destination index -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml deleted file mode 100644 index 1b8691c9798ff..0000000000000 --- a/docs/changelog/101629.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-pr: 101629 -summary: Health report infrastructure doesn't trip the circuit breakers -area: Health -type: bug -issues: [] diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml deleted file mode 100644 index 48e01739aabc0..0000000000000 --- a/docs/changelog/101648.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101648 -summary: "ESQL: Fix unreleased block in topn" -area: ES|QL -type: bug -issues: - - 101588 diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml deleted file mode 100644 index 79e3167696aee..0000000000000 --- a/docs/changelog/101652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101652 -summary: Fix race condition in `SnapshotsService` -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml deleted file mode 100644 index c3addf9296584..0000000000000 --- a/docs/changelog/101713.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101713 -summary: Disable `weight_matches` when kNN query is present -area: Highlighting -type: bug -issues: [] diff --git a/docs/changelog/101778.yaml b/docs/changelog/101778.yaml deleted file mode 100644 index 9ad2f0c9154c3..0000000000000 --- a/docs/changelog/101778.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 101778 -summary: Don't update system index mappings in mixed clusters -area: Infra/Core -type: bug -issues: - - 101331 - - 99778 diff --git a/docs/changelog/94607.yaml b/docs/changelog/94607.yaml deleted file mode 100644 index eea9264ce90f9..0000000000000 --- a/docs/changelog/94607.yaml +++ /dev/null @@ -1,18 +0,0 @@ -pr: 94607 -summary: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers -area: Engine -type: enhancement -issues: [] -highlight: - title: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers - body: |- - Rather than forcing a refresh to reclaim memory from indexing buffers, which flushes all - segments no matter how large, Elasticsearch now takes advantage of - `IndexWriter#flushNextBuffer` which only flushes the largest pending segment. This should smooth - out indexing allowing for larger segment sizes, fewer merges and higher throughput. - - Furthermore, the selection algorithm to pick which shard to reclaim memory from next was - changed, from picking the shard that uses the most RAM to going over shards in a round-robin - fashion. This approach has proved to work significantly better in practice. 
- - notable: true
diff --git a/docs/changelog/97317.yaml b/docs/changelog/97317.yaml
deleted file mode 100644
index 64fcd55e67e28..0000000000000
--- a/docs/changelog/97317.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 97317
-summary: "Fix merges of mappings with `subobjects: false` for composable index templates"
-area: Mapping
-type: bug
-issues:
- - 96768
diff --git a/docs/changelog/97397.yaml b/docs/changelog/97397.yaml
deleted file mode 100644
index 5c1867d55f9bd..0000000000000
--- a/docs/changelog/97397.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97397
-summary: Return a 410 (Gone) status code for unavailable API endpoints
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/97409.yaml b/docs/changelog/97409.yaml
deleted file mode 100644
index 8c05d6254f7cc..0000000000000
--- a/docs/changelog/97409.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97409
-summary: Trim stored fields for `_id` field in tsdb
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/97450.yaml b/docs/changelog/97450.yaml
deleted file mode 100644
index a057e0beefaca..0000000000000
--- a/docs/changelog/97450.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97450
-summary: Make `_index` optional for pinned query docs
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/97642.yaml b/docs/changelog/97642.yaml
deleted file mode 100644
index cf519e04e2d38..0000000000000
--- a/docs/changelog/97642.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97642
-summary: fix fuzzy query rewrite parameter not work
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/97729.yaml b/docs/changelog/97729.yaml
deleted file mode 100644
index f80a04bc58f68..0000000000000
--- a/docs/changelog/97729.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 97729
-summary: Allow parsing on non-string routing fields
-area: Aggregations
-type: bug
-issues: []
diff --git a/docs/changelog/97972.yaml b/docs/changelog/97972.yaml
deleted file mode 100644
index d4d55e33b4bb2..0000000000000
--- a/docs/changelog/97972.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 97972
-summary: Automatically flatten objects when subobjects:false
-area: Mapping
-type: enhancement
-issues:
- - 88934
diff --git a/docs/changelog/98038.yaml b/docs/changelog/98038.yaml
deleted file mode 100644
index d99db24664f30..0000000000000
--- a/docs/changelog/98038.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98038
-summary: Update enrich execution to only set index false on fields that support it
-area: Ingest Node
-type: bug
-issues:
- - 98019
diff --git a/docs/changelog/98061.yaml b/docs/changelog/98061.yaml
deleted file mode 100644
index 3955b262017f0..0000000000000
--- a/docs/changelog/98061.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98061
-summary: Fix possible NPE when getting transform stats for failed transforms
-area: Transform
-type: bug
-issues:
- - 98052
diff --git a/docs/changelog/98268.yaml b/docs/changelog/98268.yaml
deleted file mode 100644
index ef6f98b8d016c..0000000000000
--- a/docs/changelog/98268.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98268
-summary: Dense vector field types are indexed by default
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/98309.yaml b/docs/changelog/98309.yaml
deleted file mode 100644
index 550f50b3569a1..0000000000000
--- a/docs/changelog/98309.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98309
-summary: "Integrate Elasticsearch Query Language, ES|QL"
-area: Query Languages
-type: feature
-issues: []
diff --git a/docs/changelog/98332.yaml b/docs/changelog/98332.yaml
deleted file mode 100644
index 6446707515b3c..0000000000000
--- a/docs/changelog/98332.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98332
-summary: Correct behaviour of `ContentPath::remove()`
-area: Mapping
-type: bug
-issues:
- - 98327
diff --git a/docs/changelog/98337.yaml b/docs/changelog/98337.yaml
deleted file mode 100644
index 8664ae15eed00..0000000000000
--- a/docs/changelog/98337.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98337
-summary: TopN sorting with min and max for multi-value fields
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98360.yaml b/docs/changelog/98360.yaml
deleted file mode 100644
index b6b8696259c98..0000000000000
--- a/docs/changelog/98360.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98360
-summary: Use a competitive iterator in `FiltersAggregator`
-area: Aggregations
-type: enhancement
-issues:
- - 97544
diff --git a/docs/changelog/98406.yaml b/docs/changelog/98406.yaml
deleted file mode 100644
index f62af64171944..0000000000000
--- a/docs/changelog/98406.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98406
-summary: Safely drain deployment request queues before allowing node to shutdown
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/98457.yaml b/docs/changelog/98457.yaml
deleted file mode 100644
index 465c9ed30cc5b..0000000000000
--- a/docs/changelog/98457.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98457
-summary: Support cluster/details for CCS minimize_roundtrips=false
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/98470.yaml b/docs/changelog/98470.yaml
deleted file mode 100644
index 498b1db244d22..0000000000000
--- a/docs/changelog/98470.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98470
-summary: Reduce verbosity of the bulk indexing audit log
-area: Audit
-type: enhancement
-issues: []
diff --git a/docs/changelog/98512.yaml b/docs/changelog/98512.yaml
deleted file mode 100644
index c2108a18c6b91..0000000000000
--- a/docs/changelog/98512.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98512
-summary: Automatically map float arrays of lengths 128 - 2048 as dense_vector
-area: Application
-type: feature
-issues:
- - 97532
diff --git a/docs/changelog/98518.yaml b/docs/changelog/98518.yaml
deleted file mode 100644
index 2f961fc11ce69..0000000000000
--- a/docs/changelog/98518.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98518
-summary: Add `index.look_back_time` setting for tsdb data streams
-area: TSDB
-type: enhancement
-issues:
- - 98463
diff --git a/docs/changelog/98528.yaml b/docs/changelog/98528.yaml
deleted file mode 100644
index 0004499e58f83..0000000000000
--- a/docs/changelog/98528.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98528
-summary: "ESQL: Add support for TEXT fields in comparison operators and SORT"
-area: ES|QL
-type: enhancement
-issues:
- - 98642
diff --git a/docs/changelog/98550.yaml b/docs/changelog/98550.yaml
deleted file mode 100644
index 30c9891b15182..0000000000000
--- a/docs/changelog/98550.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98550
-summary: Report a node's "roles" setting in the /_cluster/allocation/explain response
-area: Allocation
-type: enhancement
-issues: [97859]
diff --git a/docs/changelog/98574.yaml b/docs/changelog/98574.yaml
deleted file mode 100644
index bf016b4c241c8..0000000000000
--- a/docs/changelog/98574.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98574
-summary: Specify correct current `IndexVersion` after 8.10 release
-area: Infra/Core
-type: bug
-issues:
- - 98555
diff --git a/docs/changelog/98590.yaml b/docs/changelog/98590.yaml
deleted file mode 100644
index f3ef3cdd56a12..0000000000000
--- a/docs/changelog/98590.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98590
-summary: "ESQL: LTRIM, RTRIM and fix unicode whitespace"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98622.yaml b/docs/changelog/98622.yaml
deleted file mode 100644
index 8c41444b6c725..0000000000000
--- a/docs/changelog/98622.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98622
-summary: Add 'dataset' size to cat indices and cat shards
-area: CAT APIs
-type: enhancement
-issues:
- - 95092
diff --git a/docs/changelog/98628.yaml b/docs/changelog/98628.yaml
deleted file mode 100644
index 2ecd9dd23e0ef..0000000000000
--- a/docs/changelog/98628.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98628
-summary: Add ESQL own flavor of arithmetic operators
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/98630.yaml b/docs/changelog/98630.yaml
deleted file mode 100644
index 444c593f87d0b..0000000000000
--- a/docs/changelog/98630.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98630
-summary: "ESQL: LEAST and GREATEST functions"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98635.yaml b/docs/changelog/98635.yaml
deleted file mode 100644
index 274096951fcf6..0000000000000
--- a/docs/changelog/98635.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98635
-summary: Fix NPE in `StableMasterHealthIndicatorService`
-area: Health
-type: bug
-issues: []
diff --git a/docs/changelog/98653.yaml b/docs/changelog/98653.yaml
deleted file mode 100644
index 384a29c3cc4ab..0000000000000
--- a/docs/changelog/98653.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98653
-summary: Reset `GatewayService` flags before reroute
-area: Cluster Coordination
-type: bug
-issues:
- - 98606
diff --git a/docs/changelog/98654.yaml b/docs/changelog/98654.yaml
deleted file mode 100644
index ea63edb93eb58..0000000000000
--- a/docs/changelog/98654.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98654
-summary: Allow native users/roles to be disabled via setting
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/98684.yaml b/docs/changelog/98684.yaml
deleted file mode 100644
index 552e85a04151a..0000000000000
--- a/docs/changelog/98684.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98684
-summary: Explicit parsing object capabilities of `FieldMappers`
-area: Mapping
-type: enhancement
-issues:
- - 98537
diff --git a/docs/changelog/98711.yaml b/docs/changelog/98711.yaml
deleted file mode 100644
index 43e0c2a03e8fa..0000000000000
--- a/docs/changelog/98711.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98711
-summary: Support unsigned long in sqrt and log10 for ESQL
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/98759.yaml b/docs/changelog/98759.yaml
deleted file mode 100644
index df6180bddc192..0000000000000
--- a/docs/changelog/98759.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98759
-summary: "ESQL: Support queries that don't return underlying fields"
-area: ES|QL
-type: bug
-issues:
- - 98404
diff --git a/docs/changelog/98809.yaml b/docs/changelog/98809.yaml
deleted file mode 100644
index f9f5be523e179..0000000000000
--- a/docs/changelog/98809.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 98809
-summary: Avoiding the use of nodes that are no longer in the cluster when computing
- master stability
-area: Health
-type: enhancement
-issues:
- - 98636
diff --git a/docs/changelog/98811.yaml b/docs/changelog/98811.yaml
deleted file mode 100644
index 338efbcf1d8c9..0000000000000
--- a/docs/changelog/98811.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98811
-summary: Allow explain data stream lifecycle to accept a data stream
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/98824.yaml b/docs/changelog/98824.yaml
deleted file mode 100644
index 7e2c43d266232..0000000000000
--- a/docs/changelog/98824.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98824
-summary: Consider node shutdown in `DataTierAllocationDecider`
-area: "Allocation"
-type: bug
-issues:
- - 97207
diff --git a/docs/changelog/98840.yaml b/docs/changelog/98840.yaml
deleted file mode 100644
index bb358916354dc..0000000000000
--- a/docs/changelog/98840.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98840
-summary: Don't ignore empty index template that have no template definition
-area: TSDB
-type: bug
-issues:
- - 98834
diff --git a/docs/changelog/98843.yaml b/docs/changelog/98843.yaml
deleted file mode 100644
index 742ae25697718..0000000000000
--- a/docs/changelog/98843.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98843
-summary: Fix UnsignedLong field range query gt "0" can get the result equal to 0
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/98844.yaml b/docs/changelog/98844.yaml
deleted file mode 100644
index a5870e7344d15..0000000000000
--- a/docs/changelog/98844.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98844
-summary: Add accessors required to recreate `TransformStats` object from the fields
-area: Transform
-type: enhancement
-issues: []
diff --git a/docs/changelog/98847.yaml b/docs/changelog/98847.yaml
deleted file mode 100644
index ab7455bd783c3..0000000000000
--- a/docs/changelog/98847.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98847
-summary: "ESQL: Add `CEIL` function"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/98870.yaml b/docs/changelog/98870.yaml
deleted file mode 100644
index b719fbb0caf22..0000000000000
--- a/docs/changelog/98870.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98870
-summary: "ESQL: Add ability to perform date math"
-area: ES|QL
-type: enhancement
-issues:
- - 98402
diff --git a/docs/changelog/98878.yaml b/docs/changelog/98878.yaml
deleted file mode 100644
index 4fa8b23851bf9..0000000000000
--- a/docs/changelog/98878.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98878
-summary: Fix percolator query for stored queries that expand on wildcard field names
-area: Percolator
-type: bug
-issues: []
diff --git a/docs/changelog/98888.yaml b/docs/changelog/98888.yaml
deleted file mode 100644
index 1f2f7ea27ff19..0000000000000
--- a/docs/changelog/98888.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98888
-summary: Revert "Kibana system index does not allow user templates to affect it"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/98915.yaml b/docs/changelog/98915.yaml
deleted file mode 100644
index c23ddcc55d98e..0000000000000
--- a/docs/changelog/98915.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98915
-summary: Avoid risk of OOM in datafeeds when memory is constrained
-area: Machine Learning
-type: bug
-issues: [89769]
diff --git a/docs/changelog/98930.yaml b/docs/changelog/98930.yaml
deleted file mode 100644
index e6a2c74192ebe..0000000000000
--- a/docs/changelog/98930.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98930
-summary: Frozen index input clone copy cache file
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/98942.yaml b/docs/changelog/98942.yaml
deleted file mode 100644
index 4d8eeee5192e5..0000000000000
--- a/docs/changelog/98942.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98942
-summary: "ESQL: LEFT function"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98972.yaml b/docs/changelog/98972.yaml
deleted file mode 100644
index acd336ff7d666..0000000000000
--- a/docs/changelog/98972.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 98972
-summary: "ES|QL: Implement serialization of `InvalidMappedField`"
-area: ES|QL
-type: bug
-issues:
- - 98851
diff --git a/docs/changelog/98974.yaml b/docs/changelog/98974.yaml
deleted file mode 100644
index 90950986141ab..0000000000000
--- a/docs/changelog/98974.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98974
-summary: "ESQL: RIGHT function"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/98996.yaml b/docs/changelog/98996.yaml
deleted file mode 100644
index 1f1bdd35ff643..0000000000000
--- a/docs/changelog/98996.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 98996
-summary: Reintroduce `sparse_vector` mapping
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/99054.yaml b/docs/changelog/99054.yaml
deleted file mode 100644
index a9e4128e7ae97..0000000000000
--- a/docs/changelog/99054.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99054
-summary: "ESQL: Mark counter fields as unsupported"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99058.yaml b/docs/changelog/99058.yaml
deleted file mode 100644
index a112834add071..0000000000000
--- a/docs/changelog/99058.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99058
-summary: "ESQL: log query and execution time"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99091.yaml b/docs/changelog/99091.yaml
deleted file mode 100644
index 2c7be19b161ba..0000000000000
--- a/docs/changelog/99091.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99091
-summary: Add flamegraph API
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99106.yaml b/docs/changelog/99106.yaml
deleted file mode 100644
index 21cb121595d2b..0000000000000
--- a/docs/changelog/99106.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99106
-summary: "Add support for Persian language stemmer"
-area: Analysis
-type: feature
-issues:
- - 98911
diff --git a/docs/changelog/99107.yaml b/docs/changelog/99107.yaml
deleted file mode 100644
index a808fb57fcf80..0000000000000
--- a/docs/changelog/99107.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99107
-summary: Wait to gracefully stop deployments until alternative allocation exists
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/99117.yaml b/docs/changelog/99117.yaml
deleted file mode 100644
index 491692f232081..0000000000000
--- a/docs/changelog/99117.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99117
-summary: Do not report failure after connections are made
-area: Network
-type: bug
-issues: []
diff --git a/docs/changelog/99163.yaml b/docs/changelog/99163.yaml
deleted file mode 100644
index f7a44c7f24869..0000000000000
--- a/docs/changelog/99163.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99163
-summary: Use `NamedWritable` to enable `GeoBoundingBox` serialisation
-area: Geo
-type: bug
-issues:
- - 99089
diff --git a/docs/changelog/99188.yaml b/docs/changelog/99188.yaml
deleted file mode 100644
index c22e3ba4b36e5..0000000000000
--- a/docs/changelog/99188.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99188
-summary: "ESQL: skip synthetic attributes when planning the physical fragment"
-area: ES|QL
-type: bug
-issues:
- - 99170
diff --git a/docs/changelog/99193.yaml b/docs/changelog/99193.yaml
deleted file mode 100644
index 9db646dc80435..0000000000000
--- a/docs/changelog/99193.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99193
-summary: Wait for cluster state in recovery
-area: Recovery
-type: enhancement
-issues: []
diff --git a/docs/changelog/99215.yaml b/docs/changelog/99215.yaml
deleted file mode 100644
index 99227839b491e..0000000000000
--- a/docs/changelog/99215.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99215
-summary: Skip `DisiPriorityQueue` on single filter agg
-area: Aggregations
-type: enhancement
-issues:
- - 99202
diff --git a/docs/changelog/99219.yaml b/docs/changelog/99219.yaml
deleted file mode 100644
index 811e2df5f83d0..0000000000000
--- a/docs/changelog/99219.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99219
-summary: Reduce copying when creating scroll/PIT ids
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99222.yaml b/docs/changelog/99222.yaml
deleted file mode 100644
index 025c5e01d2a53..0000000000000
--- a/docs/changelog/99222.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99222
-summary: Fork response-sending in `OpenPointInTimeAction`
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/99223.yaml b/docs/changelog/99223.yaml
deleted file mode 100644
index 914441931033b..0000000000000
--- a/docs/changelog/99223.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-pr: 99223
-summary: Remove `transport_versions` from cluster state API
-area: Infra/Core
-type: breaking
-issues: []
-breaking:
- - title: Remove `transport_versions` from cluster state API
- - area: REST API
- - details: The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject.
- - impact: If needed, retrieve the per-node transport versions from the `nodes_versions` subobject.
- - notable: false
diff --git a/docs/changelog/99224.yaml b/docs/changelog/99224.yaml
deleted file mode 100644
index cde4084ab0e84..0000000000000
--- a/docs/changelog/99224.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99224
-summary: Add new _inference API
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/99278.yaml b/docs/changelog/99278.yaml
deleted file mode 100644
index f2788a00e6369..0000000000000
--- a/docs/changelog/99278.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99278
-summary: Support rotatating the JWT shared secret
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/99286.yaml b/docs/changelog/99286.yaml
deleted file mode 100644
index 1b37416d51ba6..0000000000000
--- a/docs/changelog/99286.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99286
-summary: "ESQL: Log execution time consistently"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99300.yaml b/docs/changelog/99300.yaml
deleted file mode 100644
index 508001b98f29e..0000000000000
--- a/docs/changelog/99300.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99300
-summary: Change `GetFromTranslog` to indices action
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/99303.yaml b/docs/changelog/99303.yaml
deleted file mode 100644
index 479c3a3e280c7..0000000000000
--- a/docs/changelog/99303.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99303
-summary: Use DEBUG log level to report ESQL execution steps
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99310.yaml b/docs/changelog/99310.yaml
deleted file mode 100644
index 8b595fe93fd33..0000000000000
--- a/docs/changelog/99310.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99310
-summary: "ESQL: \"params\" correctly parses the values including an optional \"type\""
-area: ES|QL
-type: bug
-issues:
- - 99294
diff --git a/docs/changelog/99316.yaml b/docs/changelog/99316.yaml
deleted file mode 100644
index 78857b433b385..0000000000000
--- a/docs/changelog/99316.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99316
-summary: "ESQL: Compact topn"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99346.yaml b/docs/changelog/99346.yaml
deleted file mode 100644
index fc6fe02e6bf14..0000000000000
--- a/docs/changelog/99346.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99346
-summary: Automatically disable `ignore_malformed` on datastream `@timestamp` fields
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/99382.yaml b/docs/changelog/99382.yaml
deleted file mode 100644
index 5f5eb932ed458..0000000000000
--- a/docs/changelog/99382.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99382
-summary: "ESQL: create a Vector when needed for IN"
-area: ES|QL
-type: bug
-issues:
- - 99347
diff --git a/docs/changelog/99417.yaml b/docs/changelog/99417.yaml
deleted file mode 100644
index 8c88a5a548dff..0000000000000
--- a/docs/changelog/99417.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99417
-summary: Disable `FilterByFilterAggregator` through `ClusterSettings`
-area: Aggregations
-type: enhancement
-issues:
- - 99335
diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml
deleted file mode 100644
index df4c5a7f78199..0000000000000
--- a/docs/changelog/99432.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99432
-summary: "ESQL: Enable arithmetics for durations and periods"
-area: ES|QL
-type: enhancement
-issues: [99293]
diff --git a/docs/changelog/99470.yaml b/docs/changelog/99470.yaml
deleted file mode 100644
index 3e784595cc6ac..0000000000000
--- a/docs/changelog/99470.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99470
-summary: "ESQL: Improve log messages"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml
deleted file mode 100644
index ea23481069833..0000000000000
--- a/docs/changelog/99474.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99474
-summary: Add `java.net.NetPermission` to APM module's permissions
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/99515.yaml b/docs/changelog/99515.yaml
deleted file mode 100644
index 7de237531a506..0000000000000
--- a/docs/changelog/99515.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99515
-summary: Add `IndexVersion` to node info
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/99527.yaml b/docs/changelog/99527.yaml
deleted file mode 100644
index 19eef621fa500..0000000000000
--- a/docs/changelog/99527.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99445
-summary: Add new max_inner_product vector similarity function
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99532.yaml b/docs/changelog/99532.yaml
deleted file mode 100644
index 859ba963600a8..0000000000000
--- a/docs/changelog/99532.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99532
-summary: Adds `nested` support for indexed `dense_vector` fields
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml
deleted file mode 100644
index 5e53e8782e08c..0000000000000
--- a/docs/changelog/99555.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99555
-summary: Use mappings version to retrieve system index mappings at creation time
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/99566.yaml b/docs/changelog/99566.yaml
deleted file mode 100644
index caad871bf58ed..0000000000000
--- a/docs/changelog/99566.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99566
-summary: Add additional counters to `_clusters` response for all Cluster search states
-area: Search
-type: enhancement
-issues:
- - 98927
diff --git a/docs/changelog/99567.yaml b/docs/changelog/99567.yaml
deleted file mode 100644
index aea65e55b6ee2..0000000000000
--- a/docs/changelog/99567.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99567
-summary: Make tsdb settings public in Serverless
-area: TSDB
-type: bug
-issues:
- - 99563
diff --git a/docs/changelog/99584.yaml b/docs/changelog/99584.yaml
deleted file mode 100644
index 229e3d8024506..0000000000000
--- a/docs/changelog/99584.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99584
-summary: Adding an option for trained models to be platform specific
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml
deleted file mode 100644
index 7cbb53376fdf0..0000000000000
--- a/docs/changelog/99588.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99588
-summary: Make ESQL more resilient to non-indexed fields
-area: ES|QL
-type: bug
-issues:
- - 99506
diff --git a/docs/changelog/99601.yaml b/docs/changelog/99601.yaml
deleted file mode 100644
index 9deba859a5cef..0000000000000
--- a/docs/changelog/99601.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99601
-summary: "ESQL: continue resolving attributes for Eval"
-area: ES|QL
-type: bug
-issues:
- - 99576
diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml
deleted file mode 100644
index 84abdf6418dc2..0000000000000
--- a/docs/changelog/99627.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99627
-summary: Fix thread context in `getRepositoryData`
-area: Snapshot/Restore
-type: bug
-issues: []
diff --git a/docs/changelog/99631.yaml b/docs/changelog/99631.yaml
deleted file mode 100644
index d9174de76f1ea..0000000000000
--- a/docs/changelog/99631.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99631
-summary: Add component info versions to node info in a pluggable way
-area: Infra/REST API
-type: enhancement
-issues: []
diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml
deleted file mode 100644
index c74f7380bd93a..0000000000000
--- a/docs/changelog/99641.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99641
-summary: Chunk the cluster allocation explain response
-area: Network
-type: enhancement
-issues: [97803]
diff --git a/docs/changelog/99644.yaml b/docs/changelog/99644.yaml
deleted file mode 100644
index 10c10448c074c..0000000000000
--- a/docs/changelog/99644.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99644
-summary: Add links to docs from failing bootstrap checks
-area: Infra/Node Lifecycle
-type: enhancement
-issues: [99614]
-
diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml
deleted file mode 100644
index 3d1e76ec47aa3..0000000000000
--- a/docs/changelog/99655.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99655
-summary: "[Profiling] Allow to wait until resources created"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99682.yaml b/docs/changelog/99682.yaml
deleted file mode 100644
index 48e99a5145674..0000000000000
--- a/docs/changelog/99682.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99682
-summary: Increase the max vector dims to 4096
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/99685.yaml b/docs/changelog/99685.yaml
deleted file mode 100644
index 43dac2abbb312..0000000000000
--- a/docs/changelog/99685.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99685
-summary: Fix `advanceExact` for doc values from sources
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/99694.yaml b/docs/changelog/99694.yaml
deleted file mode 100644
index a449ecb2ae378..0000000000000
--- a/docs/changelog/99694.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99694
-summary: Remove shard data files when they fail to write for snapshot
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/99695.yaml b/docs/changelog/99695.yaml
deleted file mode 100644
index 6dc4037a57763..0000000000000
--- a/docs/changelog/99695.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99695
-summary: "ESQL: Better management of not stored TEXT fiels with synthetic source"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/99711.yaml b/docs/changelog/99711.yaml
deleted file mode 100644
index 34731a52818f0..0000000000000
--- a/docs/changelog/99711.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99711
-summary: "ESQL: Date math for negatives"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99712.yaml b/docs/changelog/99712.yaml
deleted file mode 100644
index c5fa1ac1e64ec..0000000000000
--- a/docs/changelog/99712.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99712
-summary: Make downsample target index replicas configurable
-area: Downsampling
-type: bug
-issues: []
diff --git a/docs/changelog/99717.yaml b/docs/changelog/99717.yaml
deleted file mode 100644
index db48c69ed68a2..0000000000000
--- a/docs/changelog/99717.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99717
-summary: Treating watcher webhook response header names as case-insensitive
-area: Watcher
-type: bug
-issues: []
diff --git a/docs/changelog/99726.yaml b/docs/changelog/99726.yaml
deleted file mode 100644
index 23350fdb85bd0..0000000000000
--- a/docs/changelog/99726.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99726
-summary: "ESQL: Account for an exception being thrown when building a `BytesRefArrayBlock`"
-area: ES|QL
-type: bug
-issues:
- - 99472
diff --git a/docs/changelog/99736.yaml b/docs/changelog/99736.yaml
deleted file mode 100644
index fbf177ea152a8..0000000000000
--- a/docs/changelog/99736.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99736
-summary: "ESQL: enhance SHOW FUNCTIONS command"
-area: ES|QL
-type: enhancement
-issues:
- - 99507
diff --git a/docs/changelog/99746.yaml b/docs/changelog/99746.yaml
deleted file mode 100644
index c4cdbc00f82c1..0000000000000
--- a/docs/changelog/99746.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99746
-summary: "ESQL: Log start and end of queries"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99775.yaml b/docs/changelog/99775.yaml
deleted file mode 100644
index 0c0dbdb1fce87..0000000000000
--- a/docs/changelog/99775.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99775
-summary: Adding support for exist queries to `sparse_vector` fields
-area: Search
-type: enhancement
-issues:
- - 99319
diff --git a/docs/changelog/99796.yaml b/docs/changelog/99796.yaml
deleted file mode 100644
index cad10564ed294..0000000000000
--- a/docs/changelog/99796.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99796
-summary: Support runtime fields in synthetic source
-area: Aggregations
-type: bug
-issues:
- - 98287
diff --git a/docs/changelog/99797.yaml b/docs/changelog/99797.yaml
deleted file mode 100644
index e46d4501291b5..0000000000000
--- a/docs/changelog/99797.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99797
-summary: Wait for cluster to recover before resolving index template
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/99798.yaml b/docs/changelog/99798.yaml
deleted file mode 100644
index bd8b9da71541d..0000000000000
--- a/docs/changelog/99798.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 99798
-summary: Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and
- related action) response
-area: Infra/Node Lifecycle
-type: enhancement
-issues:
- - 99678
diff --git a/docs/changelog/99804.yaml b/docs/changelog/99804.yaml
deleted file mode 100644
index b4c226217e352..0000000000000
--- a/docs/changelog/99804.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99804
-summary: Correctly handle `ScriptScoreQuery` in plain highlighter
-area: Highlighting
-type: bug
-issues:
- - 99700
diff --git a/docs/changelog/99816.yaml b/docs/changelog/99816.yaml
deleted file mode 100644
index 4caf8a36f54b4..0000000000000
--- a/docs/changelog/99816.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99816
-summary: "ESQL: Lower the implicit limit, if none is user-provided"
-area: ES|QL
-type: enhancement
-issues:
- - 99458
diff --git a/docs/changelog/99827.yaml b/docs/changelog/99827.yaml
deleted file mode 100644
index 3e6690a8e9e68..0000000000000
--- a/docs/changelog/99827.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99827
-summary: "ESQL: Fix NPE when aggregating literals"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/99832.yaml b/docs/changelog/99832.yaml
deleted file mode 100644
index 9bd83591ba920..0000000000000
--- a/docs/changelog/99832.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99832
-summary: APM Metering API
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/99873.yaml b/docs/changelog/99873.yaml
deleted file mode 100644
index d726ba00a1558..0000000000000
--- a/docs/changelog/99873.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99873
-summary: "[Profiling] Tighten resource creation check"
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/99874.yaml b/docs/changelog/99874.yaml
deleted file mode 100644
index d23fc1ea6edde..0000000000000
--- a/docs/changelog/99874.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99874
-summary: "ESQL: Use exact attributes for data source extraction"
-area: ES|QL
-type: bug
-issues:
- - 99183
diff --git a/docs/changelog/99909.yaml b/docs/changelog/99909.yaml
deleted file mode 100644
index 2051a30e4efa1..0000000000000
--- a/docs/changelog/99909.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99909
-summary: "[Profiling] Allow to customize the ILM policy"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/99912.yaml b/docs/changelog/99912.yaml
deleted file mode 100644
index 06f0f9baa9661..0000000000000
--- a/docs/changelog/99912.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99912
-summary: Represent histogram value count as long
-area: Aggregations
-type: enhancement
-issues:
- - 99820
diff --git a/docs/changelog/99938.yaml b/docs/changelog/99938.yaml
deleted file mode 100644
index 4349b73516cae..0000000000000
--- a/docs/changelog/99938.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99938
-summary: "Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest"
-area: Stats
-type: enhancement
-issues: [99744]
diff --git a/docs/changelog/99947.yaml b/docs/changelog/99947.yaml
deleted file mode 100644
index 61996c8fde92b..0000000000000
--- a/docs/changelog/99947.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99947
-summary: GET `_data_stream` displays both ILM and DSL information
-area: Data streams
-type: feature
-issues: []
diff --git a/docs/changelog/99956.yaml b/docs/changelog/99956.yaml
deleted file mode 100644
index 04646a98898a3..0000000000000
--- a/docs/changelog/99956.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 99956
-summary: "ESQL: Serialize the source in expressions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/99995.yaml b/docs/changelog/99995.yaml
deleted file mode 100644
index d67cbdaec1f37..0000000000000
--- a/docs/changelog/99995.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 99995
-summary: When a primary is inactive but this is considered expected, the same applies for the replica of this shard.
-area: Health
-type: enhancement
-issues:
- - 99951

From 2b4ba7a744f0e025cc608966a7d5f5269d0dfc13 Mon Sep 17 00:00:00 2001
From: Abdon Pijpelink
Date: Tue, 7 Nov 2023 17:24:59 +0100
Subject: [PATCH 20/30] [DOCS] Small ES|QL improvements (#101877)

* [DOCS] Small ES|QL improvements

* Fix test failure
---
 docs/reference/esql/esql-examples.asciidoc        | 14 +++++++-------
 docs/reference/esql/functions/case.asciidoc       |  2 +-
 docs/reference/esql/functions/date_parse.asciidoc |  2 +-
 docs/reference/esql/functions/date_trunc.asciidoc |  2 +-
 .../esql/processing-commands/dissect.asciidoc     |  2 +-
 .../testFixtures/src/main/resources/docs.csv-spec |  2 +-
 .../testFixtures/src/main/resources/ints.csv-spec |  2 +-
 .../testFixtures/src/main/resources/null.csv-spec |  4 ++--
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/docs/reference/esql/esql-examples.asciidoc b/docs/reference/esql/esql-examples.asciidoc
index 569dcf1172b38..817ec4f7b6f24 100644
--- a/docs/reference/esql/esql-examples.asciidoc
+++ b/docs/reference/esql/esql-examples.asciidoc
@@ -13,11 +13,11 @@
 ----
 FROM logs-*
 | WHERE event.code IS NOT NULL
-| STATS event_code_count = count(event.code) by event.code,host.name
-| ENRICH win_events on event.code with event_description
+| STATS event_code_count = COUNT(event.code) BY event.code,host.name
+| ENRICH win_events ON event.code WITH event_description
 | WHERE event_description IS NOT NULL and host.name IS NOT NULL
-| RENAME event_description as event.description
-| SORT event_code_count desc
+| RENAME event_description AS event.description
+| SORT event_code_count DESC
 | KEEP event_code_count,event.code,host.name,event.description
 ----
@@ -40,7 +40,7 @@ FROM logs-endpoint
 | WHERE process.name == "curl.exe"
 | STATS bytes = SUM(destination.bytes) BY destination.address
 | EVAL kb = bytes/1024
-| SORT kb desc
+| SORT kb DESC
 | LIMIT 10
 | KEEP kb,destination.address
 ----
@@ -60,7 +60,7 @@ FROM logs-endpoint
 ----
 FROM logs-*
 | GROK dns.question.name "%{DATA}\\.%{GREEDYDATA:dns.question.registered_domain:string}"
-| STATS unique_queries = count_distinct(dns.question.name) by dns.question.registered_domain, process.name
+| STATS unique_queries = COUNT_DISTINCT(dns.question.name) BY dns.question.registered_domain, process.name
 | WHERE unique_queries > 10
 | SORT unique_queries DESC
 | RENAME unique_queries AS `Unique Queries`, dns.question.registered_domain AS `Registered Domain`, process.name AS `Process`
@@ -85,7 +85,7 @@
 FROM logs-*
 | ENRICH ldap_lookup_new ON user.name
 | WHERE group.name IS NOT NULL
 | EVAL follow_up = CASE(destcount >= 100, "true","false")
-| SORT destcount desc
+| SORT destcount DESC
 | KEEP destcount, host.name, user.name, group.name, follow_up
 ----
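
(Editorial note, not part of the patch series: the keyword uppercasing above is a documentation style change only. Assuming ES|QL's usual case-insensitive parsing of commands and function names, which the pre-change lowercase examples relied on, both spellings run identically. A minimal sketch reusing the fields from the example above:)

[source,esql]
----
FROM logs-*
| STATS event_code_count = COUNT(event.code) BY host.name
| SORT event_code_count DESC
----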
diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc
index b243adf875cb4..73cefba12dfa5 100644
--- a/docs/reference/esql/functions/case.asciidoc
+++ b/docs/reference/esql/functions/case.asciidoc
@@ -4,7 +4,7 @@
 
 *Syntax*
 
-[source,txt]
+[source,esql]
 ----
 CASE(condition1, value1[, ..., conditionN, valueN][, default_value])
 ----
diff --git a/docs/reference/esql/functions/date_parse.asciidoc b/docs/reference/esql/functions/date_parse.asciidoc
index c74656ff1dbd7..9580ae238b663 100644
--- a/docs/reference/esql/functions/date_parse.asciidoc
+++ b/docs/reference/esql/functions/date_parse.asciidoc
@@ -4,7 +4,7 @@
 
 *Syntax*
 
-[source,txt]
+[source,esql]
 ----
 DATE_PARSE([format,] date_string)
 ----
diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc
index cacfefe73d0fd..ad0e1eb1170b4 100644
--- a/docs/reference/esql/functions/date_trunc.asciidoc
+++ b/docs/reference/esql/functions/date_trunc.asciidoc
@@ -8,6 +8,6 @@ Rounds down a date to the closest interval. Intervals can be expressed using the
 ----
 FROM employees
 | EVAL year_hired = DATE_TRUNC(1 year, hire_date)
-| STATS count(emp_no) BY year_hired
+| STATS COUNT(emp_no) BY year_hired
 | SORT year_hired
 ----
diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc
index eb7ab80d6174d..c48b72af0de7e 100644
--- a/docs/reference/esql/processing-commands/dissect.asciidoc
+++ b/docs/reference/esql/processing-commands/dissect.asciidoc
@@ -6,7 +6,7 @@
 
 [source,esql]
 ----
-DISSECT input "pattern" [ APPEND_SEPARATOR=""]
+DISSECT input "pattern" [APPEND_SEPARATOR=""]
 ----
 
 *Parameters*
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
index dbf76033fbe79..f2052462f4d8b 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
@@ -431,7 +431,7 @@ Hello Universe
 docsCase
 // tag::case[]
 FROM employees
-| EVAL type = case(
+| EVAL type = CASE(
     languages <= 1, "monolingual",
     languages <= 2, "bilingual",
     "polyglot")
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
index cdc25587793cc..9485bf800dd18 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
@@ -362,7 +362,7 @@ autoBucket
 // tag::auto_bucket[]
 FROM employees
 | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z"
-| EVAL bs = auto_bucket(salary, 20, 25324, 74999)
+| EVAL bs = AUTO_BUCKET(salary, 20, 25324, 74999)
 | SORT hire_date, salary
 | KEEP hire_date, salary, bs
 // end::auto_bucket[]
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec
index 3aa2746266da6..0d7fed9028fe4 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec
@@ -46,12 +46,12 @@ isNotNullForDocs
 // tag::is-not-null[]
 FROM employees
 | WHERE is_rehired IS NOT NULL
-| STATS count(emp_no)
+| STATS COUNT(emp_no)
 // end::is-not-null[]
 ;
 
 // tag::is-not-null-result[]
-count(emp_no):long
+COUNT(emp_no):long
 84
 // end::is-not-null-result[]
 ;
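
(Editorial note, not part of the patch series: the `DATE_PARSE([format,] date_string)` syntax documented in the hunks above can be exercised with a one-row query. This is a sketch only; the date value is invented for illustration, and it assumes the Java-style date pattern syntax used elsewhere in the ES|QL docs.)

[source,esql]
----
ROW date_string = "2023-11-07"
| EVAL date = DATE_PARSE("yyyy-MM-dd", date_string)
----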
From 34d03912bda2304729dca990ecedcb0df8537e5a Mon Sep 17 00:00:00 2001
From: Abdon Pijpelink
Date: Tue, 7 Nov 2023 18:08:34 +0100
Subject: [PATCH 21/30] [DOCS] Add CCS to ES|QL limitations (#101867)

---
 docs/reference/esql/esql-limitations.asciidoc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc
index 85f107feeb8fd..96103fc135271 100644
--- a/docs/reference/esql/esql-limitations.asciidoc
+++ b/docs/reference/esql/esql-limitations.asciidoc
@@ -100,6 +100,12 @@ you query, and query `keyword` sub-fields instead of `text` fields.
 
 {esql} does not support querying time series data streams (TSDS).
 
+[discrete]
+[[esql-limitations-ccs]]
+=== {ccs-cap} is not supported
+
+{esql} does not support {ccs}.
+
 [discrete]
 [[esql-limitations-date-math]]
 === Date math limitations

From 4a2ed90f9251e54341b05a5b751ec4f2e94373d3 Mon Sep 17 00:00:00 2001
From: Abdon Pijpelink
Date: Tue, 7 Nov 2023 18:08:54 +0100
Subject: [PATCH 22/30] [DOCS] Explain ES|QL CASE behavior with even no. arguments (#101829)

---
 docs/reference/esql/functions/case.asciidoc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc
index 73cefba12dfa5..84ff083147cb9 100644
--- a/docs/reference/esql/functions/case.asciidoc
+++ b/docs/reference/esql/functions/case.asciidoc
@@ -27,7 +27,8 @@
 Accepts pairs of conditions and values. The function returns the value that
 belongs to the first condition that evaluates to `true`.
 
 If the number of arguments is odd, the last argument is the default value which
-is returned when no condition matches.
+is returned when no condition matches. If the number of arguments is even, and
+no condition matches, the function returns `null`.
 
 *Example*
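
(Editorial note, not part of the patch series: a minimal sketch of the even-argument behavior documented above. With four arguments there is no default value, so when neither condition matches, here `languages = 5`, the result is `null`. The `ROW` value is invented for illustration; the condition pairs mirror the `docsCase` example from patch 20.)

[source,esql]
----
ROW languages = 5
| EVAL type = CASE(
    languages <= 1, "monolingual",
    languages <= 2, "bilingual")
----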
Since caching is controlled by a static setting, we need a // MutableSettingsProvider instance - private static final MutableSettingsProvider mutableSettingsForLastLoadCache = new MutableSettingsProvider(); + private static final MutableSettingsProvider mutableSettingsForLastLoadCache = new MutableSettingsProvider() { + { + put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "false"); + } + }; @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() From 43d8343283385ea8be9f90faadf1e26c42e72637 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Tue, 7 Nov 2023 12:46:33 -0500 Subject: [PATCH 24/30] Remove branch 8.10 from active branches --- branches.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/branches.json b/branches.json index c63328fb3ee22..c76417a198c57 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.11" }, - { - "branch": "8.10" - }, { "branch": "7.17" } From ae6d18037953609d58a2069121e37b484eba5c87 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 7 Nov 2023 20:07:17 +0100 Subject: [PATCH 25/30] Clean up some more dead code in o.e.s.aggregations (#101820) Another iteration of mostly automatic cleanup on top of #101806. --- .../AggConstructionContentionBenchmark.java | 5 --- .../StringStatsAggregationBuilder.java | 7 ---- .../AutoDateHistogramAggregationBuilder.java | 5 --- .../ChildrenAggregationBuilder.java | 6 ---- .../ParentAggregationBuilder.java | 6 ---- .../aggregations/AggregatorFactories.java | 1 - .../PipelineAggregationBuilder.java | 11 ++---- .../GeoHashGridAggregationBuilder.java | 5 --- .../GeoTileGridAggregationBuilder.java | 5 --- .../DateHistogramAggregationBuilder.java | 5 --- .../HistogramAggregationBuilder.java | 5 --- ...iableWidthHistogramAggregationBuilder.java | 5 --- .../missing/MissingAggregationBuilder.java | 5 --- .../prefix/IpPrefixAggregationBuilder.java | 5 --- .../range/DateRangeAggregationBuilder.java | 18 ---------- .../range/GeoDistanceAggregationBuilder.java | 5 --- .../range/IpRangeAggregationBuilder.java | 9 ----- .../bucket/range/RangeAggregationBuilder.java | 5 --- .../DiversifiedAggregationBuilder.java | 5 --- .../bucket/terms/InternalMappedRareTerms.java | 10 ------ .../bucket/terms/InternalRareTerms.java | 3 -- .../bucket/terms/LongRareTerms.java | 5 --- .../bucket/terms/ParsedLongRareTerms.java | 4 --- .../bucket/terms/ParsedRareTerms.java | 10 ------ .../terms/ParsedSignificantLongTerms.java | 4 --- .../terms/ParsedSignificantStringTerms.java | 4 --- .../bucket/terms/ParsedStringRareTerms.java | 7 ---- .../aggregations/bucket/terms/RareTerms.java | 6 ---- .../terms/RareTermsAggregationBuilder.java | 5 --- .../bucket/terms/SignificantLongTerms.java | 5 --- .../bucket/terms/SignificantStringTerms.java | 6 ---- .../bucket/terms/SignificantTerms.java | 4 --- .../SignificantTermsAggregationBuilder.java | 5 --- .../bucket/terms/StringRareTerms.java | 14 -------- .../bucket/terms/TermsAggregationBuilder.java | 5 --- .../bucket/terms/UnmappedRareTerms.java | 5 --- .../metrics/AvgAggregationBuilder.java | 5 --- .../CardinalityAggregationBuilder.java | 24 ------------- .../ExtendedStatsAggregationBuilder.java | 5 --- .../metrics/GeoBoundsAggregationBuilder.java | 5 --- .../GeoCentroidAggregationBuilder.java | 5 --- .../aggregations/metrics/InternalStats.java | 2 +- .../metrics/MaxAggregationBuilder.java | 5 --- ...anAbsoluteDeviationAggregationBuilder.java | 12 ------- .../metrics/MinAggregationBuilder.java | 5 --- .../PercentileRanksAggregationBuilder.java 
From ae6d18037953609d58a2069121e37b484eba5c87 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Tue, 7 Nov 2023 20:07:17 +0100
Subject: [PATCH 25/30] Clean up some more dead code in o.e.s.aggregations (#101820)

Another iteration of mostly automatic cleanup on top of #101806.
---
 .../AggConstructionContentionBenchmark.java   |  5 ---
 .../StringStatsAggregationBuilder.java        |  7 ----
 .../AutoDateHistogramAggregationBuilder.java  |  5 ---
 .../ChildrenAggregationBuilder.java           |  6 ----
 .../ParentAggregationBuilder.java             |  6 ----
 .../aggregations/AggregatorFactories.java     |  1 -
 .../PipelineAggregationBuilder.java           | 11 ++----
 .../GeoHashGridAggregationBuilder.java        |  5 ---
 .../GeoTileGridAggregationBuilder.java        |  5 ---
 .../DateHistogramAggregationBuilder.java      |  5 ---
 .../HistogramAggregationBuilder.java          |  5 ---
 ...iableWidthHistogramAggregationBuilder.java |  5 ---
 .../missing/MissingAggregationBuilder.java    |  5 ---
 .../prefix/IpPrefixAggregationBuilder.java    |  5 ---
 .../range/DateRangeAggregationBuilder.java    | 18 ----------
 .../range/GeoDistanceAggregationBuilder.java  |  5 ---
 .../range/IpRangeAggregationBuilder.java      |  9 -----
 .../bucket/range/RangeAggregationBuilder.java |  5 ---
 .../DiversifiedAggregationBuilder.java        |  5 ---
 .../bucket/terms/InternalMappedRareTerms.java | 10 ------
 .../bucket/terms/InternalRareTerms.java       |  3 --
 .../bucket/terms/LongRareTerms.java           |  5 ---
 .../bucket/terms/ParsedLongRareTerms.java     |  4 ---
 .../bucket/terms/ParsedRareTerms.java         | 10 ------
 .../terms/ParsedSignificantLongTerms.java     |  4 ---
 .../terms/ParsedSignificantStringTerms.java   |  4 ---
 .../bucket/terms/ParsedStringRareTerms.java   |  7 ----
 .../aggregations/bucket/terms/RareTerms.java  |  6 ----
 .../terms/RareTermsAggregationBuilder.java    |  5 ---
 .../bucket/terms/SignificantLongTerms.java    |  5 ---
 .../bucket/terms/SignificantStringTerms.java  |  6 ----
 .../bucket/terms/SignificantTerms.java        |  4 ---
 .../SignificantTermsAggregationBuilder.java   |  5 ---
 .../bucket/terms/StringRareTerms.java         | 14 --------
 .../bucket/terms/TermsAggregationBuilder.java |  5 ---
 .../bucket/terms/UnmappedRareTerms.java       |  5 ---
 .../metrics/AvgAggregationBuilder.java        |  5 ---
 .../CardinalityAggregationBuilder.java        | 24 -------------
 .../ExtendedStatsAggregationBuilder.java      |  5 ---
 .../metrics/GeoBoundsAggregationBuilder.java  |  5 ---
 .../GeoCentroidAggregationBuilder.java        |  5 ---
 .../aggregations/metrics/InternalStats.java   |  2 +-
 .../metrics/MaxAggregationBuilder.java        |  5 ---
 ...anAbsoluteDeviationAggregationBuilder.java | 12 -------
 .../metrics/MinAggregationBuilder.java        |  5 ---
 .../PercentileRanksAggregationBuilder.java    |  5 ---
 .../PercentilesAggregationBuilder.java        |  5 ---
 .../ScriptedMetricAggregationBuilder.java     | 36 -------------------
 .../metrics/StatsAggregationBuilder.java      |  5 ---
 .../metrics/SumAggregationBuilder.java        |  5 ---
 .../metrics/ValueCountAggregationBuilder.java |  5 ---
 .../support/AggregationContext.java           | 10 ------
 .../support/MultiValuesSourceFieldConfig.java | 24 -------------
 .../support/TimeSeriesIndexSearcher.java      |  3 --
 .../aggregations/support/ValueType.java       |  6 +---
 .../ValuesSourceAggregationBuilder.java       | 10 ------
 .../support/ValuesSourceRegistry.java         |  6 +---
 .../search/SearchModuleTests.java             |  6 ----
 .../index/mapper/MapperServiceTestCase.java   |  5 ---
 .../BasePipelineAggregationTestCase.java      | 13 +------
 .../boxplot/BoxplotAggregationBuilder.java    |  5 ---
 .../rate/RateAggregationBuilder.java          |  5 ---
 .../StringStatsAggregationBuilder.java        |  5 ---
 .../MockDeprecatedAggregationBuilder.java     |  6 ----
 .../CountCorrelationFunctionTests.java        |  2 --
 .../geogrid/GeoHexGridAggregationBuilder.java |  5 ---
 .../CartesianBoundsAggregationBuilder.java    |  5 ---
 .../CartesianCentroidAggregationBuilder.java  |  5 ---
 68 files changed, 6 insertions(+), 464 deletions(-)

diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java
index 1dafdbb9be2b9..5b139f800cb39 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java
@@ -224,11 +224,6 @@ public Set getMatchingFieldNames(String pattern) {
             throw new UnsupportedOperationException();
         }
 
-        @Override
-        public boolean isFieldMapped(String field) {
-            return field.startsWith("int");
-        }
-
         @Override
        public FactoryType compile(Script script, ScriptContext context) {
             throw new UnsupportedOperationException();
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
index 13a72ee64c03f..f28aabe41f4a9 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
-import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.xcontent.ParseField;
@@ -71,12 +70,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        // This would be called from the same thing that calls innerBuild, which also throws. So it's "safe" to throw here.
-        throw new UnsupportedOperationException();
-    }
-
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         return builder.field(StringStatsAggregationBuilder.SHOW_DISTRIBUTION_FIELD.getPreferredName(), showDistribution);
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
index 83f7d496f698f..dd497e8ca5478 100644
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
+++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
@@ -166,11 +166,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     public String getMinimumIntervalExpression() {
         return minimumIntervalExpression;
     }
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
index 57649129a638f..0de7b74759828 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
-import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -175,11 +174,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return ValuesSourceRegistry.UNREGISTERED_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java
index d608efcba9b83..b130411e5e099 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
-import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -178,11 +177,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return ValuesSourceRegistry.UNREGISTERED_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
index 98c131213b3fe..0738303020de5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
@@ -366,7 +366,6 @@ public Builder addPipelineAggregator(PipelineAggregationBuilder pipelineAggregat
         public ActionRequestValidationException validate(ActionRequestValidationException e) {
             PipelineAggregationBuilder.ValidationContext context = PipelineAggregationBuilder.ValidationContext.forTreeRoot(
                 aggregationBuilders,
-                pipelineAggregatorBuilders,
                 e
             );
             validatePipelines(context);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java
index 153a54bf890e4..6ce5b32864f18 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java
@@ -74,10 +74,9 @@ public abstract static class ValidationContext {
          */
         public static ValidationContext forTreeRoot(
             Collection siblingAggregations,
-            Collection siblingPipelineAggregations,
             ActionRequestValidationException validationFailuresSoFar
         ) {
-            return new ForTreeRoot(siblingAggregations, siblingPipelineAggregations, validationFailuresSoFar);
+            return new ForTreeRoot(siblingAggregations, validationFailuresSoFar);
         }
 
         /**
@@ -95,16 +94,10 @@ private ValidationContext(ActionRequestValidationException validationFailuresSoF
 
         private static class ForTreeRoot extends ValidationContext {
             private final Collection siblingAggregations;
-            private final Collection siblingPipelineAggregations;
 
-            ForTreeRoot(
-                Collection siblingAggregations,
-                Collection siblingPipelineAggregations,
-                ActionRequestValidationException validationFailuresSoFar
-            ) {
+            ForTreeRoot(Collection siblingAggregations, ActionRequestValidationException validationFailuresSoFar) {
                 super(validationFailuresSoFar);
                 this.siblingAggregations = Objects.requireNonNull(siblingAggregations);
-                this.siblingPipelineAggregations = Objects.requireNonNull(siblingPipelineAggregations);
             }
 
             @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java
index e0edebd7e5201..faeb569688994 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java
@@ -110,11 +110,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java
index b7532bdcb4e5b..72d3ee2267a87 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java
@@ -106,11 +106,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.V_7_0_0;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
index 2653f9ac53553..20223f6f92524 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java
@@ -401,11 +401,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     protected ValuesSourceAggregatorFactory innerBuild(
         AggregationContext context,
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java
index f5fb2d128f75f..fa2c5dc219cba 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java
@@ -360,11 +360,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     protected ValuesSourceAggregatorFactory innerBuild(
         AggregationContext context,
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java
index 04e73691979f8..cc7619070b96a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java
@@ -232,11 +232,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.V_7_9_0;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java
index 915d7c32b4c74..f7c190b443a79 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java
@@ -114,11 +114,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java
index 733a306a70c75..a64c1221698c2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java
@@ -188,11 +188,6 @@ protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeBoolean(keyed);
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     protected ValuesSourceType defaultValueSourceType() {
         return CoreValuesSourceType.IP;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
index 59baf14f988f6..b6462f0f17bad 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
@@ -124,11 +124,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
@@ -227,19 +222,6 @@ public DateRangeAggregationBuilder addRange(double from, double to) {
         return addRange(null, from, to);
     }
 
-    /**
-     * Add a new range with no lower bound.
-     *
-     * @param key
-     *            the key to use for this range in the response
-     * @param to
-     *            the upper bound on the dates, exclusive
-     */
-    public DateRangeAggregationBuilder addUnboundedTo(String key, double to) {
-        addRange(new RangeAggregator.Range(key, null, to));
-        return this;
-    }
-
     /**
      * Add a new range with no upper bound.
     *
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
index ffc6d68f21a05..dc0b42f507d84 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
@@ -401,11 +401,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     public GeoDistanceAggregationBuilder unit(DistanceUnit unit) {
         if (unit == null) {
             throw new IllegalArgumentException("[unit] must not be null: [" + name + "]");
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java
index 42f6d9957c329..9c22917b81c8e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java
@@ -226,20 +226,11 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     public IpRangeAggregationBuilder keyed(boolean keyed) {
         this.keyed = keyed;
         return this;
     }
 
-    public boolean keyed() {
-        return keyed;
-    }
-
     /** Add a new {@link Range} to this aggregation. */
     public IpRangeAggregationBuilder addRange(Range range) {
         ranges.add(range);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
index f9fc993c3f347..e94d7c1ebcaed 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
@@ -198,11 +198,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
index 90c29a8e3556f..e77b15e1ed1d4 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
@@ -185,11 +185,6 @@ public String getType() {
         return NAME;
     }
 
-    @Override
-    protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
-        return REGISTRY_KEY;
-    }
-
     @Override
     public TransportVersion getMinimalSupportedVersion() {
         return TransportVersions.ZERO;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
index f1b9608c9c2cf..aaa9857fc1562 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
@@ -28,8 +28,6 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
-import java.util.function.Function;
-import java.util.stream.Collectors;
 
 public abstract class InternalMappedRareTerms, B extends InternalRareTerms.Bucket> extends InternalRareTerms {
 
@@ -156,14 +154,6 @@ public List getBuckets() {
         return buckets;
     }
 
-    @Override
-    public B getBucketByKey(String term) {
-        if (bucketMap == null) {
-            bucketMap = buckets.stream().collect(Collectors.toMap(InternalRareTerms.Bucket::getKeyAsString, Function.identity()));
-        }
-        return bucketMap.get(term);
-    }
-
     @Override
     public boolean equals(Object obj) {
         if (this == obj) return true;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
index a9870d113ae3a..6cd4c76317106 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java
@@ -141,9 +141,6 @@ protected final void doWriteTo(StreamOutput out) throws IOException {
     @Override
     public abstract List getBuckets();
 
-    @Override
-    public abstract B getBucketByKey(String term);
-
     @Override
     public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) {
         throw new UnsupportedOperationException();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
index 2f8b685d4d623..f8e7ca460ea48 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java
@@ -57,11 +57,6 @@ public Object getKey() {
             return term;
         }
 
-        @Override
-        public Number getKeyAsNumber() {
-            return term;
-        }
-
         @Override
         public int compareKey(Bucket other) {
             return Long.compare(term, other.term);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
index c383d27022648..616bfb3d5a115 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedLongRareTerms.java
@@ -57,10 +57,6 @@ public String getKeyAsString() {
         return null;
     }
 
-    public Number getKeyAsNumber() {
-        return key;
-    }
-
     @Override
     protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
         builder.field(CommonFields.KEY.getPreferredName(), key);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
index 3edf31b9ed69d..e1e865760d5e7 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedRareTerms.java
@@ -29,16 +29,6 @@ public List getBuckets() {
         return buckets;
     }
 
-
@Override - public RareTerms.Bucket getBucketByKey(String term) { - for (RareTerms.Bucket bucket : getBuckets()) { - if (bucket.getKeyAsString().equals(term)) { - return bucket; - } - } - return null; - } - @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java index 9f7bfb564b73f..13cdc88a0082d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantLongTerms.java @@ -52,10 +52,6 @@ public String getKeyAsString() { return Long.toString(key); } - public Number getKeyAsNumber() { - return key; - } - @Override protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), key); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java index f2b1375b7bd94..28cb9a6fb2a44 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedSignificantStringTerms.java @@ -54,10 +54,6 @@ public String getKeyAsString() { return key.utf8ToString(); } - public Number getKeyAsNumber() { - return Double.parseDouble(key.utf8ToString()); - } - @Override protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { return builder.field(CommonFields.KEY.getPreferredName(), getKey()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java index 24923a115b27c..e19d07f5ee22d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java @@ -59,13 +59,6 @@ public String getKeyAsString() { return null; } - public Number getKeyAsNumber() { - if (key != null) { - return Double.parseDouble(key.utf8ToString()); - } - return null; - } - @Override protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { return builder.field(CommonFields.KEY.getPreferredName(), getKey()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java index 9b544296982e8..33d4443a49148 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java @@ -18,7 +18,6 @@ public interface RareTerms extends MultiBucketsAggregation { */ interface Bucket extends MultiBucketsAggregation.Bucket { - Number getKeyAsNumber(); } /** @@ -27,9 +26,4 @@ interface Bucket extends MultiBucketsAggregation.Bucket { @Override List getBuckets(); - /** - * Get the bucket for the given term, or 
null if there is no such bucket. - */ - Bucket getBucketByKey(String term); - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index 0422428e6b728..f2ea616802655 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -237,11 +237,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_3_0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java index 6040c5e42e841..3d188e1fd09f7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantLongTerms.java @@ -72,11 +72,6 @@ public String getKeyAsString() { return format.format(term).toString(); } - @Override - public Number getKeyAsNumber() { - return term; - } - @Override protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), term); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java index 9782093401396..b35359c80a75e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantStringTerms.java @@ -66,12 +66,6 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - @Override - public Number getKeyAsNumber() { - // this method is needed for scripted numeric aggregations - return Double.parseDouble(termBytes.utf8ToString()); - } - @Override public String getKeyAsString() { return format.format(termBytes).toString(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java index 55be3342a10d7..f28054aaf52eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTerms.java @@ -47,10 +47,6 @@ interface Bucket extends MultiBucketsAggregation.Bucket { */ long getSupersetSize(); - /** - * @return The key, expressed as a number - */ - Number getKeyAsNumber(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index bb89e7d54bcb6..056a8a00dd72f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -360,11 +360,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_3_0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java index 18cc44c7e32ab..1cbb38790ed37 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java @@ -50,20 +50,6 @@ public Object getKey() { return getKeyAsString(); } - // this method is needed for scripted numeric aggs - @Override - public Number getKeyAsNumber() { - /* - * If the term is a long greater than 2^52 then parsing as a double would lose accuracy. Therefore, we first parse as a long and - * if this fails then we attempt to parse the term as a double. - */ - try { - return Long.parseLong(termBytes.utf8ToString()); - } catch (final NumberFormatException ignored) { - return Double.parseDouble(termBytes.utf8ToString()); - } - } - @Override public String getKeyAsString() { return format.format(termBytes).toString(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 46b5e2d6d7980..ebc6b2c1cc70c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -436,11 +436,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java index 2ad69b98597e2..eb504e05292a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java @@ -107,9 +107,4 @@ public List getBuckets() { return emptyList(); } - @Override - public UnmappedRareTerms.Bucket getBucketByKey(String term) { - return null; - } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java index e66d3d0a34580..3038e35dc06ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java @@ -99,11 +99,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 615b46434226d..25d38b11b03ff 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -132,25 +132,6 @@ public CardinalityAggregationBuilder precisionThreshold(long precisionThreshold) return this; } - /** - * Get the precision threshold. Higher values improve accuracy but also - * increase memory usage. Will return null if the - * precisionThreshold has not been set yet. - */ - public Long precisionThreshold() { - return precisionThreshold; - } - - /** - * Get the execution hint. This is an optional user specified hint that - * will be used to decide on the specific collection algorithm. Since this - * is a hint, the implementation may choose to ignore it (typically when - * the specified method is not applicable to the given field type) - */ - public String ExecutionHint() { - return executionHint; - } - /** * Set the execution hint. This is an optional user specified hint that * will be used to decide on the specific collection algorithm. Since this @@ -213,11 +194,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java index 21b3ebfeeaf4d..91257fb2cd88e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java @@ -140,11 +140,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 3360ed63b2307..e109cb4066785 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -144,11 +144,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java index ac0ffae7ac2e1..d5fbeb0459a3b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java @@ -106,11 +106,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java index 6dccdf0c2fae4..2c6b28362c128 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalStats.java @@ -226,7 +226,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th return builder; } - protected XContentBuilder otherStatsToXContent(XContentBuilder builder, Params params) throws IOException { + protected XContentBuilder otherStatsToXContent(XContentBuilder builder, @SuppressWarnings("unused") Params params) throws IOException { return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java index b4bb1f01b0662..e14de225dcd92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java @@ -103,11 +103,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java index 315e0bab027c5..8f5d3c1b9f322 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -81,13 +81,6 @@ protected MedianAbsoluteDeviationAggregationBuilder( this.executionHint = clone.executionHint; } - /** - * Returns the compression factor of the t-digest sketches used - */ - public double compression() { - return compression; - } - /** * Set the compression factor of the t-digest sketches used */ @@ -187,11 +180,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java index 3d4957feba7db..d410fb032117e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java @@ -105,11 +105,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey 
getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java index e61105b5822cf..6854ff8f8b632 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java @@ -110,11 +110,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java index 0347d157cf3c5..934619ae76b7d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java @@ -146,11 +146,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 8386bb8bbdb06..0596af8cbb51d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -127,13 +127,6 @@ public ScriptedMetricAggregationBuilder initScript(Script initScript) { return this; } - /** - * Get the {@code init} script. - */ - public Script initScript() { - return initScript; - } - /** * Set the {@code map} script. */ @@ -145,13 +138,6 @@ public ScriptedMetricAggregationBuilder mapScript(Script mapScript) { return this; } - /** - * Get the {@code map} script. - */ - public Script mapScript() { - return mapScript; - } - /** * Set the {@code combine} script. */ @@ -163,13 +149,6 @@ public ScriptedMetricAggregationBuilder combineScript(Script combineScript) { return this; } - /** - * Get the {@code combine} script. - */ - public Script combineScript() { - return combineScript; - } - /** * Set the {@code reduce} script. */ @@ -181,13 +160,6 @@ public ScriptedMetricAggregationBuilder reduceScript(Script reduceScript) { return this; } - /** - * Get the {@code reduce} script. - */ - public Script reduceScript() { - return reduceScript; - } - /** * Set parameters that will be available in the {@code init}, * {@code map} and {@code combine} phases. @@ -200,14 +172,6 @@ public ScriptedMetricAggregationBuilder params(Map params) { return this; } - /** - * Get parameters that will be available in the {@code init}, - * {@code map} and {@code combine} phases. 
- */ - public Map params() { - return params; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.NONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java index e978ffec42b4a..f6a04a9d64684 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java @@ -110,11 +110,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java index da441363020bd..feeecc2d5a06a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java @@ -103,11 +103,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index 71f745559fc77..3b815640e1199 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java @@ -112,11 +112,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java index 822dd6d983e5c..c2aa26409f010 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java @@ -150,11 +150,6 @@ public final FieldContext buildFieldContext(MappedFieldType ft) { */ public abstract Set getMatchingFieldNames(String pattern); - /** - * Returns true if the field identified by the provided name is mapped, false otherwise - */ - public abstract boolean isFieldMapped(String field); - /** * Compile a script. 
*/ @@ -474,11 +469,6 @@ public Set getMatchingFieldNames(String pattern) { return context.getMatchingFieldNames(pattern); } - @Override - public boolean isFieldMapped(String field) { - return context.isFieldMapped(field); - } - @Override public FactoryType compile(Script script, ScriptContext scriptContext) { return context.compile(script, scriptContext); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index 9f698528dcefb..42330b995ae94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -310,37 +310,21 @@ public static class Builder { private String format = null; private IncludeExclude includeExclude = null; - public String getFieldName() { - return fieldName; - } - public Builder setFieldName(String fieldName) { this.fieldName = fieldName; return this; } - public Object getMissing() { - return missing; - } - public Builder setMissing(Object missing) { this.missing = missing; return this; } - public Script getScript() { - return script; - } - public Builder setScript(Script script) { this.script = script; return this; } - public ZoneId getTimeZone() { - return timeZone; - } - public Builder setTimeZone(ZoneId timeZone) { this.timeZone = timeZone; return this; @@ -356,19 +340,11 @@ public Builder setUserValueTypeHint(ValueType userValueTypeHint) { return this; } - public ValueType getUserValueTypeHint() { - return userValueTypeHint; - } - public Builder setFormat(String format) { this.format = format; return this; } - public String getFormat() { - return format; - } - public Builder setIncludeExclude(IncludeExclude includeExclude) { this.includeExclude = includeExclude; return this; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java index 375ccd127dc9e..21138f46e974e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java @@ -240,8 +240,6 @@ private static class LeafWalker { private final SortedNumericDocValues timestamps; // TODO can we have this just a NumericDocValues? 
private final BytesRefBuilder scratch = new BytesRefBuilder(); - private final Scorer scorer; - int docId = -1; int tsidOrd; long timestamp; @@ -252,7 +250,6 @@ private static class LeafWalker { this.collector = bucketCollector.getLeafCollector(aggCtx); liveDocs = context.reader().getLiveDocs(); this.collector.setScorer(scorer); - this.scorer = scorer; iterator = scorer.iterator(); tsids = DocValues.getSorted(context.reader(), TimeSeriesIdFieldMapper.NAME); timestamps = DocValues.getSortedNumeric(context.reader(), DataStream.TIMESTAMP_FIELD_NAME); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 2106976252581..bc83a5b5cd3b1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -49,7 +49,7 @@ public enum ValueType implements Writeable { final ValuesSourceType valuesSourceType; final DocValueFormat defaultFormat; private final byte id; - private String preferredName; + private final String preferredName; public static final ParseField VALUE_TYPE = new ParseField("value_type", "valueType"); @@ -101,10 +101,6 @@ public boolean isNotA(ValueType valueType) { return isA(valueType) == false; } - public DocValueFormat defaultFormat() { - return defaultFormat; - } - public static ValueType lenientParse(String type) { return switch (type) { case "string" -> STRING; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index c1b9b8c376a59..2b7e27eb97c7d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -374,14 +374,6 @@ public AB missing(Object missing) { return (AB) this; } - /** - * Gets the value to use when the aggregation finds a missing value in a - * document - */ - public Object missing() { - return missing; - } - /** * Sets the time zone to use for this aggregation */ @@ -422,8 +414,6 @@ protected final ValuesSourceAggregatorFactory doBuild(AggregationContext context return factory; } - protected abstract ValuesSourceRegistry.RegistryKey getRegistryKey(); - /** * Aggregations should use this method to define a {@link ValuesSourceType} of last resort. This will only be used when the resolver * can't find a field and the user hasn't provided a value type hint. 
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java index 6249612184157..c33ad5266d4e2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java @@ -150,7 +150,7 @@ public ValuesSourceRegistry build() { /** Maps Aggregation names to (ValuesSourceType, Supplier) pairs, keyed by ValuesSourceType */ private final AggregationUsageService usageService; - private Map, Map> aggregatorRegistry; + private final Map, Map> aggregatorRegistry; public ValuesSourceRegistry( Map, List>> aggregatorRegistry, @@ -160,10 +160,6 @@ public ValuesSourceRegistry( this.usageService = usageService; } - public boolean isRegistered(RegistryKey registryKey) { - return aggregatorRegistry.containsKey(registryKey); - } - public T getAggregator(RegistryKey registryKey, ValuesSourceConfig valuesSourceConfig) { if (registryKey != null && aggregatorRegistry.containsKey(registryKey)) { @SuppressWarnings("unchecked") diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index eb6318d8abe75..0a9336b93bd2c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.ExplainPhase; @@ -514,11 +513,6 @@ public String getType() { return "test"; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return ValuesSourceRegistry.UNREGISTERED_KEY; - } - @Override protected void innerWriteTo(StreamOutput out) throws IOException {} diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 168ab8663a153..aecd81882c108 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -420,11 +420,6 @@ public Analyzer buildCustomAnalyzer( return null; } - @Override - public boolean isFieldMapped(String field) { - throw new UnsupportedOperationException(); - } - @Override public SearchLookup lookup() { return lookupSupplier.get(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java index 48daf85938b37..d82d6ac4c4f6b 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java @@ -220,18 +220,7 @@ protected String validate(AggregationBuilder 
parent, AF builder) { * Helper for testing validation. */ protected String validate(Collection siblingAggregations, AF builder) { - return validate(siblingAggregations, emptyList(), builder); - } - - /** - * Helper for testing validation. - */ - protected String validate( - Collection siblingAggregations, - Collection siblingPipelineAggregations, - AF builder - ) { - return validate(ValidationContext.forTreeRoot(siblingAggregations, siblingPipelineAggregations, null), builder); + return validate(ValidationContext.forTreeRoot(siblingAggregations, null), builder); } /** diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java index 03ddfee9681fb..b0e8b8ae05b51 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java @@ -187,11 +187,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public Optional> getOutputFieldNames() { return Optional.of(InternalBoxplot.METRIC_NAMES); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java index 634c76b819ea0..0b0becc1ae446 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java @@ -117,11 +117,6 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override protected RateAggregatorFactory innerBuild( AggregationContext context, diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java index 0be2ac9f24e62..c75ed46102112 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java @@ -116,11 +116,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - /** * Return whether to include the probability distribution of each character in the results. * {@code showDistribution} is true, distribution will be included. 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java index da65edb957b94..8b5806a869c8e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java @@ -21,7 +21,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -70,11 +69,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return ValuesSourceRegistry.UNREGISTERED_KEY; - } - @Override protected void innerWriteTo(StreamOutput out) throws IOException {} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunctionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunctionTests.java index a717d7f6bfc45..7f857c06959e5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunctionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunctionTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -59,7 +58,6 @@ public void testValidation() { CountCorrelationFunction function = new CountCorrelationFunction(CountCorrelationIndicatorTests.randomInstance()); PipelineAggregationBuilder.ValidationContext validationContext = PipelineAggregationBuilder.ValidationContext.forTreeRoot( aggBuilders, - Collections.emptyList(), null ); function.validate(validationContext, "terms>metric_agg"); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java index 534c08f39c7e6..f513606144e34 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java @@ -118,11 +118,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_1_0; diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java 
b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java index a7385b30ea165..cbb02d6d7821c 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java @@ -99,11 +99,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_6_0; diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java index dbd8c41af671b..340d80695458f 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java @@ -106,11 +106,6 @@ public String getType() { return NAME; } - @Override - protected ValuesSourceRegistry.RegistryKey getRegistryKey() { - return REGISTRY_KEY; - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_6_0; From e3b8611005931d9746a88f75901dc7833cf4214c Mon Sep 17 00:00:00 2001 From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Date: Tue, 7 Nov 2023 15:06:05 -0500 Subject: [PATCH 26/30] Remove Functionbeat from 'How monitoring works' page (#101889) --- docs/reference/monitoring/how-monitoring-works.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/reference/monitoring/how-monitoring-works.asciidoc b/docs/reference/monitoring/how-monitoring-works.asciidoc index 80cd560bd8f09..13fa006bdbbe0 100644 --- a/docs/reference/monitoring/how-monitoring-works.asciidoc +++ b/docs/reference/monitoring/how-monitoring-works.asciidoc @@ -34,7 +34,6 @@ collection methods, you should migrate to using {agent} or {metricbeat}. * Monitoring {beats}: ** {auditbeat-ref}/monitoring.html[{auditbeat}] ** {filebeat-ref}/monitoring.html[{filebeat}] -** {functionbeat-ref}/monitoring.html[{functionbeat}] ** {heartbeat-ref}/monitoring.html[{heartbeat}] ** {metricbeat-ref}/monitoring.html[{metricbeat}] ** {packetbeat-ref}/monitoring.html[{packetbeat}] From 4b1909a47bc99cb9145968f30fbcefaa89af99e6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 7 Nov 2023 21:35:34 +0000 Subject: [PATCH 27/30] Expose roles by default in cat allocation API (#101753) In #96994 we added a column for node roles to the `GET _cat/allocation` API but left it hidden by default to avoid changing behaviour. In fact it's ok to change the behaviour of the CAT APIs since they're only intended for human consumption, and it's awfully useful to see the node roles in this API response, so this commit makes this column display by default. 
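As a quick illustration (a hypothetical request, not part of this change): callers that prefer the previous layout can still pin the columns explicitly with the cat APIs' `h` parameter, for example

    GET _cat/allocation?v=true&h=shards,disk.indices,disk.used,disk.avail,disk.total,disk.percent,host,ip,node

The column names here are the ones registered by `RestAllocationAction`; any subset works with `h`, so consumers that scrape this output can keep a stable layout regardless of the default.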
--- docs/changelog/101753.yaml | 5 +++ docs/reference/cat/allocation.asciidoc | 8 ++-- .../test/cat.allocation/10_basic.yml | 42 +++++++++++++++---- .../rest/action/cat/RestAllocationAction.java | 2 +- 4 files changed, 44 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/101753.yaml diff --git a/docs/changelog/101753.yaml b/docs/changelog/101753.yaml new file mode 100644 index 0000000000000..7b64075998430 --- /dev/null +++ b/docs/changelog/101753.yaml @@ -0,0 +1,5 @@ +pr: 101753 +summary: Expose roles by default in cat allocation API +area: CAT APIs +type: enhancement +issues: [] diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 7153e99e503a8..f9574ed933398 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -6,7 +6,7 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or {kib} +cat APIs are only intended for human consumption using the command line or {kib} console. They are _not_ intended for use by applications. ==== @@ -113,10 +113,10 @@ The API returns the following response: [source,txt] -------------------------------------------------- -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 +shards disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] -// TESTRESPONSE[s/CSUXak2/.+/ non_json] +// TESTRESPONSE[s/CSUXak2 himrst/.+/ non_json] This response shows a single shard is allocated to the one node available. diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml index f56a1945b2d7c..ed519438f1b1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.allocation/10_basic.yml @@ -26,6 +26,10 @@ --- "One index": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: indices.create: index: test @@ -45,13 +49,14 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ - UNASSIGNED + UNASSIGNED \s* \n )? $/ @@ -59,6 +64,10 @@ --- "Node ID": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: node_id: _master @@ -74,7 +83,8 @@ (\d+ \s+)? #no value from client nodes [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n ) $/ @@ -92,6 +102,10 @@ "All Nodes": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: node_id: "*" @@ -108,13 +122,14 @@ (\d+ \s+)? #no value from client nodes [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ ( \s* #allow leading spaces to account for right-justified text \d+ \s+ - UNASSIGNED + UNASSIGNED \s* \n )? 
$/ @@ -122,6 +137,10 @@ --- "Column headers": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: v: true @@ -136,7 +155,8 @@ disk.percent \s+ host \s+ ip \s+ - node + node \s+ + node.role \n ( \s* #allow leading spaces to account for right-justified text @@ -148,7 +168,8 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ $/ @@ -193,6 +214,10 @@ "Bytes": + - skip: + version: " - 8.11.99" + reason: "node.role column shown by default from 8.12.0 onwards" + - do: cat.allocation: bytes: gb @@ -208,7 +233,8 @@ (\d+ \s+) #always should return value since we filter out non data nodes by default [-\w.]+ \s+ \d+(\.\d+){3} \s+ - [-\w.]+ + [-\w.]+ \s+ + [\w]+ \n )+ $/ diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index e2767e45f4858..2dc657582a0a1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -92,7 +92,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("host", "alias:h;desc:host of node"); table.addCell("ip", "desc:ip of node"); table.addCell("node", "alias:n;desc:name of node"); - table.addCell("node.role", "default:false;alias:r,role,nodeRole;desc:node roles"); + table.addCell("node.role", "alias:r,role,nodeRole;desc:node roles"); table.endHeaders(); return table; } From 5a2b61814540026dced477513649bf4781369e0e Mon Sep 17 00:00:00 2001 From: Matt Culbreth Date: Tue, 7 Nov 2023 17:16:41 -0500 Subject: [PATCH 28/30] Forward port 8.11.0 docs (#101885) --- .../reference/migration/migrate_8_11.asciidoc | 55 ++- docs/reference/release-notes/8.11.0.asciidoc | 333 +++++++++++++++++- 2 files changed, 383 insertions(+), 5 deletions(-) diff --git a/docs/reference/migration/migrate_8_11.asciidoc b/docs/reference/migration/migrate_8_11.asciidoc index a353d1a6a87fa..098456e1aca42 100644 --- a/docs/reference/migration/migrate_8_11.asciidoc +++ b/docs/reference/migration/migrate_8_11.asciidoc @@ -9,12 +9,61 @@ your application to {es} 8.11. See also <> and <>. -coming::[8.11.0] - [discrete] [[breaking-changes-8.11]] === Breaking changes -There are no breaking changes in {es} 8.11. +The following changes in {es} 8.11 might affect your applications +and prevent them from operating normally. +Before upgrading to 8.11, review these changes and take the described steps +to mitigate the impact. + + +There are no notable breaking changes in {es} 8.11. +But there are some less critical breaking changes. + +[discrete] +[[breaking_811_rest_api_changes]] +==== REST API changes + +[[remove_transport_versions_from_cluster_state_api]] +.Remove `transport_versions` from cluster state API +[%collapsible] +==== +*Details* + +The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject. + +*Impact* + +If needed, retrieve the per-node transport versions from the `nodes_versions` subobject. +==== + + +[discrete] +[[deprecated-8.11]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.11 +and will be removed in a future version. 
+While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.11. + +To find out if you are using any deprecated functionality, +enable <>. + +[discrete] +[[deprecations_811_rollup]] +==== Rollup deprecations + +[[rollup_functionality_deprecated]] +.Rollup functionality is now deprecated +[%collapsible] +==== +*Details* + +<> has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. + +*Impact* + +Use <> to reduce storage costs for time series data by storing it at reduced granularity. +==== diff --git a/docs/reference/release-notes/8.11.0.asciidoc b/docs/reference/release-notes/8.11.0.asciidoc index a5c8c8c73128e..08ddaf5667845 100644 --- a/docs/reference/release-notes/8.11.0.asciidoc +++ b/docs/reference/release-notes/8.11.0.asciidoc @@ -1,8 +1,337 @@ [[release-notes-8.11.0]] == {es} version 8.11.0 -coming[8.11.0] - Also see <>. +[[breaking-8.11.0]] +[float] +=== Breaking changes + +Infra/Core:: +* Remove `transport_versions` from cluster state API {es-pull}99223[#99223] + +[[bug-8.11.0]] +[float] +=== Bug fixes + +Aggregations:: +* Adjust `DateHistogram's` bucket accounting to be iterative {es-pull}101012[#101012] +* Allow parsing on non-string routing fields {es-pull}97729[#97729] +* Support runtime fields in synthetic source {es-pull}99796[#99796] (issue: {es-issue}98287[#98287]) + +Allocation:: +* Consider node shutdown in `DataTierAllocationDecider` {es-pull}98824[#98824] (issue: {es-issue}97207[#97207]) + +Application:: +* Align look-back with client-side cache {es-pull}101264[#101264] +* Increase K/V look-back time interval {es-pull}101205[#101205] +* Provide stable resampling {es-pull}101255[#101255] +* [Profiling] Tighten resource creation check {es-pull}99873[#99873] + +Authorization:: +* Allow `enrich_user` to read/view enrich indices {es-pull}100707[#100707] +* Grant editor and viewer access to profiling {es-pull}100594[#100594] + +CCR:: +* CCR: Use local cluster state request {es-pull}100323[#100323] + +CRUD:: +* Change `GetFromTranslog` to indices action {es-pull}99300[#99300] +* Wait for cluster to recover before resolving index template {es-pull}99797[#99797] + +Cluster Coordination:: +* Reset `GatewayService` flags before reroute {es-pull}98653[#98653] (issue: {es-issue}98606[#98606]) + +Data streams:: +* DSL waits for the tsdb time boundaries to lapse {es-pull}100470[#100470] (issue: {es-issue}99696[#99696]) +* Propagate cancellation in `DataTiersUsageTransportAction` {es-pull}100253[#100253] +* [DSL] Skip deleting indices that have in-progress downsampling operations {es-pull}101495[#101495] + +Downsampling:: +* Make downsample target index replicas configurable {es-pull}99712[#99712] + +ES|QL:: +* "params" correctly parses the values including an optional "type" {es-pull}99310[#99310] (issue: {es-issue}99294[#99294]) +* Account for an exception being thrown when building a `BytesRefArrayBlock` {es-pull}99726[#99726] (issue: {es-issue}99472[#99472]) +* Add arithmetic operators {es-pull}98628[#98628] +* Add identity check in Block equality {es-pull}100377[#100377] (issue: {es-issue}100374[#100374]) +* Adds Enrich implicit `match_fields` to `field_caps` call {es-pull}101456[#101456] (issue: {es-issue}101328[#101328]) +* Better management of not-stored TEXT fields with synthetic source {es-pull}99695[#99695] +* Continue resolving attributes for Eval {es-pull}99601[#99601] (issue: {es-issue}99576[#99576]) +* Create
a Vector when needed for IN {es-pull}99382[#99382] (issue: {es-issue}99347[#99347])
+* ESQL: Fix unreleased block in topn {es-pull}101648[#101648] (issue: {es-issue}101588[#101588])
+* ESQL: check type before casting {es-pull}101492[#101492] (issue: {es-issue}101489[#101489])
+* Fix NPE when aggregating literals {es-pull}99827[#99827]
+* Fix escaping of backslash in LIKE operator {es-pull}101120[#101120] (issue: {es-issue}101106[#101106])
+* Fix eval of functions on foldable literals {es-pull}101438[#101438] (issue: {es-issue}101425[#101425])
+* Fix non-null value being returned for unsupported data types in `ValueSources` {es-pull}100656[#100656] (issue: {es-issue}100048[#100048])
+* Graceful handling of non-bool condition in the filter {es-pull}100645[#100645] (issues: {es-issue}100049[#100049], {es-issue}100409[#100409])
+* Handle queries with non-existing enrich policies and no field {es-pull}100647[#100647] (issue: {es-issue}100593[#100593])
+* Implement serialization of `InvalidMappedField` {es-pull}98972[#98972] (issue: {es-issue}98851[#98851])
+* Improve verifier error for incorrect agg declaration {es-pull}100650[#100650] (issue: {es-issue}100641[#100641])
+* Limit how many bytes `concat()` can process {es-pull}100360[#100360]
+* Make DISSECT parameter `append_separator` case-insensitive {es-pull}101358[#101358] (issue: {es-issue}101138[#101138])
+* Page shouldn't close a block twice {es-pull}100370[#100370] (issues: {es-issue}100356[#100356], {es-issue}100365[#100365])
+* Preserve intermediate aggregation output in local relation {es-pull}100866[#100866] (issue: {es-issue}100807[#100807])
+* Properly handle multi-values in fold() and date math {es-pull}100766[#100766] (issue: {es-issue}100497[#100497])
+* Remove aliasing inside Eval {es-pull}100238[#100238] (issue: {es-issue}100174[#100174])
+* Resilience to non-indexed fields {es-pull}99588[#99588] (issue: {es-issue}99506[#99506])
+* Skip synthetic attributes when planning the physical fragment {es-pull}99188[#99188] (issue: {es-issue}99170[#99170])
+* Support date and time intervals as input params {es-pull}101001[#101001] (issue: {es-issue}99570[#99570])
+* Support queries that don't return underlying fields {es-pull}98759[#98759] (issue: {es-issue}98404[#98404])
+* Use exact attributes for data source extraction {es-pull}99874[#99874] (issue: {es-issue}99183[#99183])
+* `mv_expand` pushes down limit and project and keeps the limit after it untouched {es-pull}100782[#100782] (issues: {es-issue}99971[#99971], {es-issue}100774[#100774])
+* Support metric tsdb fields while querying index patterns {es-pull}100351[#100351] (issue: {es-issue}100144[#100144]) +
+Geo::
+* Use `NamedWritable` to enable `GeoBoundingBox` serialisation {es-pull}99163[#99163] (issue: {es-issue}99089[#99089]) +
+Health::
+* Fix NPE in `StableMasterHealthIndicatorService` {es-pull}98635[#98635]
+* Health report infrastructure doesn't trip the circuit breakers {es-pull}101629[#101629]
+* Propagate cancellation in `GetHealthAction` {es-pull}100273[#100273] +
+Highlighting::
+* Correctly handle `ScriptScoreQuery` in plain highlighter {es-pull}99804[#99804] (issue: {es-issue}99700[#99700])
+* Disable `weight_matches` when kNN query is present {es-pull}101713[#101713] +
+ILM+SLM::
+* Compute SLM retention from `RepositoryData` {es-pull}100092[#100092] (issue: {es-issue}99953[#99953])
+* `WaitForSnapshotStep` verifies if the index belongs to the latest snapshot of that SLM policy {es-pull}100911[#100911] +
+Infra/Core::
+* Add `java.net.NetPermission` to APM
module's permissions {es-pull}99474[#99474]
+* Don't update system index mappings in mixed clusters {es-pull}101778[#101778] (issues: {es-issue}101331[#101331], {es-issue}99778[#99778])
+* Revert "Kibana system index does not allow user templates to affect it" {es-pull}98888[#98888]
+* Specify correct current `IndexVersion` after 8.10 release {es-pull}98574[#98574] (issue: {es-issue}98555[#98555])
+* Tracing: Use `doPriv` when working with spans, use `SpanId` {es-pull}100232[#100232] +
+Infra/Scripting::
+* Improve painless error wrapping {es-pull}100872[#100872] +
+Ingest Node::
+* Improve Tika handling {es-pull}101486[#101486]
+* Update enrich execution to only set index false on fields that support it {es-pull}98038[#98038] (issue: {es-issue}98019[#98019]) +
+Machine Learning::
+* Avoid risk of OOM in datafeeds when memory is constrained {es-pull}98915[#98915] (issue: {es-issue}89769[#89769])
+* Fix inference requests being sent to every node with a model allocation; if there were more nodes than items in the original request, empty requests were sent. {es-pull}100388[#100388] (issue: {es-issue}100180[#100180])
+* Preserve the order of inference results when calling the _infer API with multiple inputs. On a model deployment with more than one allocation, the output order was not guaranteed to match the input order; the fix ensures that it does. {es-pull}100143[#100143]
+* Remove noisy 'Could not find trained model' message {es-pull}100760[#100760]
+* Safely drain deployment request queues before allowing a node to shut down {es-pull}98406[#98406]
+* Use the correct writeable name for model assignment metadata in mixed-version clusters. This prevents a node failure due to `IllegalArgumentException: Unknown NamedWriteable [trained_model_assignment]` {es-pull}100886[#100886]
+* Wait to gracefully stop deployments until alternative allocation exists {es-pull}99107[#99107] +
+Mapping::
+* Automatically disable `ignore_malformed` on datastream `@timestamp` fields {es-pull}99346[#99346]
+* Correct behaviour of `ContentPath::remove()` {es-pull}98332[#98332] (issue: {es-issue}98327[#98327])
+* Fix merges of mappings with `subobjects: false` for composable index templates {es-pull}97317[#97317] (issue: {es-issue}96768[#96768])
+* Percolator to support parsing script score query with params {es-pull}101051[#101051] (issue: {es-issue}97377[#97377]) +
+Network::
+* Do not report failure after connections are made {es-pull}99117[#99117] +
+Percolator::
+* Fix percolator query for stored queries that expand on wildcard field names {es-pull}98878[#98878] +
+Query Languages::
+* Preserve subfields for unsupported types {es-pull}100875[#100875] (issue: {es-issue}100869[#100869]) +
+Recovery::
+* Fix interruption of `markAllocationIdAsInSync` {es-pull}100610[#100610] (issues: {es-issue}96578[#96578], {es-issue}100589[#100589]) +
+Search::
+* Consistent scores for multi-term `SourceConfirmedTestQuery` {es-pull}100846[#100846] (issue: {es-issue}98712[#98712])
+* Fix `UnsignedLong` field range query where `gt "0"` could return results equal to 0 {es-pull}98843[#98843]
+* Fix `advanceExact` for doc values from sources {es-pull}99685[#99685]
+* Fork response-sending in `OpenPointInTimeAction` {es-pull}99222[#99222]
+* [CI] Resolve `SearchResponseTests#testSerialization` failure {es-pull}100020[#100020] (issue: {es-issue}100005[#100005])
+* Fix fuzzy query rewrite parameter not working {es-pull}97642[#97642] +
+Security::
+* Fix NullPointerException in RotableSecret {es-pull}100779[#100779] (issue:
{es-issue}99759[#99759]) +
+Snapshot/Restore::
+* Fix race condition in `SnapshotsService` {es-pull}101652[#101652]
+* Fix snapshot double finalization {es-pull}101497[#101497]
+* Fix thread context in `getRepositoryData` {es-pull}99627[#99627]
+* Frozen index input clone copy cache file {es-pull}98930[#98930]
+* Make S3 anti-contention delay configurable {es-pull}101245[#101245]
+* More robust timeout for repo analysis {es-pull}101184[#101184] (issue: {es-issue}101182[#101182])
+* Register `repository_s3` settings {es-pull}101344[#101344]
+* Reinstate `RepositoryData` BwC {es-pull}100447[#100447] +
+TSDB::
+* Don't ignore empty index templates that have no template definition {es-pull}98840[#98840] (issue: {es-issue}98834[#98834])
+* Fix painless execute api and tsdb issue {es-pull}101212[#101212] (issue: {es-issue}101072[#101072])
+* Make tsdb settings public in Serverless {es-pull}99567[#99567] (issue: {es-issue}99563[#99563]) +
+Transform::
+* Fix possible NPE when getting transform stats for failed transforms {es-pull}98061[#98061] (issue: {es-issue}98052[#98052])
+* Ignore `IndexNotFound` error when refreshing destination index {es-pull}101627[#101627]
+* Make Transform Feature Reset really wait for all the tasks {es-pull}100624[#100624]
+* Make tasks that calculate checkpoints cancellable {es-pull}100808[#100808] +
+Watcher::
+* Treat watcher webhook response header names as case-insensitive {es-pull}99717[#99717] +
+[[deprecation-8.11.0]]
+[float]
+=== Deprecations +
+Rollup::
+* Rollup functionality is now deprecated {es-pull}101265[#101265] +
+[[enhancement-8.11.0]]
+[float]
+=== Enhancements +
+Aggregations::
+* Disable `FilterByFilterAggregator` through `ClusterSettings` {es-pull}99417[#99417] (issue: {es-issue}99335[#99335])
+* Represent histogram value count as long {es-pull}99912[#99912] (issue: {es-issue}99820[#99820])
+* Skip `DisiPriorityQueue` on single filter agg {es-pull}99215[#99215] (issue: {es-issue}99202[#99202])
+* Use a competitive iterator in `FiltersAggregator` {es-pull}98360[#98360] (issue: {es-issue}97544[#97544]) +
+Allocation::
+* Report a node's "roles" setting in the /_cluster/allocation/explain response {es-pull}98550[#98550] (issue: {es-issue}97859[#97859]) +
+Application::
+* Add flamegraph API {es-pull}99091[#99091]
+* [Profiling] Allow customizing the ILM policy {es-pull}99909[#99909]
+* [Profiling] Allow waiting until resources are created {es-pull}99655[#99655] +
+Audit::
+* Reduce verbosity of the bulk indexing audit log {es-pull}98470[#98470] +
+Authentication::
+* Allow native users/roles to be disabled via setting {es-pull}98654[#98654] +
+CAT APIs::
+* Add 'dataset' size to cat indices and cat shards {es-pull}98622[#98622] (issue: {es-issue}95092[#95092]) +
+Data streams::
+* Allow explain data stream lifecycle to accept a data stream {es-pull}98811[#98811] +
+ES|QL::
+* Add `CEIL` function {es-pull}98847[#98847]
+* Add ability to perform date math {es-pull}98870[#98870] (issue: {es-issue}98402[#98402])
+* Add support for TEXT fields in comparison operators and SORT {es-pull}98528[#98528] (issue: {es-issue}98642[#98642])
+* Compact topn {es-pull}99316[#99316]
+* Date math for negatives {es-pull}99711[#99711]
+* Enable arithmetic for durations and periods {es-pull}99432[#99432] (issue: {es-issue}99293[#99293])
+* Enhance SHOW FUNCTIONS command {es-pull}99736[#99736] (issue: {es-issue}99507[#99507])
+* Improve log messages {es-pull}99470[#99470]
+* Log execution time consistently {es-pull}99286[#99286]
+* Log query and execution time
{es-pull}99058[#99058]
+* Log start and end of queries {es-pull}99746[#99746]
+* Lower the implicit limit, if none is user-provided {es-pull}99816[#99816] (issue: {es-issue}99458[#99458])
+* Make settings dynamic {es-pull}101516[#101516]
+* Mark counter fields as unsupported {es-pull}99054[#99054]
+* Remove the swapped-args check for date_xxx() {es-pull}101362[#101362] (issue: {es-issue}99562[#99562])
+* Serialize the source in expressions {es-pull}99956[#99956]
+* Simple check if all blocks get released {es-pull}100199[#100199]
+* Support unsigned long in sqrt and log10 {es-pull}98711[#98711]
+* Use DEBUG log level to report execution steps {es-pull}99303[#99303] +
+Engine::
+* Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers {es-pull}94607[#94607] +
+Health::
+* Avoiding the use of nodes that are no longer in the cluster when computing master stability {es-pull}98809[#98809] (issue: {es-issue}98636[#98636])
+* When a primary is inactive but this is considered expected, the same applies for the replica of this shard. {es-pull}99995[#99995] (issue: {es-issue}99951[#99951]) +
+Infra/Core::
+* APM Metering API {es-pull}99832[#99832]
+* Update the elastic-apm-agent version {es-pull}100064[#100064]
+* Use mappings version to retrieve system index mappings at creation time {es-pull}99555[#99555] +
+Infra/Node Lifecycle::
+* Add links to docs from failing bootstrap checks {es-pull}99644[#99644] (issue: {es-issue}99614[#99614])
+* Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and related action) response {es-pull}99798[#99798] (issue: {es-issue}99678[#99678]) +
+Infra/REST API::
+* Add `IndexVersion` to node info {es-pull}99515[#99515]
+* Add component info versions to node info in a pluggable way {es-pull}99631[#99631]
+* Return a 410 (Gone) status code for unavailable API endpoints {es-pull}97397[#97397] +
+Machine Learning::
+* Add new _inference API {es-pull}99224[#99224]
+* Adding an option for trained models to be platform-specific {es-pull}99584[#99584]
+* Log warnings for jobs unassigned for a long time {es-pull}100154[#100154]
+* Simplify the Inference Ingest Processor configuration {es-pull}100205[#100205] +
+Mapping::
+* Automatically flatten objects when subobjects:false {es-pull}97972[#97972] (issue: {es-issue}88934[#88934])
+* Explicit parsing object capabilities of `FieldMappers` {es-pull}98684[#98684] (issue: {es-issue}98537[#98537])
+* Reintroduce `sparse_vector` mapping {es-pull}98996[#98996] +
+Network::
+* Chunk the cluster allocation explain response {es-pull}99641[#99641] (issue: {es-issue}97803[#97803]) +
+Recovery::
+* Wait for cluster state in recovery {es-pull}99193[#99193] +
+Search::
+* Add additional counters to `_clusters` response for all Cluster search states {es-pull}99566[#99566] (issue: {es-issue}98927[#98927])
+* Adding support for exists queries to `sparse_vector` fields {es-pull}99775[#99775] (issue: {es-issue}99319[#99319])
+* Make `_index` optional for pinned query docs {es-pull}97450[#97450]
+* Reduce copying when creating scroll/PIT ids {es-pull}99219[#99219]
+* Refactor `SearchResponseClusters` to use CHM {es-pull}100129[#100129] (issue: {es-issue}99101[#99101])
+* Support cluster/details for CCS minimize_roundtrips=false {es-pull}98457[#98457] +
+Security::
+* Support rotating the JWT shared secret {es-pull}99278[#99278] +
+Snapshot/Restore::
+* Remove shard data files when they fail to write for snapshot {es-pull}99694[#99694] +
+Stats::
+* Prune unnecessary information from
TransportNodesInfoAction.NodeInfoRequest {es-pull}99938[#99938] (issue: {es-issue}99744[#99744]) + +TSDB:: +* Add `index.look_back_time` setting for tsdb data streams {es-pull}98518[#98518] (issue: {es-issue}98463[#98463]) +* Improve time-series error and documentation {es-pull}100018[#100018] +* Trim stored fields for `_id` field in tsdb {es-pull}97409[#97409] + +Transform:: +* Add accessors required to recreate `TransformStats` object from the fields {es-pull}98844[#98844] + +Vector Search:: +* Add new max_inner_product vector similarity function {es-pull}99445[#99445] +* Adds `nested` support for indexed `dense_vector` fields {es-pull}99532[#99532] +* Dense vector field types are indexed by default {es-pull}98268[#98268] +* Increase the max vector dims to 4096 {es-pull}99682[#99682] + +[[feature-8.11.0]] +[float] +=== New features + +Analysis:: +* Add support for Persian language stemmer {es-pull}99106[#99106] (issue: {es-issue}98911[#98911]) + +Application:: +* Automatically map float arrays of lengths 128 - 2048 as dense_vector {es-pull}98512[#98512] (issue: {es-issue}97532[#97532]) + +Data streams:: +* GA the data stream lifecycle {es-pull}100187[#100187] +* GET `_data_stream` displays both ILM and DSL information {es-pull}99947[#99947] + +ES|QL:: +* Integrate Elasticsearch Query Language, ES|QL {es-pull}98309[#98309] +* LEAST and GREATEST functions {es-pull}98630[#98630] +* LEFT function {es-pull}98942[#98942] +* LTRIM, RTRIM and fix unicode whitespace {es-pull}98590[#98590] +* RIGHT function {es-pull}98974[#98974] +* TopN sorting with min and max for multi-value fields {es-pull}98337[#98337] + +[[upgrade-8.11.0]] +[float] +=== Upgrades + +Packaging:: +* Update bundled JDK to 21.0.1 {es-pull}101133[#101133] + +Search:: +* Upgrade main to Lucene 9.8.0 {es-pull}100138[#100138] + From 16cce1fc1a335caa61a764aa843faaffb3e53cbc Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 8 Nov 2023 07:12:10 +0100 Subject: [PATCH 29/30] Mark legacy stack templates as deprecated (#101476) --- docs/changelog/101476.yaml | 5 ++ .../xpack/apmdata/ResourceUtils.java | 7 +- .../apmdata/YamlIngestPipelineConfig.java | 2 +- .../core/template/IngestPipelineConfig.java | 14 +++ .../template/JsonIngestPipelineConfig.java | 14 ++- .../xpack/core/template/TemplateUtils.java | 2 +- .../main/resources/180-days@lifecycle.json | 3 +- .../src/main/resources/30-days@lifecycle.json | 3 +- .../main/resources/365-days@lifecycle.json | 3 +- .../src/main/resources/7-days@lifecycle.json | 3 +- .../src/main/resources/90-days@lifecycle.json | 3 +- .../main/resources/data-streams@mappings.json | 3 +- .../src/main/resources/ecs@mappings.json | 3 +- .../resources/kibana-reporting@template.json | 3 +- .../main/resources/logs@default-pipeline.json | 3 +- .../main/resources/logs@json-pipeline.json | 3 +- .../src/main/resources/logs@lifecycle.json | 3 +- .../src/main/resources/logs@mappings.json | 3 +- .../src/main/resources/logs@settings.json | 3 +- .../src/main/resources/logs@template.json | 3 +- .../src/main/resources/metrics@lifecycle.json | 3 +- .../src/main/resources/metrics@mappings.json | 3 +- .../src/main/resources/metrics@settings.json | 3 +- .../src/main/resources/metrics@template.json | 3 +- .../main/resources/metrics@tsdb-settings.json | 3 +- .../main/resources/synthetics@lifecycle.json | 3 +- .../main/resources/synthetics@mappings.json | 3 +- .../main/resources/synthetics@settings.json | 3 +- .../main/resources/synthetics@template.json | 3 +- .../stack/LegacyStackTemplateRegistry.java | 63 +++++++++---- 
.../xpack/stack/StackTemplateRegistry.java | 90 ++++++++++++++----- .../LegacyStackTemplateRegistryTests.java | 63 +++++++++++++ .../stack/StackTemplateRegistryTests.java | 17 ++++ 33 files changed, 277 insertions(+), 69 deletions(-) create mode 100644 docs/changelog/101476.yaml create mode 100644 x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java diff --git a/docs/changelog/101476.yaml b/docs/changelog/101476.yaml new file mode 100644 index 0000000000000..ee4cd9b1e4b1a --- /dev/null +++ b/docs/changelog/101476.yaml @@ -0,0 +1,5 @@ +pr: 101476 +summary: Mark legacy stack templates as deprecated +area: Indices APIs +type: enhancement +issues: [] diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java index b9a6edfb958f3..1e6a9a9998a82 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java @@ -13,15 +13,20 @@ import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.Map; public class ResourceUtils { public static final String APM_TEMPLATE_VERSION_VARIABLE = "xpack.apmdata.template.version"; static byte[] loadVersionedResourceUTF8(String name, int version) { + return loadVersionedResourceUTF8(name, version, Map.of()); + } + + static byte[] loadVersionedResourceUTF8(String name, int version, Map variables) { try { String content = loadResource(name); - content = TemplateUtils.replaceVariable(content, APM_TEMPLATE_VERSION_VARIABLE, String.valueOf(version)); + content = TemplateUtils.replaceVariables(content, String.valueOf(version), APM_TEMPLATE_VERSION_VARIABLE, variables); return content.getBytes(StandardCharsets.UTF_8); } catch (IOException e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java index 938fd69f80abe..de1b715dd138d 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java @@ -31,6 +31,6 @@ public XContentType getXContentType() { @Override public BytesReference loadConfig() { - return new BytesArray(loadVersionedResourceUTF8("/ingest-pipelines/" + id + ".yaml", version)); + return new BytesArray(loadVersionedResourceUTF8("/ingest-pipelines/" + id + ".yaml", version, variables)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java index a216030f1c2e0..2768355183687 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java @@ -12,6 +12,7 @@ import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -22,6 +23,7 @@ public abstract class IngestPipelineConfig { protected final String resource; protected final int version; protected final String versionProperty; + protected final Map variables; /** * A 
list of this pipeline's dependencies, for example - such referred to through a pipeline processor. @@ -35,11 +37,23 @@ public IngestPipelineConfig(String id, String resource, int version, String vers } public IngestPipelineConfig(String id, String resource, int version, String versionProperty, List dependencies) { + this(id, resource, version, versionProperty, dependencies, Map.of()); + } + + public IngestPipelineConfig( + String id, + String resource, + int version, + String versionProperty, + List dependencies, + Map variables + ) { this.id = Objects.requireNonNull(id); this.resource = Objects.requireNonNull(resource); this.version = version; this.versionProperty = Objects.requireNonNull(versionProperty); this.dependencies = dependencies; + this.variables = Objects.requireNonNull(variables); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java index fc2ca7cbce186..05a27de40aadc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java @@ -12,6 +12,7 @@ import org.elasticsearch.xcontent.XContentType; import java.util.List; +import java.util.Map; public class JsonIngestPipelineConfig extends IngestPipelineConfig { public JsonIngestPipelineConfig(String id, String resource, int version, String versionProperty) { @@ -22,6 +23,17 @@ public JsonIngestPipelineConfig(String id, String resource, int version, String super(id, resource, version, versionProperty, dependencies); } + public JsonIngestPipelineConfig( + String id, + String resource, + int version, + String versionProperty, + List dependencies, + Map variables + ) { + super(id, resource, version, versionProperty, dependencies, variables); + } + @Override public XContentType getXContentType() { return XContentType.JSON; @@ -29,6 +41,6 @@ public XContentType getXContentType() { @Override public BytesReference loadConfig() { - return new BytesArray(TemplateUtils.loadTemplate(resource, String.valueOf(version), versionProperty)); + return new BytesArray(TemplateUtils.loadTemplate(resource, String.valueOf(version), versionProperty, variables)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index ad27607e47c5e..d0be0ad9cb697 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -98,7 +98,7 @@ public static void validate(String source) { } } - private static String replaceVariables(String input, String version, String versionProperty, Map variables) { + public static String replaceVariables(String input, String version, String versionProperty, Map variables) { String template = replaceVariable(input, versionProperty, version); for (Map.Entry variable : variables.entrySet()) { template = replaceVariable(template, variable.getKey(), variable.getValue()); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/180-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/180-days@lifecycle.json index 7929d4cb5594c..0fcaddb9a02ce 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/180-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/180-days@lifecycle.json @@ -33,5 +33,6 @@ "_meta": { "description": "built-in ILM policy using the hot, warm, and cold phases with a retention of 180 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/30-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/30-days@lifecycle.json index 6d5a12b39762d..5764b75299ced 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/30-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/30-days@lifecycle.json @@ -29,5 +29,6 @@ "_meta": { "description": "built-in ILM policy using the hot and warm phases with a retention of 30 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/365-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/365-days@lifecycle.json index 3d2340245f117..4398b14387dec 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/365-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/365-days@lifecycle.json @@ -33,5 +33,6 @@ "_meta": { "description": "built-in ILM policy using the hot, warm, and cold phases with a retention of 365 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json index 2c5778e5af1db..1a1f74beac516 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json @@ -29,5 +29,6 @@ "_meta": { "description": "built-in ILM policy using the hot and warm phases with a retention of 7 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json index cae4e7c83a064..e0d2487c8961a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json @@ -33,5 +33,6 @@ "_meta": { "description": "built-in ILM policy using the hot, warm, and cold phases with a retention of 90 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json index f87c0e79b7c45..96bbeca8f7ac8 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json @@ -63,5 +63,6 @@ "description": "general mapping conventions for data streams", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json 
b/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json index fc29fc98dca96..f1d03531e4b6b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json @@ -190,5 +190,6 @@ "description": "dynamic mappings based on ECS, installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json index a4388d671eb0d..b92942ff010d6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json @@ -173,5 +173,6 @@ "description": "default kibana reporting template installed by elasticsearch", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json index 518ff3cece752..d8dc9cca5ea7c 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json @@ -20,5 +20,6 @@ "description": "default pipeline for the logs index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json index cebeccd344324..e3b0f85642a46 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json @@ -44,5 +44,6 @@ "description": "automatic parsing of JSON log messages", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json index 6bce19aaaab49..5b58c138d785f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the logs index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json index 7417d4809559d..82cbf7e478a27 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json @@ -23,5 +23,6 @@ "description": "default mappings for the logs index template installed by x-pack", 
"managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json index cc61f195402fe..908b01027e833 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json @@ -20,5 +20,6 @@ "description": "default settings for the logs index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json index b41b2d0453c89..f9b945d75f4f8 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json @@ -14,5 +14,6 @@ "description": "default logs template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json index 3c37e8db4a7da..daa07659e559e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the metrics index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json index 5741b441256f9..4e48f6b7adaed 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json @@ -53,5 +53,6 @@ "description": "default mappings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json index 1a13139bb18a4..3a0e6feeaede4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json @@ -16,5 +16,6 @@ "description": "default settings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json index a596314bc9e8c..464df09ffe2ce 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json @@ -12,5 +12,6 @@ "description": "default metrics template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json index cbcad39ef78d0..6a64ff9be5473 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json @@ -15,5 +15,6 @@ "description": "default settings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json index 1e4220725177d..aa2cf5489b45f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the synthetics index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json index 9e3e56e3261d0..81b85285450c7 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json @@ -17,5 +17,6 @@ "description": "default mappings for the synthetics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json index 27ced96be36e3..04d68d083bf9f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json @@ -13,5 +13,6 @@ "description": "default settings for the synthetics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json index 6369bd5a82c15..344426541b8c5 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json @@ -12,5 +12,6 @@ "description": "default synthetics template installed by x-pack", "managed": true }, - "version": 
${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java index 9fb33db74964a..f186d8e20f874 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java @@ -52,6 +52,8 @@ public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { private final ClusterService clusterService; private volatile boolean stackTemplateEnabled; + private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "true"); + // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams-mappings"; @@ -121,14 +123,14 @@ private void updateEnabledSetting(boolean newValue) { } private static final List LIFECYCLE_POLICY_CONFIGS = List.of( - new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json"), - new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json"), - new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json"), - new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json") + new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) ); @Override @@ -154,55 +156,64 @@ protected List getLifecyclePolicies() { DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/data-streams@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/logs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( ECS_DYNAMIC_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/ecs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/logs@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + 
TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/metrics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_TSDB_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@tsdb-settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) )) { try { @@ -232,8 +243,22 @@ protected Map getComposableTemplateConfigs() { } private static final List INGEST_PIPELINE_CONFIGS = List.of( - new JsonIngestPipelineConfig("logs@json-message", "/logs@json-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new JsonIngestPipelineConfig("logs-default-pipeline", "/logs@default-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE) + new JsonIngestPipelineConfig( + "logs@json-message", + "/logs@json-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ), + new JsonIngestPipelineConfig( + "logs-default-pipeline", + "/logs@default-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ) ); @Override diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 3471d312d9df8..af5dce0ee927c 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -55,6 +55,8 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { private final ClusterService clusterService; private volatile boolean stackTemplateEnabled; + private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "false"); + // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams@mappings"; @@ -132,14 +134,14 @@ private void updateEnabledSetting(boolean newValue) { } private static final List LIFECYCLE_POLICY_CONFIGS = List.of( - new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json"), - new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json"), - new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json"), - new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, 
"/365-days@lifecycle.json") + new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) ); @Override @@ -161,55 +163,64 @@ protected List getLifecyclePolicies() { DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/data-streams@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/logs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( ECS_DYNAMIC_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/ecs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/logs@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/metrics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_TSDB_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@tsdb-settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) )) { try { @@ -230,14 +241,33 @@ protected Map getComponentTemplateConfigs() { } private static final Map COMPOSABLE_INDEX_TEMPLATE_CONFIGS = parseComposableTemplates( - new IndexTemplateConfig(LOGS_INDEX_TEMPLATE_NAME, "/logs@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new IndexTemplateConfig(METRICS_INDEX_TEMPLATE_NAME, "/metrics@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new IndexTemplateConfig(SYNTHETICS_INDEX_TEMPLATE_NAME, "/synthetics@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), + new IndexTemplateConfig( + LOGS_INDEX_TEMPLATE_NAME, + "/logs@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), + new 
IndexTemplateConfig( + METRICS_INDEX_TEMPLATE_NAME, + "/metrics@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), + new IndexTemplateConfig( + SYNTHETICS_INDEX_TEMPLATE_NAME, + "/synthetics@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), new IndexTemplateConfig( KIBANA_REPORTING_INDEX_TEMPLATE_NAME, "/kibana-reporting@template.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) ); @@ -251,8 +281,22 @@ protected Map getComposableTemplateConfigs() { } private static final List INGEST_PIPELINE_CONFIGS = List.of( - new JsonIngestPipelineConfig("logs@json-pipeline", "/logs@json-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new JsonIngestPipelineConfig("logs@default-pipeline", "/logs@default-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE) + new JsonIngestPipelineConfig( + "logs@json-pipeline", + "/logs@json-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ), + new JsonIngestPipelineConfig( + "logs@default-pipeline", + "/logs@default-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ) ); @Override diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java new file mode 100644 index 0000000000000..1cb9e909310e1 --- /dev/null +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.stack; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.ComponentTemplate; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.junit.After; +import org.junit.Before; + +public class LegacyStackTemplateRegistryTests extends ESTestCase { + private LegacyStackTemplateRegistry registry; + private ThreadPool threadPool; + + @Before + public void createRegistryAndClient() { + threadPool = new TestThreadPool(this.getClass().getName()); + Client client = new NoOpClient(threadPool); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + registry = new LegacyStackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testThatTemplatesAreDeprecated() { + for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { + assertTrue(it.deprecated()); + } + for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { + assertTrue(ilm.isDeprecated()); + } + for (ComponentTemplate ct : registry.getComponentTemplateConfigs().values()) { + assertTrue(ct.deprecated()); + } + registry.getIngestPipelines() + .stream() + .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) + .map(PipelineConfiguration::getConfigAsMap) + .forEach(p -> assertTrue((Boolean) p.get("deprecated"))); + } + +} diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index 8e0cbc3f82f35..b3ddb6b132d53 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -507,6 +507,23 @@ public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { registry.clusterChanged(event); } + public void testThatTemplatesAreNotDeprecated() { + for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { + assertFalse(it.deprecated()); + } + for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { + assertFalse(ilm.isDeprecated()); + } + for (ComponentTemplate ct : registry.getComponentTemplateConfigs().values()) { + assertFalse(ct.deprecated()); + } + registry.getIngestPipelines() + .stream() + .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) + .map(PipelineConfiguration::getConfigAsMap) + .forEach(p -> assertFalse((Boolean) p.get("deprecated"))); + } + // ------------- /** From e786cfa7061b427cf6185ad907069838dd679574 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 8 Nov 2023 08:35:14 +0100 Subject: [PATCH 30/30] Prefer 
using builder for ComposableIndexTemplate (#101760) There's a recurring pattern where we make a copy of a `ComposableIndexTemplate` while changing a single property. We tend to use the constructor and manually supply all properties. This isn't very robust when adding properties to the class as you can easily overlook supplying the new property to the copied instance. It also leads to a proliferation of overloaded constructors, as changing all callers to use the new constructor is sometimes not feasible since it would create a very large change set. Also, we'd mostly be supplying `null` values for newly added properties in existing test cases anyway. Often, tests are just setting one or two properties and the rest are null values, which doesn't make the tests very readable. The `ComposableIndexTemplate` class already has a `Builder` class. This PR deprecates all constructors of `ComposableIndexTemplate` in favor of using `ComposableIndexTemplate.Builder`. Changing all constructor calls to use the builder everywhere is not feasible as there are too many usages. But the PR uses the builder for all instances where we make a copy with a minor modification. We can think about doing the same for other classes but this is the one where it seems most important based on the large number of constructor overloads and the bugs or almost committed bugs related to this class. Also, conveniently, there's already a builder class for it. --- .../DataStreamGetWriteIndexTests.java | 3 +- ...etadataDataStreamRolloverServiceTests.java | 12 +- .../metadata/ComposableIndexTemplate.java | 76 +++++---- .../MetadataIndexTemplateService.java | 40 ++--- .../indices/create/AutoCreateActionTests.java | 8 +- .../MetadataRolloverServiceTests.java | 25 ++- .../SimulateIndexTemplateRequestTests.java | 4 +- .../post/SimulateTemplateRequestTests.java | 4 +- ...utComposableIndexTemplateRequestTests.java | 12 +- .../action/support/AutoCreateIndexTests.java | 12 +- .../ComposableIndexTemplateTests.java | 160 ++++-------------- .../MetadataCreateDataStreamServiceTests.java | 14 +- .../MetadataIndexTemplateServiceTests.java | 84 +++------ .../DataTierAllocationDeciderIT.java | 4 +- .../core/ml/utils/MlIndexAndAliasTests.java | 2 +- ...adataMigrateToDataTiersRoutingService.java | 2 +- ...MigrateToDataTiersRoutingServiceTests.java | 7 +- .../LegacyStackTemplateRegistryTests.java | 2 +- .../stack/StackTemplateRegistryTests.java | 2 +- .../notifications/TransformAuditor.java | 3 +- 20 files changed, 185 insertions(+), 291 deletions(-) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index e55ff022693b3..07a80683b24fa 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -290,7 +290,8 @@ public void cleanup() { } private ClusterState createInitialState() { - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of("logs-*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 0391f91a35fb3..4c333c3f0ab8d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -71,7 +71,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { false, IndexMode.TIME_SERIES ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -176,7 +177,8 @@ public void testRolloverAndMigrateDataStream() throws Exception { false, dsIndexMode ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -262,7 +264,8 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting false, IndexMode.TIME_SERIES ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) @@ -477,7 +480,8 @@ private static ClusterState createClusterState(String dataStreamName, int number false, null ); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .template( new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) ) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 47ab1d099c037..faa3010adbf72 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -107,6 +107,14 @@ public static ComposableIndexTemplate parse(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } + public static Builder builder() { + return new Builder(); + } + + /** + * @deprecated use {@link Builder} instead + */ + @Deprecated(forRemoval = true) public ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @@ -115,9 +123,13 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null, null); } + /** + * @deprecated use {@link Builder} instead + */ + 
@Deprecated(forRemoval = true) public ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @@ -127,9 +139,13 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null, null); } + /** + * @deprecated use {@link Builder} instead + */ + @Deprecated(forRemoval = true) public ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @@ -140,34 +156,13 @@ public ComposableIndexTemplate( @Nullable DataStreamTemplate dataStreamTemplate, @Nullable Boolean allowAutoCreate ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null); - } - - ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate, - @Nullable Boolean allowAutoCreate, - @Nullable List ignoreMissingComponentTemplates - ) { - this( - indexPatterns, - template, - componentTemplates, - priority, - version, - metadata, - dataStreamTemplate, - allowAutoCreate, - ignoreMissingComponentTemplates, - null - ); + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null, null); } + /** + * @deprecated use {@link Builder} instead + */ + @Deprecated(forRemoval = true) public ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @@ -287,10 +282,6 @@ public List getIgnoreMissingComponentTemplates() { return ignoreMissingComponentTemplates; } - public Boolean deprecated() { - return deprecated; - } - public boolean isDeprecated() { return Boolean.TRUE.equals(deprecated); } @@ -412,6 +403,10 @@ static boolean componentTemplatesEquals(List c1, List c2) { return false; } + public Builder toBuilder() { + return new Builder(this); + } + @Override public String toString() { return Strings.toString(this); @@ -535,8 +530,25 @@ public static class Builder { private List ignoreMissingComponentTemplates; private Boolean deprecated; + /** + * @deprecated use {@link ComposableIndexTemplate#builder()} + */ + @Deprecated(forRemoval = true) public Builder() {} + private Builder(ComposableIndexTemplate template) { + this.indexPatterns = template.indexPatterns; + this.template = template.template; + this.componentTemplates = template.componentTemplates; + this.priority = template.priority; + this.version = template.version; + this.metadata = template.metadata; + this.dataStreamTemplate = template.dataStreamTemplate; + this.allowAutoCreate = template.allowAutoCreate; + this.ignoreMissingComponentTemplates = template.ignoreMissingComponentTemplates; + this.deprecated = template.deprecated; + } + public Builder indexPatterns(List indexPatterns) { this.indexPatterns = indexPatterns; return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index e0dc1728eab6a..0c78d497d1194 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -615,18 
+615,7 @@ public ClusterState addIndexTemplateV2( CompressedXContent mappings = innerTemplate.mappings(); CompressedXContent wrappedMappings = wrapMappingsIfNecessary(mappings, xContentRegistry); final Template finalTemplate = new Template(finalSettings, wrappedMappings, innerTemplate.aliases(), innerTemplate.lifecycle()); - finalIndexTemplate = new ComposableIndexTemplate( - template.indexPatterns(), - finalTemplate, - template.composedOf(), - template.priority(), - template.version(), - template.metadata(), - template.getDataStreamTemplate(), - template.getAllowAutoCreate(), - template.getIgnoreMissingComponentTemplates(), - template.deprecated() - ); + finalIndexTemplate = template.toBuilder().template(finalTemplate).build(); } if (finalIndexTemplate.equals(existing)) { @@ -713,23 +702,16 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT // Then apply settings resolved from templates: finalSettings.put(finalTemplate.map(Template::settings).orElse(Settings.EMPTY)); - var templateToValidate = new ComposableIndexTemplate( - indexTemplate.indexPatterns(), - new Template( - finalSettings.build(), - finalTemplate.map(Template::mappings).orElse(null), - finalTemplate.map(Template::aliases).orElse(null), - finalTemplate.map(Template::lifecycle).orElse(null) - ), - indexTemplate.composedOf(), - indexTemplate.priority(), - indexTemplate.version(), - indexTemplate.metadata(), - indexTemplate.getDataStreamTemplate(), - indexTemplate.getAllowAutoCreate(), - indexTemplate.getIgnoreMissingComponentTemplates(), - indexTemplate.deprecated() - ); + var templateToValidate = indexTemplate.toBuilder() + .template( + new Template( + finalSettings.build(), + finalTemplate.map(Template::mappings).orElse(null), + finalTemplate.map(Template::aliases).orElse(null), + finalTemplate.map(Template::lifecycle).orElse(null) + ) + ) + .build(); validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/AutoCreateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/AutoCreateActionTests.java index f09f565582f4a..9c390cefdcda0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/AutoCreateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/AutoCreateActionTests.java @@ -25,17 +25,19 @@ public void testResolveTemplates() { { Metadata.Builder mdBuilder = new Metadata.Builder(); DataStreamTemplate dataStreamTemplate = new DataStreamTemplate(); - mdBuilder.put("1", new ComposableIndexTemplate.Builder().indexPatterns(List.of("legacy-logs-*")).priority(10L).build()); + mdBuilder.put("1", ComposableIndexTemplate.builder().indexPatterns(List.of("legacy-logs-*")).priority(10L).build()); mdBuilder.put( "2", - new ComposableIndexTemplate.Builder().indexPatterns(List.of("logs-*")) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) .priority(20L) .dataStreamTemplate(dataStreamTemplate) .build() ); mdBuilder.put( "3", - new ComposableIndexTemplate.Builder().indexPatterns(List.of("logs-*")) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) .priority(30L) .dataStreamTemplate(dataStreamTemplate) .build() diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 
8b5e6182060b4..83bdc68d0b9c0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -348,7 +348,8 @@ public void testRejectDuplicateAliasV2() { Map aliases = new HashMap<>(); aliases.put("foo-write", AliasMetadata.builder("foo-write").build()); aliases.put("bar-write", AliasMetadata.builder("bar-write").writeIndex(randomBoolean()).build()); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(Arrays.asList("foo-*", "bar-*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-*", "bar-*")) .template(new Template(null, null, aliases)) .build(); @@ -370,7 +371,8 @@ public void testRejectDuplicateAliasV2UsingComponentTemplates() { aliases.put("foo-write", AliasMetadata.builder("foo-write").build()); aliases.put("bar-write", AliasMetadata.builder("bar-write").writeIndex(randomBoolean()).build()); final ComponentTemplate ct = new ComponentTemplate(new Template(null, null, aliases), null, null); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(Arrays.asList("foo-*", "bar-*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-*", "bar-*")) .componentTemplates(Collections.singletonList("ct")) .build(); @@ -396,9 +398,10 @@ public void testRolloverDoesntRejectOperationIfValidComposableTemplateOverridesL .build(); // v2 template overrides the v1 template and does not define the rollover aliases - final ComposableIndexTemplate composableTemplate = new ComposableIndexTemplate.Builder().indexPatterns( - Arrays.asList("foo-*", "bar-*") - ).template(new Template(null, null, null)).build(); + final ComposableIndexTemplate composableTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-*", "bar-*")) + .template(new Template(null, null, null)) + .build(); final Metadata metadata = Metadata.builder() .put(createMetadata(randomAlphaOfLengthBetween(5, 7)), false) @@ -441,7 +444,8 @@ public void testHiddenAffectsResolvedV2Templates() { Map aliases = new HashMap<>(); aliases.put("foo-write", AliasMetadata.builder("foo-write").build()); aliases.put("bar-write", AliasMetadata.builder("bar-write").writeIndex(randomBoolean()).build()); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(Collections.singletonList("*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("*")) .template(new Template(null, null, aliases)) .build(); @@ -472,7 +476,8 @@ public void testHiddenAffectsResolvedV2ComponentTemplates() { aliases.put("foo-write", AliasMetadata.builder("foo-write").build()); aliases.put("bar-write", AliasMetadata.builder("bar-write").writeIndex(randomBoolean()).build()); final ComponentTemplate ct = new ComponentTemplate(new Template(null, null, aliases), null, null); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(Collections.singletonList("*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("*")) .componentTemplates(Collections.singletonList("ct")) .build(); @@ -575,7 +580,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { final DataStream dataStream = 
DataStreamTestHelper.randomInstance() // ensure no replicate data stream .promoteDataStream(); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); Metadata.Builder builder = Metadata.builder(); @@ -651,7 +657,8 @@ public void testValidation() throws Exception { rolloverTarget = dataStream.getName(); sourceIndexName = dataStream.getIndices().get(dataStream.getIndices().size() - 1).getName(); defaultRolloverIndexName = DataStream.getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration() + 1); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream.getName() + "*")) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); builder.put("template", template); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java index 452ef77d9d963..ee1c423c74a4d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java @@ -52,9 +52,7 @@ public void testIndexNameCannotBeNullOrEmpty() { public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); - ComposableIndexTemplate globalTemplate = new ComposableIndexTemplate.Builder().indexPatterns(List.of("*")) - .template(template) - .build(); + ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java index cfcdb61813b99..05cfe6ef7068c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java @@ -55,9 +55,7 @@ public void testIndexNameCannotBeNullOrEmpty() { public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); - ComposableIndexTemplate globalTemplate = new ComposableIndexTemplate.Builder().indexPatterns(List.of("*")) - .template(template) - .build(); + ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java index 31855a5c7bf67..cd5c1c477a108 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java @@ -46,9 +46,7 @@ protected PutComposableIndexTemplateAction.Request mutateInstance(PutComposableI public void testPutGlobalTemplatesCannotHaveHiddenIndexSetting() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); - ComposableIndexTemplate globalTemplate = new ComposableIndexTemplate.Builder().indexPatterns(List.of("*")) - .template(template) - .build(); + ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); @@ -74,7 +72,7 @@ public void testPutIndexTemplateV2RequestMustContainTemplate() { public void testValidationOfPriority() { PutComposableIndexTemplateAction.Request req = new PutComposableIndexTemplateAction.Request("test"); - req.indexTemplate(new ComposableIndexTemplate.Builder().indexPatterns(Arrays.asList("foo", "bar")).priority(-5L).build()); + req.indexTemplate(ComposableIndexTemplate.builder().indexPatterns(Arrays.asList("foo", "bar")).priority(-5L).build()); ActionRequestValidationException validationException = req.validate(); assertThat(validationException, is(notNullValue())); List validationErrors = validationException.validationErrors(); @@ -85,13 +83,11 @@ public void testValidationOfPriority() { public void testValidateNoTemplate() { PutComposableIndexTemplateAction.Request req = new PutComposableIndexTemplateAction.Request("test"); - req.indexTemplate(new ComposableIndexTemplate.Builder().indexPatterns(Collections.singletonList("*")).build()); + req.indexTemplate(ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList("*")).build()); assertNull(req.validate()); req.indexTemplate( - new ComposableIndexTemplate.Builder().indexPatterns(Collections.singletonList("*")) - .template(new Template(null, null, null)) - .build() + ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList("*")).template(new Template(null, null, null)).build() ); assertNull(req.validate()); } diff --git a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java index afe8e10516a4e..6e68276721efb 100644 --- a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java @@ -232,7 +232,8 @@ public void testUpdate() { */ public void testNullAllowAutoCreateInTemplateDoesNotOverrideMatchingAutoCreateIndexSetting() { String randomIndex = randomAlphaOfLengthBetween(2, 10); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(randomIndex.charAt(0) + "*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(randomIndex.charAt(0) + "*")) .componentTemplates(List.of()) 
.metadata(Map.of()) .build(); @@ -252,7 +253,8 @@ public void testNullAllowAutoCreateInTemplateDoesNotOverrideMatchingAutoCreateIn */ public void testCanHandleNullAutoCreateSettingInTemplate() { String randomIndex = randomAlphaOfLengthBetween(2, 10); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(randomIndex.charAt(0) + "*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(randomIndex.charAt(0) + "*")) .componentTemplates(List.of()) .metadata(Map.of()) .build(); @@ -275,7 +277,8 @@ public void testCanHandleNullAutoCreateSettingInTemplate() { */ public void testDisabledAutoCreateTemplateSettingDoesNotOverride() { String randomIndex = randomAlphaOfLengthBetween(2, 10); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(randomIndex.charAt(0) + "*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(randomIndex.charAt(0) + "*")) .componentTemplates(List.of()) .metadata(Map.of()) .allowAutoCreate(false) @@ -299,7 +302,8 @@ public void testDisabledAutoCreateTemplateSettingDoesNotOverride() { */ public void testEnabledAutoCreateTemplateSettingDoesOverride() { String randomIndex = randomAlphaOfLengthBetween(2, 10); - final ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(randomIndex.charAt(0) + "*")) + final ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(randomIndex.charAt(0) + "*")) .componentTemplates(List.of()) .metadata(Map.of()) .allowAutoCreate(true) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index f617692710ebc..ac969eb7c9a10 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -84,18 +84,18 @@ public static ComposableIndexTemplate randomInstance() { List indexPatterns = randomList(1, 4, () -> randomAlphaOfLength(4)); List ignoreMissingComponentTemplates = randomList(0, 4, () -> randomAlphaOfLength(4)); - return new ComposableIndexTemplate( - indexPatterns, - template, - randomBoolean() ? null : randomList(0, 10, () -> randomAlphaOfLength(5)), - randomBoolean() ? null : randomNonNegativeLong(), - randomBoolean() ? null : randomNonNegativeLong(), - meta, - dataStreamTemplate, - randomOptionalBoolean(), - ignoreMissingComponentTemplates, - randomOptionalBoolean() - ); + return ComposableIndexTemplate.builder() + .indexPatterns(indexPatterns) + .template(template) + .componentTemplates(randomBoolean() ? null : randomList(0, 10, () -> randomAlphaOfLength(5))) + .priority(randomBoolean() ? null : randomNonNegativeLong()) + .version(randomBoolean() ? 
null : randomNonNegativeLong()) + .metadata(meta) + .dataStreamTemplate(dataStreamTemplate) + .allowAutoCreate(randomOptionalBoolean()) + .ignoreMissingComponentTemplates(ignoreMissingComponentTemplates) + .deprecated(randomOptionalBoolean()) + .build(); } private static Map randomAliases() { @@ -165,130 +165,39 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.indexPatterns(), () -> randomList(1, 4, () -> randomAlphaOfLength(4)) ); - return new ComposableIndexTemplate( - newIndexPatterns, - orig.template(), - orig.composedOf(), - orig.priority(), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - null, - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder().indexPatterns(newIndexPatterns).build(); case 1: - return new ComposableIndexTemplate( - orig.indexPatterns(), - randomValueOtherThan( - orig.template(), - () -> new Template(randomSettings(), randomMappings(orig.getDataStreamTemplate()), randomAliases()) - ), - orig.composedOf(), - orig.priority(), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder() + .template( + randomValueOtherThan( + orig.template(), + () -> new Template(randomSettings(), randomMappings(orig.getDataStreamTemplate()), randomAliases()) + ) + ) + .build(); case 2: List newComposedOf = randomValueOtherThan(orig.composedOf(), () -> randomList(0, 10, () -> randomAlphaOfLength(5))); - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - newComposedOf, - orig.priority(), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder().componentTemplates(newComposedOf).build(); case 3: - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - randomValueOtherThan(orig.priority(), ESTestCase::randomNonNegativeLong), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder().priority(randomValueOtherThan(orig.priority(), ESTestCase::randomNonNegativeLong)).build(); case 4: - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - orig.priority(), - randomValueOtherThan(orig.version(), ESTestCase::randomNonNegativeLong), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder().version(randomValueOtherThan(orig.version(), ESTestCase::randomNonNegativeLong)).build(); case 5: - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - orig.priority(), - orig.version(), - randomValueOtherThan(orig.metadata(), ComposableIndexTemplateTests::randomMeta), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder().metadata(randomValueOtherThan(orig.metadata(), ComposableIndexTemplateTests::randomMeta)).build(); case 6: - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - orig.priority(), - orig.version(), - orig.metadata(), - 
randomValueOtherThan(orig.getDataStreamTemplate(), ComposableIndexTemplateTests::randomDataStreamTemplate), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.deprecated() - ); + return orig.toBuilder() + .dataStreamTemplate( + randomValueOtherThan(orig.getDataStreamTemplate(), ComposableIndexTemplateTests::randomDataStreamTemplate) + ) + .build(); case 7: List ignoreMissingComponentTemplates = randomValueOtherThan( orig.getIgnoreMissingComponentTemplates(), () -> randomList(1, 4, () -> randomAlphaOfLength(4)) ); - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - orig.priority(), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - ignoreMissingComponentTemplates, - orig.deprecated() - ); + return orig.toBuilder().ignoreMissingComponentTemplates(ignoreMissingComponentTemplates).build(); case 8: - return new ComposableIndexTemplate( - orig.indexPatterns(), - orig.template(), - orig.composedOf(), - orig.priority(), - orig.version(), - orig.metadata(), - orig.getDataStreamTemplate(), - orig.getAllowAutoCreate(), - orig.getIgnoreMissingComponentTemplates(), - orig.isDeprecated() ? randomFrom(false, null) : true - ); + return orig.toBuilder().deprecated(orig.isDeprecated() ? randomFrom(false, null) : true).build(); default: throw new IllegalStateException("illegal randomization branch"); } @@ -342,4 +251,9 @@ public void testXContentSerializationWithRollover() throws IOException { } } } + + public void testBuilderRoundtrip() { + ComposableIndexTemplate template = randomInstance(); + assertEquals(template, template.toBuilder().build()); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index a8403ef7e9dcd..2b40e28416129 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -49,7 +49,8 @@ public class MetadataCreateDataStreamServiceTests extends ESTestCase { public void testCreateDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = "my-data-stream"; - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -84,7 +85,8 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { final AliasMetadata am = randomAlias(null); aliases.put(am.alias(), am); } - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) .dataStreamTemplate(new DataStreamTemplate()) .template(new Template(null, null, aliases)) .build(); @@ -156,7 +158,8 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio } allAliases.add(aliases); - ComposableIndexTemplate template = new 
ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) .dataStreamTemplate(new DataStreamTemplate()) .template(new Template(null, null, aliases)) .componentTemplates(ctNames) @@ -305,7 +308,7 @@ public void testCreateDataStreamNoTemplate() throws Exception { public void testCreateDataStreamNoValidTemplate() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = "my-data-stream"; - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")).build(); + ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(List.of(dataStreamName + "*")).build(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) .metadata(Metadata.builder().put("template", template).build()) .build(); @@ -322,7 +325,8 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { public static ClusterState createDataStream(final String dataStreamName) throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 24afb569a8167..5d1c3fd0650d7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -1789,17 +1789,11 @@ public void testRemoveComponentTemplateInUse() throws Exception { } public void testRemoveRequiredAndNonRequiredComponents() throws Exception { - ComposableIndexTemplate composableIndexTemplate = new ComposableIndexTemplate( - Collections.singletonList("pattern"), - null, - List.of("required1", "non-required", "required2"), - null, - null, - null, - null, - null, - Collections.singletonList("non-required") - ); + ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("pattern")) + .componentTemplates(List.of("required1", "non-required", "required2")) + .ignoreMissingComponentTemplates(Collections.singletonList("non-required")) + .build(); ComponentTemplate ct = new ComponentTemplate(new Template(null, new CompressedXContent("{}"), null), null, null); final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); @@ -2423,17 +2417,12 @@ public void testIgnoreMissingComponentTemplateValid() throws Exception { ignoreMissingComponentTemplates.add("bar"); ignoreMissingComponentTemplates.add("foo"); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("metrics-test-*"), - null, - componentTemplates, - 1L, - null, - null, - null, - null, - ignoreMissingComponentTemplates - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + 
.indexPatterns(Arrays.asList("metrics-test-*")) + .componentTemplates(componentTemplates) + .priority(1L) + .ignoreMissingComponentTemplates(ignoreMissingComponentTemplates) + .build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); @@ -2452,17 +2441,12 @@ public void testIgnoreMissingComponentTemplateInvalid() throws Exception { ignoreMissingComponentTemplates.add("bar"); ignoreMissingComponentTemplates.add("foo"); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("metrics-foo-*"), - null, - componentTemplates, - 1L, - null, - null, - null, - null, - ignoreMissingComponentTemplates - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("metrics-foo-*")) + .componentTemplates(componentTemplates) + .priority(1L) + .ignoreMissingComponentTemplates(ignoreMissingComponentTemplates) + .build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); @@ -2492,17 +2476,12 @@ public void testAddInvalidTemplateIgnoreService() throws Exception { ignoreMissingComponentTemplates.add("bar"); ignoreMissingComponentTemplates.add("foo"); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("metrics-foo-*"), - null, - componentTemplates, - 1L, - null, - null, - null, - null, - ignoreMissingComponentTemplates - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("metrics-foo-*")) + .componentTemplates(componentTemplates) + .priority(1L) + .ignoreMissingComponentTemplates(ignoreMissingComponentTemplates) + .build(); ComponentTemplate ct = new ComponentTemplate(new Template(Settings.EMPTY, null, null), null, null); @@ -2604,18 +2583,11 @@ public void testAddIndexTemplateWithDeprecatedComponentTemplate() throws Excepti ComponentTemplate ct = ComponentTemplateTests.randomInstance(false, true); state = service.addComponentTemplate(state, true, "ct", ct); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("test*"), - null, - List.of("ct"), - null, - 1L, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test*")) + .componentTemplates(List.of("ct")) + .version(1L) + .build(); service.addIndexTemplateV2(state, false, "foo", it); assertWarnings("index template [foo] uses deprecated component template [ct]"); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index 7124db6c1c721..6421b70f9e453 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -341,9 +341,7 @@ public void testTemplateOverridden() { startContentOnlyNode(); Template t = new Template(Settings.builder().putNull(DataTier.TIER_PREFERENCE).build(), null, null); - ComposableIndexTemplate ct = new 
ComposableIndexTemplate.Builder().indexPatterns(Collections.singletonList(index)) - .template(t) - .build(); + ComposableIndexTemplate ct = ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList(index)).template(t).build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(ct) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java index fdcfe40f488bf..9d959951383dd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java @@ -410,7 +410,7 @@ private static IndexTemplateMetadata createLegacyIndexTemplateMetaData(String te } private static ComposableIndexTemplate createComposableIndexTemplateMetaData(String templateName, List patterns) { - return new ComposableIndexTemplate.Builder().indexPatterns(patterns).build(); + return ComposableIndexTemplate.builder().indexPatterns(patterns).build(); } private static IndexMetadata createIndexMetadata(String indexName, boolean withAlias) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java index bb75584a9cf75..283e48a328aa7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java @@ -690,7 +690,7 @@ static List migrateComposableTemplates(Metadata.Builder mb, ClusterState if (settings.keySet().contains(requireRoutingSetting) || settings.keySet().contains(includeRoutingSetting)) { Template currentInnerTemplate = composableTemplate.template(); - ComposableIndexTemplate.Builder migratedComposableTemplateBuilder = new ComposableIndexTemplate.Builder(); + ComposableIndexTemplate.Builder migratedComposableTemplateBuilder = ComposableIndexTemplate.builder(); Settings.Builder settingsBuilder = Settings.builder().put(settings); settingsBuilder.remove(requireRoutingSetting); settingsBuilder.remove(includeRoutingSetting); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java index cbcc19b9f06ec..6aa46dee54829 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java @@ -1214,9 +1214,10 @@ public void testDryRunDoesntRequireILMStopped() { } public void testMigrationDoesNotRemoveComposableTemplates() { - ComposableIndexTemplate composableIndexTemplate = new ComposableIndexTemplate.Builder().indexPatterns( - Collections.singletonList("*") - ).template(new Template(Settings.builder().put(DATA_ROUTING_REQUIRE_SETTING, "hot").build(), null, null)).build(); + ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder() + 
.indexPatterns(Collections.singletonList("*")) + .template(new Template(Settings.builder().put(DATA_ROUTING_REQUIRE_SETTING, "hot").build(), null, null)) + .build(); String composableTemplateName = "catch-all-composable-template"; ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java index 1cb9e909310e1..b8c64f945db0a 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java @@ -45,7 +45,7 @@ public void tearDown() throws Exception { public void testThatTemplatesAreDeprecated() { for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { - assertTrue(it.deprecated()); + assertTrue(it.isDeprecated()); } for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { assertTrue(ilm.isDeprecated()); diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index b3ddb6b132d53..4d68b471a52f8 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -509,7 +509,7 @@ public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { public void testThatTemplatesAreNotDeprecated() { for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { - assertFalse(it.deprecated()); + assertFalse(it.isDeprecated()); } for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { assertFalse(ilm.isDeprecated()); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java index 6a14097d4ec24..d06b1a4ed106b 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java @@ -42,7 +42,8 @@ public TransformAuditor(Client client, String nodeName, ClusterService clusterSe () -> { try { return new PutComposableIndexTemplateAction.Request(TransformInternalIndexConstants.AUDIT_INDEX).indexTemplate( - new ComposableIndexTemplate.Builder().template(TransformInternalIndex.getAuditIndexTemplate()) + ComposableIndexTemplate.builder() + .template(TransformInternalIndex.getAuditIndexTemplate()) .version((long) TransformConfigVersion.CURRENT.id()) .indexPatterns(Collections.singletonList(TransformInternalIndexConstants.AUDIT_INDEX_PREFIX + "*")) .priority(Long.MAX_VALUE)
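
A short before/after sketch of the copy idiom this patch standardizes
on. This is a minimal illustration rather than code from the patch: the
pattern and priority values are made up, and it assumes the
`org.elasticsearch.cluster.metadata` classes shown in the diff above
are on the classpath.

    import java.util.List;
    import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
    import org.elasticsearch.cluster.metadata.Template;

    // Build a template with the new static factory instead of the
    // now-deprecated constructors / `new ComposableIndexTemplate.Builder()`.
    ComposableIndexTemplate original = ComposableIndexTemplate.builder()
        .indexPatterns(List.of("logs-*")) // hypothetical pattern
        .priority(100L)                   // hypothetical priority
        .build();

    // Copy with a single modification: toBuilder() carries over every
    // other property, so adding a new field to the class cannot silently
    // drop data the way a hand-written ten-argument constructor call could.
    ComposableIndexTemplate copy = original.toBuilder()
        .template(new Template(null, null, null))
        .build();

The new `testBuilderRoundtrip` test pins down the invariant this relies
on: `template.toBuilder().build()` must equal `template`.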