From 4da72997e956532a2a9d2b98c4c4659a5d26ea86 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 11 Dec 2024 09:38:40 -0500 Subject: [PATCH 01/11] Use single-task queues in ReservedClusterStateService (#118351) * Refactor: submitUpdateTask method * Test for one task per reserved state update; currently fails * Separate queue per task * Spotless --- .../service/ReservedClusterStateService.java | 32 ++++---- .../ReservedClusterStateServiceTests.java | 77 +++++++++++++++++++ 2 files changed, 92 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 499b5e6515a8c..248d37914cf32 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Tuple; import org.elasticsearch.env.BuildVersion; @@ -61,8 +60,6 @@ public class ReservedClusterStateService { final Map> handlers; final ClusterService clusterService; - private final MasterServiceTaskQueue updateTaskQueue; - private final MasterServiceTaskQueue errorTaskQueue; @SuppressWarnings("unchecked") private final ConstructingObjectParser stateChunkParser = new ConstructingObjectParser<>( @@ -77,6 +74,8 @@ public class ReservedClusterStateService { return new ReservedStateChunk(stateMap, (ReservedStateVersion) a[1]); } ); + private final ReservedStateUpdateTaskExecutor updateTaskExecutor; + private final 
ReservedStateErrorTaskExecutor errorTaskExecutor; /** * Controller class for saving and reserving {@link ClusterState}. @@ -89,12 +88,8 @@ public ReservedClusterStateService( List> handlerList ) { this.clusterService = clusterService; - this.updateTaskQueue = clusterService.createTaskQueue( - "reserved state update", - Priority.URGENT, - new ReservedStateUpdateTaskExecutor(rerouteService) - ); - this.errorTaskQueue = clusterService.createTaskQueue("reserved state error", Priority.URGENT, new ReservedStateErrorTaskExecutor()); + this.updateTaskExecutor = new ReservedStateUpdateTaskExecutor(rerouteService); + this.errorTaskExecutor = new ReservedStateErrorTaskExecutor(); this.handlers = handlerList.stream().collect(Collectors.toMap(ReservedClusterStateHandler::name, Function.identity())); stateChunkParser.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, name) -> { if (handlers.containsKey(name) == false) { @@ -160,7 +155,7 @@ public void process( public void initEmpty(String namespace, ActionListener listener) { var missingVersion = new ReservedStateVersion(EMPTY_VERSION, BuildVersion.current()); var emptyState = new ReservedStateChunk(Map.of(), missingVersion); - updateTaskQueue.submitTask( + submitUpdateTask( "empty initial cluster state [" + namespace + "]", new ReservedStateUpdateTask( namespace, @@ -171,10 +166,8 @@ public void initEmpty(String namespace, ActionListener lis // error state should not be possible since there is no metadata being parsed or processed errorState -> { throw new AssertionError(); }, listener - ), - null + ) ); - } /** @@ -234,7 +227,7 @@ public void process( errorListener.accept(error); return; } - updateTaskQueue.submitTask( + submitUpdateTask( "reserved cluster state [" + namespace + "]", new ReservedStateUpdateTask( namespace, @@ -242,7 +235,7 @@ public void process( versionCheck, handlers, orderedHandlers, - ReservedClusterStateService.this::updateErrorState, + this::updateErrorState, new ActionListener<>() { 
@Override public void onResponse(ActionResponse.Empty empty) { @@ -261,8 +254,7 @@ public void onFailure(Exception e) { } } } - ), - null + ) ); } @@ -293,6 +285,11 @@ Exception checkAndReportError( return null; } + void submitUpdateTask(String source, ReservedStateUpdateTask task) { + var updateTaskQueue = clusterService.createTaskQueue("reserved state update", Priority.URGENT, updateTaskExecutor); + updateTaskQueue.submitTask(source, task, null); + } + // package private for testing void updateErrorState(ErrorState errorState) { // optimistic check here - the cluster state might change after this, so also need to re-check later @@ -305,6 +302,7 @@ void updateErrorState(ErrorState errorState) { } private void submitErrorUpdateTask(ErrorState errorState) { + var errorTaskQueue = clusterService.createTaskQueue("reserved state error", Priority.URGENT, errorTaskExecutor); errorTaskQueue.submitTask( "reserved cluster state update error for [ " + errorState.namespace() + "]", new ReservedStateErrorTask(errorState, new ActionListener<>() { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index efe3566064170..982f5c4a93ae0 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -47,6 +47,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.LongFunction; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; @@ -67,6 +68,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; 
import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; @@ -332,6 +334,81 @@ public void testUpdateErrorState() { verifyNoMoreInteractions(errorQueue); } + @SuppressWarnings("unchecked") + public void testOneUpdateTaskPerQueue() { + ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + MasterServiceTaskQueue queue1 = mockTaskQueue(); + MasterServiceTaskQueue queue2 = mockTaskQueue(); + MasterServiceTaskQueue unusedQueue = mockTaskQueue(); + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.createTaskQueue(anyString(), any(), any())) // For non-update tasks + .thenReturn(unusedQueue); + when(clusterService.createTaskQueue(ArgumentMatchers.contains("reserved state update"), any(), any())) + .thenReturn(queue1, queue2, unusedQueue); + when(clusterService.state()).thenReturn(state); + + ReservedClusterStateService service = new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of()); + LongFunction update = version -> { + ReservedStateUpdateTask task = spy( + new ReservedStateUpdateTask( + "test", + new ReservedStateChunk(Map.of(), new ReservedStateVersion(version, BuildVersion.current())), + ReservedStateVersionCheck.HIGHER_VERSION_ONLY, + Map.of(), + Set.of(), + errorState -> {}, + ActionListener.noop() + ) + ); + doReturn(state).when(task).execute(any()); + return task; + }; + + service.submitUpdateTask("test", update.apply(2L)); + service.submitUpdateTask("test", update.apply(3L)); + + // One task to each queue + verify(queue1).submitTask(any(), any(), any()); + verify(queue2).submitTask(any(), any(), any()); + + // No additional unexpected tasks + verifyNoInteractions(unusedQueue); + } + + @SuppressWarnings("unchecked") + public void testOneErrorTaskPerQueue() { + ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + MasterServiceTaskQueue queue1 = mockTaskQueue(); + MasterServiceTaskQueue queue2 = 
mockTaskQueue(); + MasterServiceTaskQueue unusedQueue = mockTaskQueue(); + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.createTaskQueue(anyString(), any(), any())) // For non-error tasks + .thenReturn(unusedQueue); + when(clusterService.createTaskQueue(ArgumentMatchers.contains("reserved state error"), any(), any())) + .thenReturn(queue1, queue2, unusedQueue); + when(clusterService.state()).thenReturn(state); + + ReservedClusterStateService service = new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of()); + LongFunction error = version -> new ErrorState( + "namespace", + version, + ReservedStateVersionCheck.HIGHER_VERSION_ONLY, + List.of("error"), + ReservedStateErrorMetadata.ErrorKind.TRANSIENT + ); + service.updateErrorState(error.apply(2)); + service.updateErrorState(error.apply(3)); + + // One task to each queue + verify(queue1).submitTask(any(), any(), any()); + verify(queue2).submitTask(any(), any(), any()); + + // No additional unexpected tasks + verifyNoInteractions(unusedQueue); + } + public void testErrorStateTask() throws Exception { ClusterState state = ClusterState.builder(new ClusterName("test")).build(); From ea37a8acc0bc1b22867e6d033004ed37be176863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Wed, 11 Dec 2024 15:55:02 +0100 Subject: [PATCH 02/11] [Entitlements] Moving and refactoring IT tests (#118254) --- .../entitlement/qa}/build.gradle | 19 +-- libs/entitlement/qa/common/build.gradle | 15 +++ .../qa/common/src/main/java/module-info.java | 16 +++ .../common/RestEntitlementsCheckAction.java | 112 ++++++++++++++++++ .../build.gradle | 24 ++++ .../EntitlementAllowedNonModularPlugin.java | 45 +++++++ .../plugin-metadata/entitlement-policy.yaml | 2 + .../qa/entitlement-allowed/build.gradle | 25 ++++ .../src/main/java/module-info.java | 15 +++ .../qa/EntitlementAllowedPlugin.java | 45 +++++++ .../plugin-metadata/entitlement-policy.yaml | 2 + 
.../build.gradle | 24 ++++ .../EntitlementDeniedNonModularPlugin.java | 45 +++++++ .../qa/entitlement-denied/build.gradle | 25 ++++ .../src/main/java/module-info.java | 3 +- .../qa/EntitlementDeniedPlugin.java | 9 +- .../entitlement/qa/EntitlementsAllowedIT.java | 66 +++++++++++ .../entitlement/qa/EntitlementsDeniedIT.java | 65 ++++++++++ .../test/entitlements/EntitlementsIT.java | 49 -------- ...estEntitlementsCheckClassLoaderAction.java | 54 --------- ...RestEntitlementsCheckSystemExitAction.java | 46 ------- 21 files changed, 537 insertions(+), 169 deletions(-) rename {qa/entitlements => libs/entitlement/qa}/build.gradle (59%) create mode 100644 libs/entitlement/qa/common/build.gradle create mode 100644 libs/entitlement/qa/common/src/main/java/module-info.java create mode 100644 libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java create mode 100644 libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle create mode 100644 libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java create mode 100644 libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml create mode 100644 libs/entitlement/qa/entitlement-allowed/build.gradle create mode 100644 libs/entitlement/qa/entitlement-allowed/src/main/java/module-info.java create mode 100644 libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java create mode 100644 libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml create mode 100644 libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle create mode 100644 libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java create mode 100644 
libs/entitlement/qa/entitlement-denied/build.gradle rename {qa/entitlements => libs/entitlement/qa/entitlement-denied}/src/main/java/module-info.java (53%) rename qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java => libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java (83%) create mode 100644 libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java create mode 100644 libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java delete mode 100644 qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java delete mode 100644 qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java delete mode 100644 qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java diff --git a/qa/entitlements/build.gradle b/libs/entitlement/qa/build.gradle similarity index 59% rename from qa/entitlements/build.gradle rename to libs/entitlement/qa/build.gradle index 9a5058a3b11ac..86bafc34f4d00 100644 --- a/qa/entitlements/build.gradle +++ b/libs/entitlement/qa/build.gradle @@ -7,23 +7,14 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -apply plugin: 'elasticsearch.base-internal-es-plugin' apply plugin: 'elasticsearch.internal-java-rest-test' // Necessary to use tests in Serverless apply plugin: 'elasticsearch.internal-test-artifact' -esplugin { - name 'entitlement-qa' - description 'A test module that triggers entitlement checks' - classname 'org.elasticsearch.test.entitlements.EntitlementsCheckPlugin' -} - dependencies { - clusterPlugins project(':qa:entitlements') + javaRestTestImplementation project(':libs:entitlement:qa:common') + clusterPlugins project(':libs:entitlement:qa:entitlement-allowed') + clusterPlugins project(':libs:entitlement:qa:entitlement-allowed-nonmodular') + clusterPlugins project(':libs:entitlement:qa:entitlement-denied') + clusterPlugins project(':libs:entitlement:qa:entitlement-denied-nonmodular') } - -tasks.named("javadoc").configure { - // There seems to be some problem generating javadoc on a QA project that has a module definition - enabled = false -} - diff --git a/libs/entitlement/qa/common/build.gradle b/libs/entitlement/qa/common/build.gradle new file mode 100644 index 0000000000000..df3bc66cba21b --- /dev/null +++ b/libs/entitlement/qa/common/build.gradle @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +apply plugin: 'elasticsearch.build' + +dependencies { + implementation project(':server') + implementation project(':libs:logging') +} diff --git a/libs/entitlement/qa/common/src/main/java/module-info.java b/libs/entitlement/qa/common/src/main/java/module-info.java new file mode 100644 index 0000000000000..2dd37e3174e08 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/module-info.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +module org.elasticsearch.entitlement.qa.common { + requires org.elasticsearch.server; + requires org.elasticsearch.base; + requires org.elasticsearch.logging; + + exports org.elasticsearch.entitlement.qa.common; +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java new file mode 100644 index 0000000000000..e63fa4f3b726b --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static java.util.Map.entry; +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestEntitlementsCheckAction extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class); + private final String prefix; + + private record CheckAction(Runnable action, boolean isServerOnly) { + + static CheckAction serverOnly(Runnable action) { + return new CheckAction(action, true); + } + + static CheckAction serverAndPlugin(Runnable action) { + return new CheckAction(action, false); + } + } + + private static final Map checkActions = Map.ofEntries( + entry("system_exit", CheckAction.serverOnly(RestEntitlementsCheckAction::systemExit)), + entry("create_classloader", CheckAction.serverAndPlugin(RestEntitlementsCheckAction::createClassLoader)) + ); + + @SuppressForbidden(reason = "Specifically testing System.exit") + private static void systemExit() { + 
logger.info("Calling System.exit(123);"); + System.exit(123); + } + + private static void createClassLoader() { + logger.info("Calling new URLClassLoader"); + try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { + logger.info("Created URLClassLoader [{}]", classLoader.getName()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public RestEntitlementsCheckAction(String prefix) { + this.prefix = prefix; + } + + public static Set getServerAndPluginsCheckActions() { + return checkActions.entrySet() + .stream() + .filter(kv -> kv.getValue().isServerOnly() == false) + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + } + + public static Set getAllCheckActions() { + return checkActions.keySet(); + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_entitlement/" + prefix + "/_check")); + } + + @Override + public String getName() { + return "check_" + prefix + "_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + logger.info("RestEntitlementsCheckAction rest handler [{}]", request.path()); + var actionName = request.param("action"); + if (Strings.isNullOrEmpty(actionName)) { + throw new IllegalArgumentException("Missing action parameter"); + } + var checkAction = checkActions.get(actionName); + if (checkAction == null) { + throw new IllegalArgumentException(Strings.format("Unknown action [%s]", actionName)); + } + + return channel -> { + checkAction.action().run(); + channel.sendResponse(new RestResponse(RestStatus.OK, Strings.format("Succesfully executed action [%s]", actionName))); + }; + } +} diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle b/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle new file mode 100644 index 0000000000000..7b3015a5ab831 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle @@ -0,0 
+1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' + +esplugin { + name 'entitlement-allowed-nonmodular' + description 'A non-modular test module that invokes entitlement checks that are supposed to be granted' + classname 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementAllowedNonModularPlugin' +} + +dependencies { + implementation project(':libs:entitlement:qa:common') +} + +tasks.named("javadoc").configure { + enabled = false +} diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java new file mode 100644 index 0000000000000..d65981c30f0be --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ +package org.elasticsearch.entitlement.qa.nonmodular; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class EntitlementAllowedNonModularPlugin extends Plugin implements ActionPlugin { + + @Override + public List getRestHandlers( + final Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new RestEntitlementsCheckAction("allowed_nonmodular")); + } +} diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..45d4e57f66521 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - create_class_loader diff --git 
a/libs/entitlement/qa/entitlement-allowed/build.gradle b/libs/entitlement/qa/entitlement-allowed/build.gradle new file mode 100644 index 0000000000000..6090d658d2081 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed/build.gradle @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' + +esplugin { + name 'entitlement-allowed' + description 'A test module that invokes entitlement checks that are supposed to be granted' + classname 'org.elasticsearch.entitlement.qa.EntitlementAllowedPlugin' +} + +dependencies { + implementation project(':libs:entitlement:qa:common') +} + +tasks.named("javadoc").configure { + enabled = false +} + diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/java/module-info.java b/libs/entitlement/qa/entitlement-allowed/src/main/java/module-info.java new file mode 100644 index 0000000000000..a88611e6ac9a5 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed/src/main/java/module-info.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +module org.elasticsearch.entitlement.qa.allowed { + requires org.elasticsearch.server; + requires org.elasticsearch.base; + requires org.elasticsearch.logging; + requires org.elasticsearch.entitlement.qa.common; +} diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java new file mode 100644 index 0000000000000..d81e23e311be1 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ +package org.elasticsearch.entitlement.qa; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class EntitlementAllowedPlugin extends Plugin implements ActionPlugin { + + @Override + public List getRestHandlers( + final Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new RestEntitlementsCheckAction("allowed")); + } +} diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..7b5e848f414b2 --- /dev/null +++ b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.entitlement.qa.common: + - create_class_loader diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle 
b/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle new file mode 100644 index 0000000000000..bddd6c83c7cc4 --- /dev/null +++ b/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' + +esplugin { + name 'entitlement-denied-nonmodular' + description 'A non-modular test module that invokes non-granted entitlement and triggers exceptions' + classname 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementDeniedNonModularPlugin' +} + +dependencies { + implementation project(':libs:entitlement:qa:common') +} + +tasks.named("javadoc").configure { + enabled = false +} diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java new file mode 100644 index 0000000000000..0f908d84260fb --- /dev/null +++ b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package org.elasticsearch.entitlement.qa.nonmodular; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class EntitlementDeniedNonModularPlugin extends Plugin implements ActionPlugin { + + @Override + public List getRestHandlers( + final Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new RestEntitlementsCheckAction("denied_nonmodular")); + } +} diff --git a/libs/entitlement/qa/entitlement-denied/build.gradle b/libs/entitlement/qa/entitlement-denied/build.gradle new file mode 100644 index 
0000000000000..cc269135c5bf5 --- /dev/null +++ b/libs/entitlement/qa/entitlement-denied/build.gradle @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' + +esplugin { + name 'entitlement-denied' + description 'A test module that invokes non-granted entitlement and triggers exceptions' + classname 'org.elasticsearch.entitlement.qa.EntitlementDeniedPlugin' +} + +dependencies { + implementation project(':libs:entitlement:qa:common') +} + +tasks.named("javadoc").configure { + enabled = false +} + diff --git a/qa/entitlements/src/main/java/module-info.java b/libs/entitlement/qa/entitlement-denied/src/main/java/module-info.java similarity index 53% rename from qa/entitlements/src/main/java/module-info.java rename to libs/entitlement/qa/entitlement-denied/src/main/java/module-info.java index cf33ff95d834c..3def472be7a45 100644 --- a/qa/entitlements/src/main/java/module-info.java +++ b/libs/entitlement/qa/entitlement-denied/src/main/java/module-info.java @@ -1,5 +1,6 @@ -module elasticsearch.qa.entitlements { +module org.elasticsearch.entitlement.qa.denied { requires org.elasticsearch.server; requires org.elasticsearch.base; requires org.apache.logging.log4j; + requires org.elasticsearch.entitlement.qa.common; } diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java similarity index 83% rename from 
qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java rename to libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java index 94ad54c8c8ba8..0ed27e2e576e7 100644 --- a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java +++ b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java @@ -6,7 +6,7 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.test.entitlements; +package org.elasticsearch.entitlement.qa; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -15,7 +15,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; @@ -26,10 +26,9 @@ import java.util.function.Predicate; import java.util.function.Supplier; -public class EntitlementsCheckPlugin extends Plugin implements ActionPlugin { +public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin { @Override - @SuppressForbidden(reason = "Specifically testing System.exit") public List getRestHandlers( final Settings settings, NamedWriteableRegistry namedWriteableRegistry, @@ -41,6 +40,6 @@ public List getRestHandlers( final Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return List.of(new RestEntitlementsCheckSystemExitAction(), new RestEntitlementsCheckClassLoaderAction()); + return List.of(new 
RestEntitlementsCheckAction("denied")); } } diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java new file mode 100644 index 0000000000000..5135fff44531a --- /dev/null +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class EntitlementsAllowedIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .plugin("entitlement-allowed") + .plugin("entitlement-allowed-nonmodular") + .systemProperty("es.entitlements.enabled", "true") + .setting("xpack.security.enabled", "false") + .build(); + + private final String pathPrefix; + private final String actionName; + + public 
EntitlementsAllowedIT(@Name("pathPrefix") String pathPrefix, @Name("actionName") String actionName) { + this.pathPrefix = pathPrefix; + this.actionName = actionName; + } + + @ParametersFactory + public static Iterable data() { + return Stream.of("allowed", "allowed_nonmodular") + .flatMap( + path -> RestEntitlementsCheckAction.getServerAndPluginsCheckActions().stream().map(action -> new Object[] { path, action }) + ) + .toList(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testCheckActionWithPolicyPass() throws IOException { + logger.info("Executing Entitlement test [{}] for [{}]", pathPrefix, actionName); + var request = new Request("GET", "/_entitlement/" + pathPrefix + "/_check"); + request.addParameter("action", actionName); + Response result = client().performRequest(request); + assertThat(result.getStatusLine().getStatusCode(), equalTo(200)); + } +} diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java new file mode 100644 index 0000000000000..9f55a7c9e894d --- /dev/null +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.qa; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.client.Request; +import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.containsString; + +public class EntitlementsDeniedIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .plugin("entitlement-denied") + .plugin("entitlement-denied-nonmodular") + .systemProperty("es.entitlements.enabled", "true") + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + private final String pathPrefix; + private final String actionName; + + public EntitlementsDeniedIT(@Name("pathPrefix") String pathPrefix, @Name("actionName") String actionName) { + this.pathPrefix = pathPrefix; + this.actionName = actionName; + } + + @ParametersFactory + public static Iterable data() { + return Stream.of("denied", "denied_nonmodular") + .flatMap(path -> RestEntitlementsCheckAction.getAllCheckActions().stream().map(action -> new Object[] { path, action })) + .toList(); + } + + public void testCheckThrows() { + logger.info("Executing Entitlement test [{}] for [{}]", pathPrefix, actionName); + var exception = expectThrows(IOException.class, () -> { + var request = new Request("GET", "/_entitlement/" + pathPrefix + "/_check"); + request.addParameter("action", actionName); + client().performRequest(request); + }); + assertThat(exception.getMessage(), containsString("not_entitled_exception")); + } +} diff --git 
a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java deleted file mode 100644 index f8bae10492ba8..0000000000000 --- a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.test.entitlements; - -import org.elasticsearch.client.Request; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.ClassRule; - -import java.io.IOException; - -import static org.hamcrest.Matchers.containsString; - -public class EntitlementsIT extends ESRestTestCase { - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .plugin("entitlement-qa") - .systemProperty("es.entitlements.enabled", "true") - .setting("xpack.security.enabled", "false") - .build(); - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } - - public void testCheckSystemExit() { - var exception = expectThrows( - IOException.class, - () -> { client().performRequest(new Request("GET", "/_entitlement/_check_system_exit")); } - ); - assertThat(exception.getMessage(), containsString("not_entitled_exception")); - } - - public void testCheckCreateURLClassLoader() { - var exception = expectThrows(IOException.class, () -> { - client().performRequest(new Request("GET", 
"/_entitlement/_check_create_url_classloader")); - }); - assertThat(exception.getMessage(), containsString("not_entitled_exception")); - } -} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java deleted file mode 100644 index 0b5ca28739ed0..0000000000000 --- a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.test.entitlements; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; - -import java.net.URL; -import java.net.URLClassLoader; -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.GET; - -public class RestEntitlementsCheckClassLoaderAction extends BaseRestHandler { - - private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckClassLoaderAction.class); - - RestEntitlementsCheckClassLoaderAction() {} - - @Override - public List routes() { - return List.of(new Route(GET, "/_entitlement/_check_create_url_classloader")); - } - - @Override - public String getName() { - return "check_classloader_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { - logger.info("RestEntitlementsCheckClassLoaderAction rest handler [{}]", request.path()); - if (request.path().equals("/_entitlement/_check_create_url_classloader")) { - return channel -> { - logger.info("Calling new URLClassLoader"); - try (var classLoader = new URLClassLoader("test", new URL[0], this.getClass().getClassLoader())) { - logger.info("Created URLClassLoader [{}]", classLoader.getName()); - } - }; - } - - throw new UnsupportedOperationException(); - } -} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java deleted file mode 100644 index 692c8728cbda0..0000000000000 --- a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.test.entitlements; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; - -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.GET; - -public class RestEntitlementsCheckSystemExitAction extends BaseRestHandler { - - private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckSystemExitAction.class); - - RestEntitlementsCheckSystemExitAction() {} - - @Override - public List routes() { - return List.of(new Route(GET, "/_entitlement/_check_system_exit")); - } - - @Override - public String getName() { - return "check_system_exit_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { - logger.info("RestEntitlementsCheckSystemExitAction rest handler"); - return channel -> { - logger.info("Calling System.exit(123);"); - System.exit(123); - }; - } -} From 56e1ca52ea38671a320c9e9421fe3cb8fe5f15e3 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:06:24 +0100 Subject: [PATCH 03/11] [DOCS][101] Aggregations quickstart tutorial (#116251) --- .../quickstart/aggs-tutorial.asciidoc | 2184 +++++++++++++++++ docs/reference/quickstart/index.asciidoc | 2 + 2 files changed, 2186 insertions(+) create mode 100644 docs/reference/quickstart/aggs-tutorial.asciidoc diff --git 
a/docs/reference/quickstart/aggs-tutorial.asciidoc b/docs/reference/quickstart/aggs-tutorial.asciidoc new file mode 100644 index 0000000000000..0a8494c3eb75d --- /dev/null +++ b/docs/reference/quickstart/aggs-tutorial.asciidoc @@ -0,0 +1,2184 @@ +[[aggregations-tutorial]] +== Analyze eCommerce data with aggregations using Query DSL +++++ +Basics: Analyze eCommerce data with aggregations +++++ + +This hands-on tutorial shows you how to analyze eCommerce data using {es} <> with the `_search` API and Query DSL. + +You'll learn how to: + +* Calculate key business metrics such as average order value +* Analyze sales patterns over time +* Compare performance across product categories +* Track moving averages and cumulative totals + +[discrete] +[[aggregations-tutorial-requirements]] +=== Requirements + +You'll need: + +. A running instance of <>, either on {serverless-full} or together with {kib} on Elastic Cloud Hosted/Self Managed deployments. +** If you don't have a deployment, you can run the following command in your terminal to set up a <>: ++ +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE +. The {kibana-ref}/get-started.html#gs-get-data-into-kibana[sample eCommerce data] loaded into {es}. To load sample data follow these steps in your UI: +* Open the *Integrations* pages by searching in the global search field. +* Search for `sample data` in the **Integrations** search field. +* Open the *Sample data* page. +* Select the *Other sample data sets* collapsible. +* Add the *Sample eCommerce orders* data set. +This will create and populate an index called `kibana_sample_data_ecommerce`. + +[discrete] +[[aggregations-tutorial-inspect-data]] +=== Inspect index structure + +Before we start analyzing the data, let's examine the structure of the documents in our sample eCommerce index. 
Run this command to see the field <>: + +[source,console] +---- +GET kibana_sample_data_ecommerce/_mapping +---- +// TEST[skip:Using Kibana sample data] + +The response shows the field mappings for the `kibana_sample_data_ecommerce` index. + +.Example response +[%collapsible] +============== +[source,console-response] +---- +{ + "kibana_sample_data_ecommerce": { + "mappings": { + "properties": { + "category": { + "type": "text", + "fields": { <1> + "keyword": { + "type": "keyword" + } + } + }, + "currency": { + "type": "keyword" + }, + "customer_birth_date": { + "type": "date" + }, + "customer_first_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "customer_full_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "customer_gender": { + "type": "keyword" + }, + "customer_id": { + "type": "keyword" + }, + "customer_last_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "customer_phone": { + "type": "keyword" + }, + "day_of_week": { + "type": "keyword" + }, + "day_of_week_i": { + "type": "integer" + }, + "email": { + "type": "keyword" + }, + "event": { + "properties": { + "dataset": { + "type": "keyword" + } + } + }, + "geoip": { + "properties": { <2> + "city_name": { + "type": "keyword" + }, + "continent_name": { + "type": "keyword" + }, + "country_iso_code": { + "type": "keyword" + }, + "location": { + "type": "geo_point" <3> + }, + "region_name": { + "type": "keyword" + } + } + }, + "manufacturer": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "order_date": { + "type": "date" + }, + "order_id": { + "type": "keyword" + }, + "products": { + "properties": { <4> + "_id": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "base_price": { + "type": "half_float" + }, + "base_unit_price": { + 
"type": "half_float" + }, + "category": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "created_on": { + "type": "date" + }, + "discount_amount": { + "type": "half_float" + }, + "discount_percentage": { + "type": "half_float" + }, + "manufacturer": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "min_price": { + "type": "half_float" + }, + "price": { + "type": "half_float" + }, + "product_id": { + "type": "long" + }, + "product_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + }, + "analyzer": "english" + }, + "quantity": { + "type": "integer" + }, + "sku": { + "type": "keyword" + }, + "tax_amount": { + "type": "half_float" + }, + "taxful_price": { + "type": "half_float" + }, + "taxless_price": { + "type": "half_float" + }, + "unit_discount_amount": { + "type": "half_float" + } + } + }, + "sku": { + "type": "keyword" + }, + "taxful_total_price": { + "type": "half_float" + }, + "taxless_total_price": { + "type": "half_float" + }, + "total_quantity": { + "type": "integer" + }, + "total_unique_products": { + "type": "integer" + }, + "type": { + "type": "keyword" + }, + "user": { + "type": "keyword" + } + } + } + } +} +---- +<1> `fields`: Multi-field mapping that allows both full text and exact matching +<2> `geoip.properties`: Object type field containing location-related properties +<3> `geoip.location`: Geographic coordinates stored as geo_point for location-based queries +<4> `products.properties`: Nested structure containing details about items in each order +============== + +The sample data includes the following <>: + +* <> and <> for text fields +** Most `text` fields have a `.keyword` subfield for exact matching using <> +* <> for date fields +* 3 <> types: +** `integer` for whole numbers +** `long` for large whole numbers +** `half_float` for floating-point numbers +* <> for geographic coordinates +* <> for nested structures such as `products`, `geoip`, 
`event` + +Now that we understand the structure of our sample data, let's start analyzing it. + +[discrete] +[[aggregations-tutorial-basic-metrics]] +=== Get key business metrics + +Let's start by calculating important metrics about orders and customers. + +[discrete] +[[aggregations-tutorial-order-value]] +==== Get average order size + +Calculate the average order value across all orders in the dataset using the <> aggregation. + +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, <1> + "aggs": { + "avg_order_value": { <2> + "avg": { <3> + "field": "taxful_total_price" + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Set `size` to 0 to avoid returning matched documents in the response and return only the aggregation results +<2> A meaningful name that describes what this metric represents +<3> Configures an `avg` aggregation, which calculates a simple arithmetic mean + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, <1> + "relation": "eq" + }, + "max_score": null, + "hits": [] <2> + }, + "aggregations": { + "avg_order_value": { <3> + "value": 75.05542864304813 <4> + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Total number of orders in the dataset +<2> `hits` is empty because we set `size` to 0 +<3> Results appear under the name we specified in the request +<4> The average order value is calculated dynamically from all the orders in the dataset +============== + +[discrete] +[[aggregations-tutorial-order-stats]] +==== Get multiple order statistics at once + +Calculate multiple statistics about orders in one request using the <> aggregation. 
+ +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "order_stats": { <1> + "stats": { <2> + "field": "taxful_total_price" + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> A descriptive name for this set of statistics +<2> `stats` returns count, min, max, avg, and sum at once + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "aggregations": { + "order_stats": { + "count": 4675, <1> + "min": 6.98828125, <2> + "max": 2250, <3> + "avg": 75.05542864304813, <4> + "sum": 350884.12890625 <5> + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> `"count"`: Total number of orders in the dataset +<2> `"min"`: Lowest individual order value in the dataset +<3> `"max"`: Highest individual order value in the dataset +<4> `"avg"`: Average value per order across all orders +<5> `"sum"`: Total revenue from all orders combined +============== + +[TIP] +==== +The <> is more efficient than running individual min, max, avg, and sum aggregations. +==== + +[discrete] +[[aggregations-tutorial-sales-patterns]] +=== Analyze sales patterns + +Let's group orders in different ways to understand sales patterns. + +[discrete] +[[aggregations-tutorial-category-breakdown]] +==== Break down sales by category + +Group orders by category to see which product categories are most popular, using the <> aggregation. 
+ +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "sales_by_category": { <1> + "terms": { <2> + "field": "category.keyword", <3> + "size": 5, <4> + "order": { "_count": "desc" } <5> + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Name reflecting the business purpose of this breakdown +<2> `terms` aggregation groups documents by field values +<3> Use <> field for exact matching on text fields +<4> Limit to top 5 categories +<5> Order by number of orders (descending) + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 4, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, + "relation": "eq" + }, + "max_score": null, + "hits": [] + }, + "aggregations": { + "sales_by_category": { + "doc_count_error_upper_bound": 0, <1> + "sum_other_doc_count": 572, <2> + "buckets": [ <3> + { + "key": "Men's Clothing", <4> + "doc_count": 2024 <5> + }, + { + "key": "Women's Clothing", + "doc_count": 1903 + }, + { + "key": "Women's Shoes", + "doc_count": 1136 + }, + { + "key": "Men's Shoes", + "doc_count": 944 + }, + { + "key": "Women's Accessories", + "doc_count": 830 + } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Due to Elasticsearch's distributed architecture, when <> run across multiple shards, the doc counts may have a small margin of error. This value indicates the maximum possible error in the counts. +<2> Count of documents in categories beyond the requested size. +<3> Array of category buckets, ordered by count. +<4> Category name. +<5> Number of orders in this category. +============== + +[discrete] +[[aggregations-tutorial-daily-sales]] +==== Track daily sales patterns + +Group orders by day to track daily sales patterns using the <> aggregation. 
+ +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "daily_orders": { <1> + "date_histogram": { <2> + "field": "order_date", + "calendar_interval": "day", <3> + "format": "yyyy-MM-dd", <4> + "min_doc_count": 0 <5> + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Descriptive name for the time-series aggregation results. +<2> The `date_histogram` aggregation groups documents into time-based buckets, similar to terms aggregation but for dates. +<3> Uses <> to handle months with different lengths. `"day"` ensures consistent daily grouping regardless of timezone. +<4> Formats dates in response using <> (e.g. "yyyy-MM-dd"). Refer to <> for additional options. +<5> When `min_doc_count` is 0, returns buckets for days with no orders, useful for continuous time series visualization. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 2, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, + "relation": "eq" + }, + "max_score": null, + "hits": [] + }, + "aggregations": { + "daily_orders": { <1> + "buckets": [ <2> + { + "key_as_string": "2024-11-28", <3> + "key": 1732752000000, <4> + "doc_count": 146 <5> + }, + { + "key_as_string": "2024-11-29", + "key": 1732838400000, + "doc_count": 153 + }, + { + "key_as_string": "2024-11-30", + "key": 1732924800000, + "doc_count": 143 + }, + { + "key_as_string": "2024-12-01", + "key": 1733011200000, + "doc_count": 140 + }, + { + "key_as_string": "2024-12-02", + "key": 1733097600000, + "doc_count": 139 + }, + { + "key_as_string": "2024-12-03", + "key": 1733184000000, + "doc_count": 157 + }, + { + "key_as_string": "2024-12-04", + "key": 1733270400000, + "doc_count": 145 + }, + { + "key_as_string": "2024-12-05", + "key": 1733356800000, + "doc_count": 152 + }, + { + "key_as_string": "2024-12-06", + "key": 1733443200000, + "doc_count": 163 + }, + 
{ + "key_as_string": "2024-12-07", + "key": 1733529600000, + "doc_count": 141 + }, + { + "key_as_string": "2024-12-08", + "key": 1733616000000, + "doc_count": 151 + }, + { + "key_as_string": "2024-12-09", + "key": 1733702400000, + "doc_count": 143 + }, + { + "key_as_string": "2024-12-10", + "key": 1733788800000, + "doc_count": 143 + }, + { + "key_as_string": "2024-12-11", + "key": 1733875200000, + "doc_count": 142 + }, + { + "key_as_string": "2024-12-12", + "key": 1733961600000, + "doc_count": 161 + }, + { + "key_as_string": "2024-12-13", + "key": 1734048000000, + "doc_count": 144 + }, + { + "key_as_string": "2024-12-14", + "key": 1734134400000, + "doc_count": 157 + }, + { + "key_as_string": "2024-12-15", + "key": 1734220800000, + "doc_count": 158 + }, + { + "key_as_string": "2024-12-16", + "key": 1734307200000, + "doc_count": 144 + }, + { + "key_as_string": "2024-12-17", + "key": 1734393600000, + "doc_count": 151 + }, + { + "key_as_string": "2024-12-18", + "key": 1734480000000, + "doc_count": 145 + }, + { + "key_as_string": "2024-12-19", + "key": 1734566400000, + "doc_count": 157 + }, + { + "key_as_string": "2024-12-20", + "key": 1734652800000, + "doc_count": 158 + }, + { + "key_as_string": "2024-12-21", + "key": 1734739200000, + "doc_count": 153 + }, + { + "key_as_string": "2024-12-22", + "key": 1734825600000, + "doc_count": 165 + }, + { + "key_as_string": "2024-12-23", + "key": 1734912000000, + "doc_count": 153 + }, + { + "key_as_string": "2024-12-24", + "key": 1734998400000, + "doc_count": 158 + }, + { + "key_as_string": "2024-12-25", + "key": 1735084800000, + "doc_count": 160 + }, + { + "key_as_string": "2024-12-26", + "key": 1735171200000, + "doc_count": 159 + }, + { + "key_as_string": "2024-12-27", + "key": 1735257600000, + "doc_count": 152 + }, + { + "key_as_string": "2024-12-28", + "key": 1735344000000, + "doc_count": 142 + } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Results of our named aggregation "daily_orders" +<2> Time-based 
buckets from date_histogram aggregation +<3> `key_as_string` is the human-readable date for this bucket +<4> `key` is the same date represented as the Unix timestamp for this bucket +<5> `doc_count` counts the number of documents that fall into this time bucket +============== + +[discrete] +[[aggregations-tutorial-combined-analysis]] +=== Combine metrics with groupings + +Now let's calculate <> within each group to get deeper insights. + +[discrete] +[[aggregations-tutorial-category-metrics]] +==== Compare category performance + +Calculate metrics within each category to compare performance across categories. + +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "categories": { + "terms": { + "field": "category.keyword", + "size": 5, + "order": { "total_revenue": "desc" } <1> + }, + "aggs": { <2> + "total_revenue": { <3> + "sum": { + "field": "taxful_total_price" + } + }, + "avg_order_value": { <4> + "avg": { + "field": "taxful_total_price" + } + }, + "total_items": { <5> + "sum": { + "field": "total_quantity" + } + } + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Order categories by their total revenue instead of count +<2> Define metrics to calculate within each category +<3> Total revenue for the category +<4> Average order value in the category +<5> Total number of items sold + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "aggregations": { + "categories": { + "buckets": [ + { + "key": "Men's Clothing", <1> + "doc_count": 2179, <2> + "total_revenue": { <3> + "value": 156729.453125 + }, + "avg_order_value": { <4> + "value": 71.92726898715927 + }, + "total_items": { <5> + "value": 8716 + } + }, + { + "key": "Women's Clothing", + "doc_count": 2262, + ... 
+ } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Category name +<2> Number of orders +<3> Total revenue for this category +<4> Average order value for this category +<5> Total quantity of items sold +============== + +[discrete] +[[aggregations-tutorial-daily-metrics]] +==== Analyze daily sales performance + +Let's combine metrics to track daily trends: daily revenue, unique customers, and average basket size. + +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day", + "format": "yyyy-MM-dd" + }, + "aggs": { + "revenue": { <1> + "sum": { + "field": "taxful_total_price" + } + }, + "unique_customers": { <2> + "cardinality": { + "field": "customer_id" + } + }, + "avg_basket_size": { <3> + "avg": { + "field": "total_quantity" + } + } + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Daily revenue +<2> Uses the <> aggregation to count unique customers per day +<3> Average number of items per order + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 119, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, + "relation": "eq" + }, + "max_score": null, + "hits": [] + }, + "aggregations": { + "daily_sales": { + "buckets": [ + { + "key_as_string": "2024-11-14", + "key": 1731542400000, + "doc_count": 146, + "unique_customers": { <1> + "value": 42 + }, + "revenue": { <2> + "value": 10578.53125 + }, + "avg_basket_size": { <3> + "value": 2.1780821917808217 + } + }, + { + "key_as_string": "2024-11-15", + "key": 1731628800000, + "doc_count": 153, + "unique_customers": { + "value": 44 + }, + "revenue": { + "value": 10448 + }, + "avg_basket_size": { + "value": 2.183006535947712 + } + }, + { + "key_as_string": "2024-11-16", + "key": 1731715200000, + "doc_count": 143, + 
"unique_customers": { + "value": 45 + }, + "revenue": { + "value": 10283.484375 + }, + "avg_basket_size": { + "value": 2.111888111888112 + } + }, + { + "key_as_string": "2024-11-17", + "key": 1731801600000, + "doc_count": 140, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 10145.5234375 + }, + "avg_basket_size": { + "value": 2.142857142857143 + } + }, + { + "key_as_string": "2024-11-18", + "key": 1731888000000, + "doc_count": 139, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 12012.609375 + }, + "avg_basket_size": { + "value": 2.158273381294964 + } + }, + { + "key_as_string": "2024-11-19", + "key": 1731974400000, + "doc_count": 157, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 11009.45703125 + }, + "avg_basket_size": { + "value": 2.0955414012738856 + } + }, + { + "key_as_string": "2024-11-20", + "key": 1732060800000, + "doc_count": 145, + "unique_customers": { + "value": 44 + }, + "revenue": { + "value": 10720.59375 + }, + "avg_basket_size": { + "value": 2.179310344827586 + } + }, + { + "key_as_string": "2024-11-21", + "key": 1732147200000, + "doc_count": 152, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 11185.3671875 + }, + "avg_basket_size": { + "value": 2.1710526315789473 + } + }, + { + "key_as_string": "2024-11-22", + "key": 1732233600000, + "doc_count": 163, + "unique_customers": { + "value": 44 + }, + "revenue": { + "value": 13560.140625 + }, + "avg_basket_size": { + "value": 2.2576687116564416 + } + }, + { + "key_as_string": "2024-11-23", + "key": 1732320000000, + "doc_count": 141, + "unique_customers": { + "value": 45 + }, + "revenue": { + "value": 9884.78125 + }, + "avg_basket_size": { + "value": 2.099290780141844 + } + }, + { + "key_as_string": "2024-11-24", + "key": 1732406400000, + "doc_count": 151, + "unique_customers": { + "value": 44 + }, + "revenue": { + "value": 11075.65625 + }, + "avg_basket_size": { + "value": 2.0927152317880795 + } + }, + { + 
"key_as_string": "2024-11-25", + "key": 1732492800000, + "doc_count": 143, + "unique_customers": { + "value": 41 + }, + "revenue": { + "value": 10323.8515625 + }, + "avg_basket_size": { + "value": 2.167832167832168 + } + }, + { + "key_as_string": "2024-11-26", + "key": 1732579200000, + "doc_count": 143, + "unique_customers": { + "value": 44 + }, + "revenue": { + "value": 10369.546875 + }, + "avg_basket_size": { + "value": 2.167832167832168 + } + }, + { + "key_as_string": "2024-11-27", + "key": 1732665600000, + "doc_count": 142, + "unique_customers": { + "value": 46 + }, + "revenue": { + "value": 11711.890625 + }, + "avg_basket_size": { + "value": 2.1971830985915495 + } + }, + { + "key_as_string": "2024-11-28", + "key": 1732752000000, + "doc_count": 161, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 12612.6640625 + }, + "avg_basket_size": { + "value": 2.1180124223602483 + } + }, + { + "key_as_string": "2024-11-29", + "key": 1732838400000, + "doc_count": 144, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 10176.87890625 + }, + "avg_basket_size": { + "value": 2.0347222222222223 + } + }, + { + "key_as_string": "2024-11-30", + "key": 1732924800000, + "doc_count": 157, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 11480.33203125 + }, + "avg_basket_size": { + "value": 2.159235668789809 + } + }, + { + "key_as_string": "2024-12-01", + "key": 1733011200000, + "doc_count": 158, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 11533.265625 + }, + "avg_basket_size": { + "value": 2.0822784810126582 + } + }, + { + "key_as_string": "2024-12-02", + "key": 1733097600000, + "doc_count": 144, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 10499.8125 + }, + "avg_basket_size": { + "value": 2.201388888888889 + } + }, + { + "key_as_string": "2024-12-03", + "key": 1733184000000, + "doc_count": 151, + "unique_customers": { + "value": 40 + }, + "revenue": { + "value": 12111.6875 + }, 
+ "avg_basket_size": { + "value": 2.172185430463576 + } + }, + { + "key_as_string": "2024-12-04", + "key": 1733270400000, + "doc_count": 145, + "unique_customers": { + "value": 40 + }, + "revenue": { + "value": 10530.765625 + }, + "avg_basket_size": { + "value": 2.0965517241379312 + } + }, + { + "key_as_string": "2024-12-05", + "key": 1733356800000, + "doc_count": 157, + "unique_customers": { + "value": 43 + }, + "revenue": { + "value": 11872.5625 + }, + "avg_basket_size": { + "value": 2.1464968152866244 + } + }, + { + "key_as_string": "2024-12-06", + "key": 1733443200000, + "doc_count": 158, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 12109.453125 + }, + "avg_basket_size": { + "value": 2.151898734177215 + } + }, + { + "key_as_string": "2024-12-07", + "key": 1733529600000, + "doc_count": 153, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 11057.40625 + }, + "avg_basket_size": { + "value": 2.111111111111111 + } + }, + { + "key_as_string": "2024-12-08", + "key": 1733616000000, + "doc_count": 165, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 13095.609375 + }, + "avg_basket_size": { + "value": 2.1818181818181817 + } + }, + { + "key_as_string": "2024-12-09", + "key": 1733702400000, + "doc_count": 153, + "unique_customers": { + "value": 41 + }, + "revenue": { + "value": 12574.015625 + }, + "avg_basket_size": { + "value": 2.2287581699346406 + } + }, + { + "key_as_string": "2024-12-10", + "key": 1733788800000, + "doc_count": 158, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 11188.1875 + }, + "avg_basket_size": { + "value": 2.151898734177215 + } + }, + { + "key_as_string": "2024-12-11", + "key": 1733875200000, + "doc_count": 160, + "unique_customers": { + "value": 42 + }, + "revenue": { + "value": 12117.65625 + }, + "avg_basket_size": { + "value": 2.20625 + } + }, + { + "key_as_string": "2024-12-12", + "key": 1733961600000, + "doc_count": 159, + "unique_customers": { + "value": 45 
+ }, + "revenue": { + "value": 11558.25 + }, + "avg_basket_size": { + "value": 2.1823899371069184 + } + }, + { + "key_as_string": "2024-12-13", + "key": 1734048000000, + "doc_count": 152, + "unique_customers": { + "value": 45 + }, + "revenue": { + "value": 11921.1171875 + }, + "avg_basket_size": { + "value": 2.289473684210526 + } + }, + { + "key_as_string": "2024-12-14", + "key": 1734134400000, + "doc_count": 142, + "unique_customers": { + "value": 45 + }, + "revenue": { + "value": 11135.03125 + }, + "avg_basket_size": { + "value": 2.183098591549296 + } + } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +============== + +[discrete] +[[aggregations-tutorial-trends]] +=== Track trends and patterns + +You can use <> on the results of other aggregations. +Let's analyze how metrics change over time. + +[discrete] +[[aggregations-tutorial-moving-average]] +==== Smooth out daily fluctuations + +Moving averages help identify trends by reducing day-to-day noise in the data. +Let's observe sales trends more clearly by smoothing daily revenue variations, using the <> aggregation. + +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day" + }, + "aggs": { + "daily_revenue": { <1> + "sum": { + "field": "taxful_total_price" + } + }, + "smoothed_revenue": { <2> + "moving_fn": { <3> + "buckets_path": "daily_revenue", <4> + "window": 3, <5> + "script": "MovingFunctions.unweightedAvg(values)" <6> + } + } + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Calculate daily revenue first. +<2> Create a smoothed version of the daily revenue. +<3> Use `moving_fn` for moving window calculations. +<4> Reference the revenue from our date histogram. +<5> Use a 3-day window — use different window sizes to see trends at different time scales. +<6> Use the built-in unweighted average function in the `moving_fn` aggregation. 
+ +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 13, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, + "relation": "eq" + }, + "max_score": null, + "hits": [] + }, + "aggregations": { + "daily_sales": { + "buckets": [ + { + "key_as_string": "2024-11-14T00:00:00.000Z", <1> + "key": 1731542400000, + "doc_count": 146, <2> + "daily_revenue": { <3> + "value": 10578.53125 + }, + "smoothed_revenue": { <4> + "value": null + } + }, + { + "key_as_string": "2024-11-15T00:00:00.000Z", + "key": 1731628800000, + "doc_count": 153, + "daily_revenue": { + "value": 10448 + }, + "smoothed_revenue": { <5> + "value": 10578.53125 + } + }, + { + "key_as_string": "2024-11-16T00:00:00.000Z", + "key": 1731715200000, + "doc_count": 143, + "daily_revenue": { + "value": 10283.484375 + }, + "smoothed_revenue": { + "value": 10513.265625 + } + }, + { + "key_as_string": "2024-11-17T00:00:00.000Z", + "key": 1731801600000, + "doc_count": 140, + "daily_revenue": { + "value": 10145.5234375 + }, + "smoothed_revenue": { + "value": 10436.671875 + } + }, + { + "key_as_string": "2024-11-18T00:00:00.000Z", + "key": 1731888000000, + "doc_count": 139, + "daily_revenue": { + "value": 12012.609375 + }, + "smoothed_revenue": { + "value": 10292.3359375 + } + }, + { + "key_as_string": "2024-11-19T00:00:00.000Z", + "key": 1731974400000, + "doc_count": 157, + "daily_revenue": { + "value": 11009.45703125 + }, + "smoothed_revenue": { + "value": 10813.872395833334 + } + }, + { + "key_as_string": "2024-11-20T00:00:00.000Z", + "key": 1732060800000, + "doc_count": 145, + "daily_revenue": { + "value": 10720.59375 + }, + "smoothed_revenue": { + "value": 11055.86328125 + } + }, + { + "key_as_string": "2024-11-21T00:00:00.000Z", + "key": 1732147200000, + "doc_count": 152, + "daily_revenue": { + "value": 11185.3671875 + }, + "smoothed_revenue": { + "value": 11247.553385416666 + } + 
}, + { + "key_as_string": "2024-11-22T00:00:00.000Z", + "key": 1732233600000, + "doc_count": 163, + "daily_revenue": { + "value": 13560.140625 + }, + "smoothed_revenue": { + "value": 10971.805989583334 + } + }, + { + "key_as_string": "2024-11-23T00:00:00.000Z", + "key": 1732320000000, + "doc_count": 141, + "daily_revenue": { + "value": 9884.78125 + }, + "smoothed_revenue": { + "value": 11822.033854166666 + } + }, + { + "key_as_string": "2024-11-24T00:00:00.000Z", + "key": 1732406400000, + "doc_count": 151, + "daily_revenue": { + "value": 11075.65625 + }, + "smoothed_revenue": { + "value": 11543.4296875 + } + }, + { + "key_as_string": "2024-11-25T00:00:00.000Z", + "key": 1732492800000, + "doc_count": 143, + "daily_revenue": { + "value": 10323.8515625 + }, + "smoothed_revenue": { + "value": 11506.859375 + } + }, + { + "key_as_string": "2024-11-26T00:00:00.000Z", + "key": 1732579200000, + "doc_count": 143, + "daily_revenue": { + "value": 10369.546875 + }, + "smoothed_revenue": { + "value": 10428.096354166666 + } + }, + { + "key_as_string": "2024-11-27T00:00:00.000Z", + "key": 1732665600000, + "doc_count": 142, + "daily_revenue": { + "value": 11711.890625 + }, + "smoothed_revenue": { + "value": 10589.684895833334 + } + }, + { + "key_as_string": "2024-11-28T00:00:00.000Z", + "key": 1732752000000, + "doc_count": 161, + "daily_revenue": { + "value": 12612.6640625 + }, + "smoothed_revenue": { + "value": 10801.763020833334 + } + }, + { + "key_as_string": "2024-11-29T00:00:00.000Z", + "key": 1732838400000, + "doc_count": 144, + "daily_revenue": { + "value": 10176.87890625 + }, + "smoothed_revenue": { + "value": 11564.700520833334 + } + }, + { + "key_as_string": "2024-11-30T00:00:00.000Z", + "key": 1732924800000, + "doc_count": 157, + "daily_revenue": { + "value": 11480.33203125 + }, + "smoothed_revenue": { + "value": 11500.477864583334 + } + }, + { + "key_as_string": "2024-12-01T00:00:00.000Z", + "key": 1733011200000, + "doc_count": 158, + "daily_revenue": { + "value": 
11533.265625 + }, + "smoothed_revenue": { + "value": 11423.291666666666 + } + }, + { + "key_as_string": "2024-12-02T00:00:00.000Z", + "key": 1733097600000, + "doc_count": 144, + "daily_revenue": { + "value": 10499.8125 + }, + "smoothed_revenue": { + "value": 11063.4921875 + } + }, + { + "key_as_string": "2024-12-03T00:00:00.000Z", + "key": 1733184000000, + "doc_count": 151, + "daily_revenue": { + "value": 12111.6875 + }, + "smoothed_revenue": { + "value": 11171.13671875 + } + }, + { + "key_as_string": "2024-12-04T00:00:00.000Z", + "key": 1733270400000, + "doc_count": 145, + "daily_revenue": { + "value": 10530.765625 + }, + "smoothed_revenue": { + "value": 11381.588541666666 + } + }, + { + "key_as_string": "2024-12-05T00:00:00.000Z", + "key": 1733356800000, + "doc_count": 157, + "daily_revenue": { + "value": 11872.5625 + }, + "smoothed_revenue": { + "value": 11047.421875 + } + }, + { + "key_as_string": "2024-12-06T00:00:00.000Z", + "key": 1733443200000, + "doc_count": 158, + "daily_revenue": { + "value": 12109.453125 + }, + "smoothed_revenue": { + "value": 11505.005208333334 + } + }, + { + "key_as_string": "2024-12-07T00:00:00.000Z", + "key": 1733529600000, + "doc_count": 153, + "daily_revenue": { + "value": 11057.40625 + }, + "smoothed_revenue": { + "value": 11504.260416666666 + } + }, + { + "key_as_string": "2024-12-08T00:00:00.000Z", + "key": 1733616000000, + "doc_count": 165, + "daily_revenue": { + "value": 13095.609375 + }, + "smoothed_revenue": { + "value": 11679.807291666666 + } + }, + { + "key_as_string": "2024-12-09T00:00:00.000Z", + "key": 1733702400000, + "doc_count": 153, + "daily_revenue": { + "value": 12574.015625 + }, + "smoothed_revenue": { + "value": 12087.489583333334 + } + }, + { + "key_as_string": "2024-12-10T00:00:00.000Z", + "key": 1733788800000, + "doc_count": 158, + "daily_revenue": { + "value": 11188.1875 + }, + "smoothed_revenue": { + "value": 12242.34375 + } + }, + { + "key_as_string": "2024-12-11T00:00:00.000Z", + "key": 1733875200000, + 
"doc_count": 160, + "daily_revenue": { + "value": 12117.65625 + }, + "smoothed_revenue": { + "value": 12285.9375 + } + }, + { + "key_as_string": "2024-12-12T00:00:00.000Z", + "key": 1733961600000, + "doc_count": 159, + "daily_revenue": { + "value": 11558.25 + }, + "smoothed_revenue": { + "value": 11959.953125 + } + }, + { + "key_as_string": "2024-12-13T00:00:00.000Z", + "key": 1734048000000, + "doc_count": 152, + "daily_revenue": { + "value": 11921.1171875 + }, + "smoothed_revenue": { + "value": 11621.364583333334 + } + }, + { + "key_as_string": "2024-12-14T00:00:00.000Z", + "key": 1734134400000, + "doc_count": 142, + "daily_revenue": { + "value": 11135.03125 + }, + "smoothed_revenue": { + "value": 11865.674479166666 + } + } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Date of the bucket is in default ISO format because we didn't specify a format +<2> Number of orders for this day +<3> Raw daily revenue before smoothing +<4> First day has no smoothed value as it needs previous days for the calculation +<5> Moving average starts from second day, using a 3-day window +============== + +[TIP] +==== +Notice how the smoothed values lag behind the actual values - this is because they need previous days' data to calculate. The first day will always be null when using moving averages. +==== + +[discrete] +[[aggregations-tutorial-cumulative]] +==== Track running totals + +Track running totals over time using the <> aggregation. 
+ +[source,console] +---- +GET kibana_sample_data_ecommerce/_search +{ + "size": 0, + "aggs": { + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day" + }, + "aggs": { + "revenue": { + "sum": { + "field": "taxful_total_price" + } + }, + "cumulative_revenue": { <1> + "cumulative_sum": { <2> + "buckets_path": "revenue" <3> + } + } + } + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> Name for our running total +<2> `cumulative_sum` adds up values across buckets +<3> Reference the revenue we want to accumulate + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 4, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4675, + "relation": "eq" + }, + "max_score": null, + "hits": [] + }, + "aggregations": { + "daily_sales": { <1> + "buckets": [ <2> + { + "key_as_string": "2024-11-14T00:00:00.000Z", <3> + "key": 1731542400000, + "doc_count": 146, + "revenue": { <4> + "value": 10578.53125 + }, + "cumulative_revenue": { <5> + "value": 10578.53125 + } + }, + { + "key_as_string": "2024-11-15T00:00:00.000Z", + "key": 1731628800000, + "doc_count": 153, + "revenue": { + "value": 10448 + }, + "cumulative_revenue": { + "value": 21026.53125 + } + }, + { + "key_as_string": "2024-11-16T00:00:00.000Z", + "key": 1731715200000, + "doc_count": 143, + "revenue": { + "value": 10283.484375 + }, + "cumulative_revenue": { + "value": 31310.015625 + } + }, + { + "key_as_string": "2024-11-17T00:00:00.000Z", + "key": 1731801600000, + "doc_count": 140, + "revenue": { + "value": 10145.5234375 + }, + "cumulative_revenue": { + "value": 41455.5390625 + } + }, + { + "key_as_string": "2024-11-18T00:00:00.000Z", + "key": 1731888000000, + "doc_count": 139, + "revenue": { + "value": 12012.609375 + }, + "cumulative_revenue": { + "value": 53468.1484375 + } + }, + { + "key_as_string": "2024-11-19T00:00:00.000Z", + "key": 
1731974400000, + "doc_count": 157, + "revenue": { + "value": 11009.45703125 + }, + "cumulative_revenue": { + "value": 64477.60546875 + } + }, + { + "key_as_string": "2024-11-20T00:00:00.000Z", + "key": 1732060800000, + "doc_count": 145, + "revenue": { + "value": 10720.59375 + }, + "cumulative_revenue": { + "value": 75198.19921875 + } + }, + { + "key_as_string": "2024-11-21T00:00:00.000Z", + "key": 1732147200000, + "doc_count": 152, + "revenue": { + "value": 11185.3671875 + }, + "cumulative_revenue": { + "value": 86383.56640625 + } + }, + { + "key_as_string": "2024-11-22T00:00:00.000Z", + "key": 1732233600000, + "doc_count": 163, + "revenue": { + "value": 13560.140625 + }, + "cumulative_revenue": { + "value": 99943.70703125 + } + }, + { + "key_as_string": "2024-11-23T00:00:00.000Z", + "key": 1732320000000, + "doc_count": 141, + "revenue": { + "value": 9884.78125 + }, + "cumulative_revenue": { + "value": 109828.48828125 + } + }, + { + "key_as_string": "2024-11-24T00:00:00.000Z", + "key": 1732406400000, + "doc_count": 151, + "revenue": { + "value": 11075.65625 + }, + "cumulative_revenue": { + "value": 120904.14453125 + } + }, + { + "key_as_string": "2024-11-25T00:00:00.000Z", + "key": 1732492800000, + "doc_count": 143, + "revenue": { + "value": 10323.8515625 + }, + "cumulative_revenue": { + "value": 131227.99609375 + } + }, + { + "key_as_string": "2024-11-26T00:00:00.000Z", + "key": 1732579200000, + "doc_count": 143, + "revenue": { + "value": 10369.546875 + }, + "cumulative_revenue": { + "value": 141597.54296875 + } + }, + { + "key_as_string": "2024-11-27T00:00:00.000Z", + "key": 1732665600000, + "doc_count": 142, + "revenue": { + "value": 11711.890625 + }, + "cumulative_revenue": { + "value": 153309.43359375 + } + }, + { + "key_as_string": "2024-11-28T00:00:00.000Z", + "key": 1732752000000, + "doc_count": 161, + "revenue": { + "value": 12612.6640625 + }, + "cumulative_revenue": { + "value": 165922.09765625 + } + }, + { + "key_as_string": "2024-11-29T00:00:00.000Z", + 
"key": 1732838400000, + "doc_count": 144, + "revenue": { + "value": 10176.87890625 + }, + "cumulative_revenue": { + "value": 176098.9765625 + } + }, + { + "key_as_string": "2024-11-30T00:00:00.000Z", + "key": 1732924800000, + "doc_count": 157, + "revenue": { + "value": 11480.33203125 + }, + "cumulative_revenue": { + "value": 187579.30859375 + } + }, + { + "key_as_string": "2024-12-01T00:00:00.000Z", + "key": 1733011200000, + "doc_count": 158, + "revenue": { + "value": 11533.265625 + }, + "cumulative_revenue": { + "value": 199112.57421875 + } + }, + { + "key_as_string": "2024-12-02T00:00:00.000Z", + "key": 1733097600000, + "doc_count": 144, + "revenue": { + "value": 10499.8125 + }, + "cumulative_revenue": { + "value": 209612.38671875 + } + }, + { + "key_as_string": "2024-12-03T00:00:00.000Z", + "key": 1733184000000, + "doc_count": 151, + "revenue": { + "value": 12111.6875 + }, + "cumulative_revenue": { + "value": 221724.07421875 + } + }, + { + "key_as_string": "2024-12-04T00:00:00.000Z", + "key": 1733270400000, + "doc_count": 145, + "revenue": { + "value": 10530.765625 + }, + "cumulative_revenue": { + "value": 232254.83984375 + } + }, + { + "key_as_string": "2024-12-05T00:00:00.000Z", + "key": 1733356800000, + "doc_count": 157, + "revenue": { + "value": 11872.5625 + }, + "cumulative_revenue": { + "value": 244127.40234375 + } + }, + { + "key_as_string": "2024-12-06T00:00:00.000Z", + "key": 1733443200000, + "doc_count": 158, + "revenue": { + "value": 12109.453125 + }, + "cumulative_revenue": { + "value": 256236.85546875 + } + }, + { + "key_as_string": "2024-12-07T00:00:00.000Z", + "key": 1733529600000, + "doc_count": 153, + "revenue": { + "value": 11057.40625 + }, + "cumulative_revenue": { + "value": 267294.26171875 + } + }, + { + "key_as_string": "2024-12-08T00:00:00.000Z", + "key": 1733616000000, + "doc_count": 165, + "revenue": { + "value": 13095.609375 + }, + "cumulative_revenue": { + "value": 280389.87109375 + } + }, + { + "key_as_string": 
"2024-12-09T00:00:00.000Z", + "key": 1733702400000, + "doc_count": 153, + "revenue": { + "value": 12574.015625 + }, + "cumulative_revenue": { + "value": 292963.88671875 + } + }, + { + "key_as_string": "2024-12-10T00:00:00.000Z", + "key": 1733788800000, + "doc_count": 158, + "revenue": { + "value": 11188.1875 + }, + "cumulative_revenue": { + "value": 304152.07421875 + } + }, + { + "key_as_string": "2024-12-11T00:00:00.000Z", + "key": 1733875200000, + "doc_count": 160, + "revenue": { + "value": 12117.65625 + }, + "cumulative_revenue": { + "value": 316269.73046875 + } + }, + { + "key_as_string": "2024-12-12T00:00:00.000Z", + "key": 1733961600000, + "doc_count": 159, + "revenue": { + "value": 11558.25 + }, + "cumulative_revenue": { + "value": 327827.98046875 + } + }, + { + "key_as_string": "2024-12-13T00:00:00.000Z", + "key": 1734048000000, + "doc_count": 152, + "revenue": { + "value": 11921.1171875 + }, + "cumulative_revenue": { + "value": 339749.09765625 + } + }, + { + "key_as_string": "2024-12-14T00:00:00.000Z", + "key": 1734134400000, + "doc_count": 142, + "revenue": { + "value": 11135.03125 + }, + "cumulative_revenue": { + "value": 350884.12890625 + } + } + ] + } + } +} +---- +// TEST[skip:Using Kibana sample data] +<1> `daily_sales`: Results from our daily sales date histogram +<2> `buckets`: Array of time-based buckets +<3> `key_as_string`: Date for this bucket (in ISO format since no format specified) +<4> `revenue`: Daily revenue for this date +<5> `cumulative_revenue`: Running total of revenue up to this date +============== + +[discrete] +[[aggregations-tutorial-next-steps]] +=== Next steps + +Refer to the <> for more details on all available aggregation types. 
\ No newline at end of file diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc index 3fa6d53e6345d..cb3a5f2440220 100644 --- a/docs/reference/quickstart/index.asciidoc +++ b/docs/reference/quickstart/index.asciidoc @@ -25,6 +25,7 @@ Alternatively, refer to our <>. Learn about indices, documents, and mappings, and perform a basic search using the Query DSL. * <>. Learn about different options for querying data, including full-text search and filtering, using the Query DSL. +* <>. Learn how to analyze data using different types of aggregations, including metrics, buckets, and pipelines. * <>: Learn how to create embeddings for your data with `semantic_text` and query using the `semantic` query. ** <>: Learn how to combine semantic search with full-text search. * <>: Learn how to ingest dense vector embeddings into {es}. @@ -40,3 +41,4 @@ If you're interested in using {es} with Python, check out Elastic Search Labs: include::getting-started.asciidoc[] include::full-text-filtering-tutorial.asciidoc[] +include::aggs-tutorial.asciidoc[] From c7925957f91fc4ba5a2b834f0e368dd4864f8cf4 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 11 Dec 2024 17:30:43 +0200 Subject: [PATCH 04/11] Unify logsdb index settings providers (#118342) * Unify logsdb index settings providers * restore diff * rename method --- .../xpack/logsdb/LogsDBPlugin.java | 17 +- .../LogsdbIndexModeSettingsProvider.java | 175 +++++++- .../SyntheticSourceIndexSettingsProvider.java | 200 --------- .../LogsdbIndexModeSettingsProviderTests.java | 408 +++++++++++++++++ ...exSettingsProviderLegacyLicenseTests.java} | 15 +- ...heticSourceIndexSettingsProviderTests.java | 417 ------------------ 6 files changed, 588 insertions(+), 644 deletions(-) delete mode 100644 x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java rename 
x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/{SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java => LogsdbIndexSettingsProviderLegacyLicenseTests.java} (91%) delete mode 100644 x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 904b00e6d0450..a8085f3d50a82 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -43,7 +43,7 @@ public class LogsDBPlugin extends Plugin implements ActionPlugin { public LogsDBPlugin(Settings settings) { this.settings = settings; this.licenseService = new SyntheticSourceLicenseService(settings); - this.logsdbIndexModeSettingsProvider = new LogsdbIndexModeSettingsProvider(settings); + this.logsdbIndexModeSettingsProvider = new LogsdbIndexModeSettingsProvider(licenseService, settings); } @Override @@ -67,16 +67,13 @@ public Collection createComponents(PluginServices services) { @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { - if (DiscoveryNode.isStateless(settings)) { - return List.of(logsdbIndexModeSettingsProvider); + if (DiscoveryNode.isStateless(settings) == false) { + logsdbIndexModeSettingsProvider.init( + parameters.mapperServiceFactory(), + () -> parameters.clusterService().state().nodes().getMinSupportedIndexVersion() + ); } - var syntheticSettingProvider = new SyntheticSourceIndexSettingsProvider( - licenseService, - parameters.mapperServiceFactory(), - logsdbIndexModeSettingsProvider, - () -> parameters.clusterService().state().nodes().getMinSupportedIndexVersion() - ); - return List.of(syntheticSettingProvider, logsdbIndexModeSettingsProvider); + return 
List.of(logsdbIndexModeSettingsProvider); } @Override diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index 481657eaf7225..977b0e1c57578 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -7,25 +7,45 @@ package org.elasticsearch.xpack.logsdb; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import java.io.IOException; import java.time.Instant; import java.util.List; import java.util.Locale; +import java.util.function.Supplier; +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_PATH; import static org.elasticsearch.xpack.logsdb.LogsDBPlugin.CLUSTER_LOGSDB_ENABLED; final class LogsdbIndexModeSettingsProvider implements IndexSettingProvider { + private static final Logger LOGGER = LogManager.getLogger(LogsdbIndexModeSettingsProvider.class); private static final String LOGS_PATTERN = "logs-*-*"; + + private final SyntheticSourceLicenseService 
syntheticSourceLicenseService; + private final SetOnce> mapperServiceFactory = new SetOnce<>(); + private final SetOnce> createdIndexVersion = new SetOnce<>(); + private volatile boolean isLogsdbEnabled; - LogsdbIndexModeSettingsProvider(final Settings settings) { + LogsdbIndexModeSettingsProvider(SyntheticSourceLicenseService syntheticSourceLicenseService, final Settings settings) { + this.syntheticSourceLicenseService = syntheticSourceLicenseService; this.isLogsdbEnabled = CLUSTER_LOGSDB_ENABLED.get(settings); } @@ -33,6 +53,21 @@ void updateClusterIndexModeLogsdbEnabled(boolean isLogsdbEnabled) { this.isLogsdbEnabled = isLogsdbEnabled; } + void init(CheckedFunction factory, Supplier indexVersion) { + mapperServiceFactory.set(factory); + createdIndexVersion.set(indexVersion); + } + + private boolean supportFallbackToStoredSource() { + return mapperServiceFactory.get() != null; + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + // Indicates that the provider value takes precedence over any user setting. 
+ return true; + } + @Override public Settings getAdditionalIndexSettings( final String indexName, @@ -40,20 +75,42 @@ public Settings getAdditionalIndexSettings( IndexMode templateIndexMode, final Metadata metadata, final Instant resolvedAt, - final Settings settings, + Settings settings, final List combinedTemplateMappings ) { - return getLogsdbModeSetting(dataStreamName, settings); - } - - Settings getLogsdbModeSetting(final String dataStreamName, final Settings settings) { + Settings.Builder settingsBuilder = null; if (isLogsdbEnabled && dataStreamName != null && resolveIndexMode(settings.get(IndexSettings.MODE.getKey())) == null && matchesLogsPattern(dataStreamName)) { - return Settings.builder().put("index.mode", IndexMode.LOGSDB.getName()).build(); + settingsBuilder = Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()); + if (supportFallbackToStoredSource()) { + settings = Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()).put(settings).build(); + } + } + + if (supportFallbackToStoredSource()) { + // This index name is used when validating component and index templates, we should skip this check in that case. + // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) 
method) + boolean isTemplateValidation = "validate-index-name".equals(indexName); + boolean legacyLicensedUsageOfSyntheticSourceAllowed = isLegacyLicensedUsageOfSyntheticSourceAllowed( + templateIndexMode, + indexName, + dataStreamName + ); + if (newIndexHasSyntheticSourceUsage(indexName, templateIndexMode, settings, combinedTemplateMappings) + && syntheticSourceLicenseService.fallbackToStoredSource( + isTemplateValidation, + legacyLicensedUsageOfSyntheticSourceAllowed + )) { + LOGGER.debug("creation of index [{}] with synthetic source without it being allowed", indexName); + if (settingsBuilder == null) { + settingsBuilder = Settings.builder(); + } + settingsBuilder.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString()); + } } - return Settings.EMPTY; + return settingsBuilder == null ? Settings.EMPTY : settingsBuilder.build(); } private static boolean matchesLogsPattern(final String name) { @@ -63,4 +120,106 @@ private static boolean matchesLogsPattern(final String name) { private IndexMode resolveIndexMode(final String mode) { return mode != null ? Enum.valueOf(IndexMode.class, mode.toUpperCase(Locale.ROOT)) : null; } + + boolean newIndexHasSyntheticSourceUsage( + String indexName, + IndexMode templateIndexMode, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + if ("validate-index-name".equals(indexName)) { + // This index name is used when validating component and index templates, we should skip this check in that case. + // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) 
method) + return false; + } + + try { + var tmpIndexMetadata = buildIndexMetadataForMapperService(indexName, templateIndexMode, indexTemplateAndCreateRequestSettings); + var indexMode = tmpIndexMetadata.getIndexMode(); + if (SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.exists(tmpIndexMetadata.getSettings()) + || indexMode == IndexMode.LOGSDB + || indexMode == IndexMode.TIME_SERIES) { + // In case when index mode is tsdb or logsdb and only _source.mode mapping attribute is specified, then the default + // could be wrong. However, it doesn't really matter, because if the _source.mode mapping attribute is set to stored, + // then configuring the index.mapping.source.mode setting to stored has no effect. Additionally _source.mode can't be set + // to disabled, because that isn't allowed with logsdb/tsdb. In other words setting index.mapping.source.mode setting to + // stored when _source.mode mapping attribute is stored is fine as it has no effect, but avoids creating MapperService. + var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(tmpIndexMetadata.getSettings()); + return sourceMode == SourceFieldMapper.Mode.SYNTHETIC; + } + + // TODO: remove this when _source.mode attribute has been removed: + try (var mapperService = mapperServiceFactory.get().apply(tmpIndexMetadata)) { + // combinedTemplateMappings can be null when creating system indices + // combinedTemplateMappings can be empty when creating a normal index that doesn't match any template and without mapping. 
+ if (combinedTemplateMappings == null || combinedTemplateMappings.isEmpty()) { + combinedTemplateMappings = List.of(new CompressedXContent("{}")); + } + mapperService.merge(MapperService.SINGLE_MAPPING_NAME, combinedTemplateMappings, MapperService.MergeReason.INDEX_TEMPLATE); + return mapperService.documentMapper().sourceMapper().isSynthetic(); + } + } catch (AssertionError | Exception e) { + // In case invalid mappings or setting are provided, then mapper service creation can fail. + // In that case it is ok to return false here. The index creation will fail anyway later, so no need to fallback to stored + // source. + LOGGER.info(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); + return false; + } + } + + // Create a dummy IndexMetadata instance that can be used to create a MapperService in order to check whether synthetic source is used: + private IndexMetadata buildIndexMetadataForMapperService( + String indexName, + IndexMode templateIndexMode, + Settings indexTemplateAndCreateRequestSettings + ) { + var tmpIndexMetadata = IndexMetadata.builder(indexName); + + int dummyPartitionSize = IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.get(indexTemplateAndCreateRequestSettings); + int dummyShards = indexTemplateAndCreateRequestSettings.getAsInt( + IndexMetadata.SETTING_NUMBER_OF_SHARDS, + dummyPartitionSize == 1 ? 
1 : dummyPartitionSize + 1 + ); + int shardReplicas = indexTemplateAndCreateRequestSettings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0); + var finalResolvedSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, createdIndexVersion.get().get()) + .put(indexTemplateAndCreateRequestSettings) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, dummyShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, shardReplicas) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + + if (templateIndexMode == IndexMode.TIME_SERIES) { + finalResolvedSettings.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES); + // Avoid failing because index.routing_path is missing (in case fields are marked as dimension) + finalResolvedSettings.putList(INDEX_ROUTING_PATH.getKey(), List.of("path")); + } + + tmpIndexMetadata.settings(finalResolvedSettings); + return tmpIndexMetadata.build(); + } + + /** + * The GA-ed use cases in which synthetic source usage is allowed with gold or platinum license. 
+ */ + private boolean isLegacyLicensedUsageOfSyntheticSourceAllowed(IndexMode templateIndexMode, String indexName, String dataStreamName) { + if (templateIndexMode == IndexMode.TIME_SERIES) { + return true; + } + + // To allow the following patterns: profiling-metrics and profiling-events + if (dataStreamName != null && dataStreamName.startsWith("profiling-")) { + return true; + } + // To allow the following patterns: .profiling-sq-executables, .profiling-sq-leafframes and .profiling-stacktraces + if (indexName.startsWith(".profiling-")) { + return true; + } + // To allow the following patterns: metrics-apm.transaction.*, metrics-apm.service_transaction.*, metrics-apm.service_summary.*, + // metrics-apm.service_destination.*, "metrics-apm.internal-* and metrics-apm.app.* + if (dataStreamName != null && dataStreamName.startsWith("metrics-apm.")) { + return true; + } + + return false; + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java deleted file mode 100644 index 462bad4b19551..0000000000000 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.logsdb; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.Strings; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettingProvider; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.SourceFieldMapper; - -import java.io.IOException; -import java.time.Instant; -import java.util.List; -import java.util.function.Supplier; - -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_PATH; - -/** - * An index setting provider that overwrites the source mode from synthetic to stored if synthetic source isn't allowed to be used. 
- */ -final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider { - - private static final Logger LOGGER = LogManager.getLogger(SyntheticSourceIndexSettingsProvider.class); - - private final SyntheticSourceLicenseService syntheticSourceLicenseService; - private final CheckedFunction mapperServiceFactory; - private final LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider; - private final Supplier createdIndexVersion; - - SyntheticSourceIndexSettingsProvider( - SyntheticSourceLicenseService syntheticSourceLicenseService, - CheckedFunction mapperServiceFactory, - LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider, - Supplier createdIndexVersion - ) { - this.syntheticSourceLicenseService = syntheticSourceLicenseService; - this.mapperServiceFactory = mapperServiceFactory; - this.logsdbIndexModeSettingsProvider = logsdbIndexModeSettingsProvider; - this.createdIndexVersion = createdIndexVersion; - } - - @Override - public boolean overrulesTemplateAndRequestSettings() { - // Indicates that the provider value takes precedence over any user setting. - return true; - } - - @Override - public Settings getAdditionalIndexSettings( - String indexName, - String dataStreamName, - IndexMode templateIndexMode, - Metadata metadata, - Instant resolvedAt, - Settings indexTemplateAndCreateRequestSettings, - List combinedTemplateMappings - ) { - var logsdbSettings = logsdbIndexModeSettingsProvider.getLogsdbModeSetting(dataStreamName, indexTemplateAndCreateRequestSettings); - if (logsdbSettings != Settings.EMPTY) { - indexTemplateAndCreateRequestSettings = Settings.builder() - .put(logsdbSettings) - .put(indexTemplateAndCreateRequestSettings) - .build(); - } - - // This index name is used when validating component and index templates, we should skip this check in that case. - // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) 
method) - boolean isTemplateValidation = "validate-index-name".equals(indexName); - boolean legacyLicensedUsageOfSyntheticSourceAllowed = isLegacyLicensedUsageOfSyntheticSourceAllowed( - templateIndexMode, - indexName, - dataStreamName - ); - if (newIndexHasSyntheticSourceUsage(indexName, templateIndexMode, indexTemplateAndCreateRequestSettings, combinedTemplateMappings) - && syntheticSourceLicenseService.fallbackToStoredSource(isTemplateValidation, legacyLicensedUsageOfSyntheticSourceAllowed)) { - LOGGER.debug("creation of index [{}] with synthetic source without it being allowed", indexName); - return Settings.builder() - .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString()) - .build(); - } - return Settings.EMPTY; - } - - boolean newIndexHasSyntheticSourceUsage( - String indexName, - IndexMode templateIndexMode, - Settings indexTemplateAndCreateRequestSettings, - List combinedTemplateMappings - ) { - if ("validate-index-name".equals(indexName)) { - // This index name is used when validating component and index templates, we should skip this check in that case. - // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) method) - return false; - } - - try { - var tmpIndexMetadata = buildIndexMetadataForMapperService(indexName, templateIndexMode, indexTemplateAndCreateRequestSettings); - var indexMode = tmpIndexMetadata.getIndexMode(); - if (SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.exists(tmpIndexMetadata.getSettings()) - || indexMode == IndexMode.LOGSDB - || indexMode == IndexMode.TIME_SERIES) { - // In case when index mode is tsdb or logsdb and only _source.mode mapping attribute is specified, then the default - // could be wrong. However, it doesn't really matter, because if the _source.mode mapping attribute is set to stored, - // then configuring the index.mapping.source.mode setting to stored has no effect. 
Additionally _source.mode can't be set - // to disabled, because that isn't allowed with logsdb/tsdb. In other words setting index.mapping.source.mode setting to - // stored when _source.mode mapping attribute is stored is fine as it has no effect, but avoids creating MapperService. - var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(tmpIndexMetadata.getSettings()); - return sourceMode == SourceFieldMapper.Mode.SYNTHETIC; - } - - // TODO: remove this when _source.mode attribute has been removed: - try (var mapperService = mapperServiceFactory.apply(tmpIndexMetadata)) { - // combinedTemplateMappings can be null when creating system indices - // combinedTemplateMappings can be empty when creating a normal index that doesn't match any template and without mapping. - if (combinedTemplateMappings == null || combinedTemplateMappings.isEmpty()) { - combinedTemplateMappings = List.of(new CompressedXContent("{}")); - } - mapperService.merge(MapperService.SINGLE_MAPPING_NAME, combinedTemplateMappings, MapperService.MergeReason.INDEX_TEMPLATE); - return mapperService.documentMapper().sourceMapper().isSynthetic(); - } - } catch (AssertionError | Exception e) { - // In case invalid mappings or setting are provided, then mapper service creation can fail. - // In that case it is ok to return false here. The index creation will fail anyway later, so no need to fallback to stored - // source. 
- LOGGER.info(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); - return false; - } - } - - // Create a dummy IndexMetadata instance that can be used to create a MapperService in order to check whether synthetic source is used: - private IndexMetadata buildIndexMetadataForMapperService( - String indexName, - IndexMode templateIndexMode, - Settings indexTemplateAndCreateRequestSettings - ) { - var tmpIndexMetadata = IndexMetadata.builder(indexName); - - int dummyPartitionSize = IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.get(indexTemplateAndCreateRequestSettings); - int dummyShards = indexTemplateAndCreateRequestSettings.getAsInt( - IndexMetadata.SETTING_NUMBER_OF_SHARDS, - dummyPartitionSize == 1 ? 1 : dummyPartitionSize + 1 - ); - int shardReplicas = indexTemplateAndCreateRequestSettings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0); - var finalResolvedSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, createdIndexVersion.get()) - .put(indexTemplateAndCreateRequestSettings) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, dummyShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, shardReplicas) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - - if (templateIndexMode == IndexMode.TIME_SERIES) { - finalResolvedSettings.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES); - // Avoid failing because index.routing_path is missing (in case fields are marked as dimension) - finalResolvedSettings.putList(INDEX_ROUTING_PATH.getKey(), List.of("path")); - } - - tmpIndexMetadata.settings(finalResolvedSettings); - return tmpIndexMetadata.build(); - } - - /** - * The GA-ed use cases in which synthetic source usage is allowed with gold or platinum license. 
- */ - boolean isLegacyLicensedUsageOfSyntheticSourceAllowed(IndexMode templateIndexMode, String indexName, String dataStreamName) { - if (templateIndexMode == IndexMode.TIME_SERIES) { - return true; - } - - // To allow the following patterns: profiling-metrics and profiling-events - if (dataStreamName != null && dataStreamName.startsWith("profiling-")) { - return true; - } - // To allow the following patterns: .profiling-sq-executables, .profiling-sq-leafframes and .profiling-stacktraces - if (indexName.startsWith(".profiling-")) { - return true; - } - // To allow the following patterns: metrics-apm.transaction.*, metrics-apm.service_transaction.*, metrics-apm.service_summary.*, - // metrics-apm.service_destination.*, "metrics-apm.internal-* and metrics-apm.app.* - if (dataStreamName != null && dataStreamName.startsWith("metrics-apm.")) { - return true; - } - - return false; - } -} diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java index 5f23dbdca1143..de4f0960f50e7 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProviderTests.java @@ -9,19 +9,37 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; import 
org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.io.IOException; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.common.settings.Settings.builder; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class LogsdbIndexModeSettingsProviderTests extends ESTestCase { @@ -43,8 +61,39 @@ public class LogsdbIndexModeSettingsProviderTests extends ESTestCase { } """; + private SyntheticSourceLicenseService syntheticSourceLicenseService; + private final AtomicInteger newMapperServiceCounter = new AtomicInteger(); + + @Before + public void setup() throws Exception { + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(any())).thenReturn(true); + var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + License license = createEnterpriseLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); + } + + LogsdbIndexModeSettingsProvider 
withSyntheticSourceDemotionSupport(boolean enabled) { + newMapperServiceCounter.set(0); + var provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, + Settings.builder().put("cluster.logsdb.enabled", enabled).build() + ); + provider.init(im -> { + newMapperServiceCounter.incrementAndGet(); + return MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()); + }, IndexVersion::current); + return provider; + } + public void testLogsDbDisabled() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", false).build() ); @@ -63,6 +112,7 @@ public void testLogsDbDisabled() throws IOException { public void testOnIndexCreation() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -81,6 +131,7 @@ public void testOnIndexCreation() throws IOException { public void testOnExplicitStandardIndex() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -99,6 +150,7 @@ public void testOnExplicitStandardIndex() throws IOException { public void testOnExplicitTimeSeriesIndex() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -117,6 +169,7 @@ public void testOnExplicitTimeSeriesIndex() throws IOException { public void testNonLogsDataStream() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, 
Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -135,6 +188,7 @@ public void testNonLogsDataStream() throws IOException { public void testWithoutLogsComponentTemplate() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -153,6 +207,7 @@ public void testWithoutLogsComponentTemplate() throws IOException { public void testWithLogsComponentTemplate() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -171,6 +226,7 @@ public void testWithLogsComponentTemplate() throws IOException { public void testWithMultipleComponentTemplates() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -189,6 +245,7 @@ public void testWithMultipleComponentTemplates() throws IOException { public void testWithCustomComponentTemplatesOnly() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -207,6 +264,7 @@ public void testWithCustomComponentTemplatesOnly() throws IOException { public void testNonMatchingTemplateIndexPattern() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -225,6 +283,7 @@ public void testNonMatchingTemplateIndexPattern() throws IOException { public void testCaseSensitivity() throws IOException { final LogsdbIndexModeSettingsProvider provider = new 
LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -243,6 +302,7 @@ public void testCaseSensitivity() throws IOException { public void testMultipleHyphensInDataStreamName() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", true).build() ); @@ -261,6 +321,7 @@ public void testMultipleHyphensInDataStreamName() throws IOException { public void testBeforeAndAFterSettingUpdate() throws IOException { final LogsdbIndexModeSettingsProvider provider = new LogsdbIndexModeSettingsProvider( + syntheticSourceLicenseService, Settings.builder().put("cluster.logsdb.enabled", false).build() ); @@ -323,4 +384,351 @@ private void assertIndexMode(final Settings settings, final String expectedIndex assertEquals(expectedIndexMode, settings.get(IndexSettings.MODE.getKey())); } + public void testNewIndexHasSyntheticSourceUsage() throws IOException { + String dataStreamName = "logs-app1"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + Settings settings = Settings.EMPTY; + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); + { + String mapping = """ + { + "_doc": { + "_source": { + "mode": "synthetic" + }, + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertTrue(result); + assertThat(newMapperServiceCounter.get(), equalTo(1)); + assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); + } + { + String mapping; + boolean withSourceMode = randomBoolean(); + if (withSourceMode) { + mapping = """ + { + "_doc": { + "_source": { + "mode": "stored" + }, + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + } else { + mapping = 
""" + { + "_doc": { + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + } + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertFalse(result); + assertThat(newMapperServiceCounter.get(), equalTo(2)); + if (withSourceMode) { + assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); + } + } + } + + public void testValidateIndexName() throws IOException { + String indexName = "validate-index-name"; + String mapping = """ + { + "_doc": { + "_source": { + "mode": "synthetic" + }, + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + Settings settings = Settings.EMPTY; + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertFalse(result); + } + + public void testNewIndexHasSyntheticSourceUsageLogsdbIndex() throws IOException { + String dataStreamName = "logs-app1"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + String mapping = """ + { + "_doc": { + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); + { + Settings settings = Settings.builder().put("index.mode", "logsdb").build(); + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertTrue(result); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + } + { + Settings settings = Settings.builder().put("index.mode", "logsdb").build(); + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of()); + assertTrue(result); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + } + { + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, 
Settings.EMPTY, List.of()); + assertFalse(result); + assertThat(newMapperServiceCounter.get(), equalTo(1)); + } + { + boolean result = provider.newIndexHasSyntheticSourceUsage( + indexName, + null, + Settings.EMPTY, + List.of(new CompressedXContent(mapping)) + ); + assertFalse(result); + assertThat(newMapperServiceCounter.get(), equalTo(2)); + } + } + + public void testNewIndexHasSyntheticSourceUsageTimeSeries() throws IOException { + String dataStreamName = "logs-app1"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + String mapping = """ + { + "_doc": { + "properties": { + "my_field": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + """; + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); + { + Settings settings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "my_field").build(); + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertTrue(result); + } + { + Settings settings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "my_field").build(); + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of()); + assertTrue(result); + } + { + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, Settings.EMPTY, List.of()); + assertFalse(result); + } + { + boolean result = provider.newIndexHasSyntheticSourceUsage( + indexName, + null, + Settings.EMPTY, + List.of(new CompressedXContent(mapping)) + ); + assertFalse(result); + } + } + + public void testNewIndexHasSyntheticSourceUsage_invalidSettings() throws IOException { + String dataStreamName = "logs-app1"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + Settings settings = Settings.builder().put("index.soft_deletes.enabled", false).build(); + LogsdbIndexModeSettingsProvider provider = 
withSyntheticSourceDemotionSupport(false); + { + String mapping = """ + { + "_doc": { + "_source": { + "mode": "synthetic" + }, + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertFalse(result); + assertThat(newMapperServiceCounter.get(), equalTo(1)); + } + { + String mapping = """ + { + "_doc": { + "properties": { + "my_field": { + "type": "keyword" + } + } + } + } + """; + boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); + assertFalse(result); + assertThat(newMapperServiceCounter.get(), equalTo(2)); + } + } + + public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSource() throws IOException { + String dataStreamName = "logs-app1"; + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + Metadata metadata = mb.build(); + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(false); + Settings settings = builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC) + .build(); + + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(0)); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + + syntheticSourceLicenseService.setSyntheticSourceFallback(true); + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + 
assertThat(result.size(), equalTo(1)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + IndexMode.TIME_SERIES, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(1)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + IndexMode.LOGSDB, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(1)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + assertThat(newMapperServiceCounter.get(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch() throws IOException { + syntheticSourceLicenseService.setSyntheticSourceFallback(true); + LogsdbIndexModeSettingsProvider provider = withSyntheticSourceDemotionSupport(true); + final Settings settings = Settings.EMPTY; + + String dataStreamName = "logs-app1"; + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + Metadata metadata = mb.build(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(0)); + + dataStreamName = "logs-app1-0"; + mb = Metadata.builder( + 
DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + metadata = mb.build(); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(2)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + assertEquals(IndexMode.LOGSDB, IndexSettings.MODE.get(result)); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + builder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString()).build(), + List.of() + ); + assertThat(result.size(), equalTo(0)); + } + } diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexSettingsProviderLegacyLicenseTests.java similarity index 91% rename from x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java rename to x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexSettingsProviderLegacyLicenseTests.java index c871a7d0216ed..8a4adf18b3e67 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LogsdbIndexSettingsProviderLegacyLicenseTests.java @@ -25,15 +25,14 @@ import java.time.ZoneOffset; import java.util.List; -import static 
org.elasticsearch.xpack.logsdb.SyntheticSourceIndexSettingsProviderTests.getLogsdbIndexModeSettingsProvider; import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createGoldOrPlatinumLicense; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class SyntheticSourceIndexSettingsProviderLegacyLicenseTests extends ESTestCase { +public class LogsdbIndexSettingsProviderLegacyLicenseTests extends ESTestCase { - private SyntheticSourceIndexSettingsProvider provider; + private LogsdbIndexModeSettingsProvider provider; @Before public void setup() throws Exception { @@ -50,10 +49,9 @@ public void setup() throws Exception { syntheticSourceLicenseService.setLicenseState(licenseState); syntheticSourceLicenseService.setLicenseService(mockLicenseService); - provider = new SyntheticSourceIndexSettingsProvider( - syntheticSourceLicenseService, + provider = new LogsdbIndexModeSettingsProvider(syntheticSourceLicenseService, Settings.EMPTY); + provider.init( im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), - getLogsdbIndexModeSettingsProvider(false), IndexVersion::current ); } @@ -112,10 +110,9 @@ public void testGetAdditionalIndexSettingsTsdbAfterCutoffDate() throws Exception syntheticSourceLicenseService.setLicenseState(licenseState); syntheticSourceLicenseService.setLicenseService(mockLicenseService); - provider = new SyntheticSourceIndexSettingsProvider( - syntheticSourceLicenseService, + provider = new LogsdbIndexModeSettingsProvider(syntheticSourceLicenseService, Settings.EMPTY); + provider.init( im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), - getLogsdbIndexModeSettingsProvider(false), IndexVersion::current ); diff --git 
a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java deleted file mode 100644 index df1fb8f2d958c..0000000000000 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.logsdb; - -import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamTestHelper; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.MapperTestUtils; -import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.license.MockLicenseState; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.time.Instant; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.settings.Settings.builder; -import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; -import static org.hamcrest.Matchers.equalTo; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static 
org.mockito.Mockito.when; - -public class SyntheticSourceIndexSettingsProviderTests extends ESTestCase { - - private SyntheticSourceLicenseService syntheticSourceLicenseService; - private SyntheticSourceIndexSettingsProvider provider; - private final AtomicInteger newMapperServiceCounter = new AtomicInteger(); - - static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { - return new LogsdbIndexModeSettingsProvider(Settings.builder().put("cluster.logsdb.enabled", enabled).build()); - } - - @Before - public void setup() throws Exception { - MockLicenseState licenseState = MockLicenseState.createMock(); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); - licenseService.setLicenseState(licenseState); - var mockLicenseService = mock(LicenseService.class); - License license = createEnterpriseLicense(); - when(mockLicenseService.getLicense()).thenReturn(license); - syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); - syntheticSourceLicenseService.setLicenseState(licenseState); - syntheticSourceLicenseService.setLicenseService(mockLicenseService); - - provider = new SyntheticSourceIndexSettingsProvider(syntheticSourceLicenseService, im -> { - newMapperServiceCounter.incrementAndGet(); - return MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()); - }, getLogsdbIndexModeSettingsProvider(false), IndexVersion::current); - newMapperServiceCounter.set(0); - } - - public void testNewIndexHasSyntheticSourceUsage() throws IOException { - String dataStreamName = "logs-app1"; - String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); - Settings settings = Settings.EMPTY; - { - String mapping = """ - { - "_doc": { - "_source": { - "mode": "synthetic" - }, - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - boolean result = 
provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertTrue(result); - assertThat(newMapperServiceCounter.get(), equalTo(1)); - assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); - } - { - String mapping; - boolean withSourceMode = randomBoolean(); - if (withSourceMode) { - mapping = """ - { - "_doc": { - "_source": { - "mode": "stored" - }, - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - } else { - mapping = """ - { - "_doc": { - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - } - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertFalse(result); - assertThat(newMapperServiceCounter.get(), equalTo(2)); - if (withSourceMode) { - assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); - } - } - } - - public void testValidateIndexName() throws IOException { - String indexName = "validate-index-name"; - String mapping = """ - { - "_doc": { - "_source": { - "mode": "synthetic" - }, - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - Settings settings = Settings.EMPTY; - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertFalse(result); - } - - public void testNewIndexHasSyntheticSourceUsageLogsdbIndex() throws IOException { - String dataStreamName = "logs-app1"; - String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); - String mapping = """ - { - "_doc": { - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - { - Settings settings = Settings.builder().put("index.mode", "logsdb").build(); - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertTrue(result); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - } - { - 
Settings settings = Settings.builder().put("index.mode", "logsdb").build(); - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of()); - assertTrue(result); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - } - { - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, Settings.EMPTY, List.of()); - assertFalse(result); - assertThat(newMapperServiceCounter.get(), equalTo(1)); - } - { - boolean result = provider.newIndexHasSyntheticSourceUsage( - indexName, - null, - Settings.EMPTY, - List.of(new CompressedXContent(mapping)) - ); - assertFalse(result); - assertThat(newMapperServiceCounter.get(), equalTo(2)); - } - } - - public void testNewIndexHasSyntheticSourceUsageTimeSeries() throws IOException { - String dataStreamName = "logs-app1"; - String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); - String mapping = """ - { - "_doc": { - "properties": { - "my_field": { - "type": "keyword", - "time_series_dimension": true - } - } - } - } - """; - { - Settings settings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "my_field").build(); - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertTrue(result); - } - { - Settings settings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "my_field").build(); - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of()); - assertTrue(result); - } - { - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, Settings.EMPTY, List.of()); - assertFalse(result); - } - { - boolean result = provider.newIndexHasSyntheticSourceUsage( - indexName, - null, - Settings.EMPTY, - List.of(new CompressedXContent(mapping)) - ); - assertFalse(result); - } - } - - public void testNewIndexHasSyntheticSourceUsage_invalidSettings() throws IOException { - String 
dataStreamName = "logs-app1"; - String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); - Settings settings = Settings.builder().put("index.soft_deletes.enabled", false).build(); - { - String mapping = """ - { - "_doc": { - "_source": { - "mode": "synthetic" - }, - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertFalse(result); - assertThat(newMapperServiceCounter.get(), equalTo(1)); - } - { - String mapping = """ - { - "_doc": { - "properties": { - "my_field": { - "type": "keyword" - } - } - } - } - """; - boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); - assertFalse(result); - assertThat(newMapperServiceCounter.get(), equalTo(2)); - } - } - - public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSource() throws IOException { - String dataStreamName = "logs-app1"; - Metadata.Builder mb = Metadata.builder( - DataStreamTestHelper.getClusterStateWithDataStreams( - List.of(Tuple.tuple(dataStreamName, 1)), - List.of(), - Instant.now().toEpochMilli(), - builder().build(), - 1 - ).getMetadata() - ); - Metadata metadata = mb.build(); - - Settings settings = builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC) - .build(); - - Settings result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - null, - metadata, - Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(0)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - - syntheticSourceLicenseService.setSyntheticSourceFallback(true); - result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - null, - metadata, - 
Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(1)); - assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - - result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - IndexMode.TIME_SERIES, - metadata, - Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(1)); - assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - - result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - IndexMode.LOGSDB, - metadata, - Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(1)); - assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - } - - public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch() throws IOException { - syntheticSourceLicenseService.setSyntheticSourceFallback(true); - provider = new SyntheticSourceIndexSettingsProvider( - syntheticSourceLicenseService, - im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), - getLogsdbIndexModeSettingsProvider(true), - IndexVersion::current - ); - final Settings settings = Settings.EMPTY; - - String dataStreamName = "logs-app1"; - Metadata.Builder mb = Metadata.builder( - DataStreamTestHelper.getClusterStateWithDataStreams( - List.of(Tuple.tuple(dataStreamName, 1)), - List.of(), - Instant.now().toEpochMilli(), - builder().build(), - 1 - ).getMetadata() - ); - Metadata metadata = mb.build(); - Settings result = provider.getAdditionalIndexSettings( - 
DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - null, - metadata, - Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(0)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - - dataStreamName = "logs-app1-0"; - mb = Metadata.builder( - DataStreamTestHelper.getClusterStateWithDataStreams( - List.of(Tuple.tuple(dataStreamName, 1)), - List.of(), - Instant.now().toEpochMilli(), - builder().build(), - 1 - ).getMetadata() - ); - metadata = mb.build(); - - result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - null, - metadata, - Instant.ofEpochMilli(1L), - settings, - List.of() - ); - assertThat(result.size(), equalTo(1)); - assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - - result = provider.getAdditionalIndexSettings( - DataStream.getDefaultBackingIndexName(dataStreamName, 2), - dataStreamName, - null, - metadata, - Instant.ofEpochMilli(1L), - builder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString()).build(), - List.of() - ); - assertThat(result.size(), equalTo(0)); - assertThat(newMapperServiceCounter.get(), equalTo(0)); - } -} From d839205135774841bdad8a6604264e0256684830 Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Wed, 11 Dec 2024 10:47:46 -0500 Subject: [PATCH 05/11] Removing index alias creation for deprecated transforms notification index (#117583) * Removing index alias creation for deprecated transforms notification index * Update docs/changelog/117583.yaml * Updating changelog * Updating deprecation area to Transform --------- Co-authored-by: Elastic Machine --- docs/changelog/117583.yaml | 17 +++++ .../TransformInternalIndexConstants.java | 1 - .../integration/TransformAuditorIT.java | 27 -------- .../TransformClusterStateListener.java | 63 ------------------- 4 files 
changed, 17 insertions(+), 91 deletions(-) create mode 100644 docs/changelog/117583.yaml diff --git a/docs/changelog/117583.yaml b/docs/changelog/117583.yaml new file mode 100644 index 0000000000000..e0c482b8d9f72 --- /dev/null +++ b/docs/changelog/117583.yaml @@ -0,0 +1,17 @@ +pr: 117583 +summary: Removing index alias creation for deprecated transforms notification index +area: Machine Learning +type: deprecation +issues: [] +deprecation: + title: Removing index alias creation for deprecated transforms notification index + area: Transform + details: >- + As part of the migration from 7.x to 8.x, the `.data-frame-notifications-1` index + was deprecated and replaced with the `.transform-notifications-000002` index. + The index is no longer created by default, all writes are directed to the new index, + and any clusters with the deprecated index will have an alias created to ensure that + reads are still retrieving data that was written to the index before the migration to 8.x. + This change removes the alias from the deprecated index in 9.x. Any clusters with the alias present + will retain it, but it will not be created on new clusters. + impact: No known end user impact. 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/persistence/TransformInternalIndexConstants.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/persistence/TransformInternalIndexConstants.java index 0d54583b89976..8439c9cd76fad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/persistence/TransformInternalIndexConstants.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/persistence/TransformInternalIndexConstants.java @@ -45,7 +45,6 @@ public final class TransformInternalIndexConstants { public static final String AUDIT_TEMPLATE_VERSION = "000002"; public static final String AUDIT_INDEX_PREFIX = TRANSFORM_PREFIX + "notifications-"; public static final String AUDIT_INDEX_PATTERN = AUDIT_INDEX_PREFIX + "*"; - public static final String AUDIT_INDEX_DEPRECATED = TRANSFORM_PREFIX_DEPRECATED + "notifications-1"; public static final String AUDIT_INDEX_PATTERN_DEPRECATED = TRANSFORM_PREFIX_DEPRECATED + "notifications-*"; public static final String AUDIT_INDEX_READ_ALIAS = TRANSFORM_PREFIX + "notifications-read"; diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java index 7e31b7ec0c5e4..97851f79322b3 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java @@ -8,9 +8,6 @@ package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import 
org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.junit.Before; @@ -92,28 +89,4 @@ public void testAuditorWritesAudits() throws Exception { }); } - - public void testAliasCreatedforBWCIndexes() throws Exception { - Settings.Builder settings = indexSettings(1, 0); - - // These indices should only exist if created in previous versions, ignore the deprecation warning for this test - RequestOptions options = expectWarnings( - "index name [" - + TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED - + "] starts " - + "with a dot '.', in the next major version, index names starting with a dot are reserved for hidden indices " - + "and system indices" - ).toBuilder().addHeader("X-elastic-product-origin", "elastic").build(); - Request request = new Request("PUT", "/" + TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED); - String entity = "{\"settings\": " + Strings.toString(settings.build()) + "}"; - request.setJsonEntity(entity); - request.setOptions(options); - client().performRequest(request); - - assertBusy( - () -> assertTrue( - aliasExists(TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS) - ) - ); - } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformClusterStateListener.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformClusterStateListener.java index 4c867616e9be0..e49beb9d57f4d 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformClusterStateListener.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformClusterStateListener.java @@ -9,26 +9,18 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import 
org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - class TransformClusterStateListener implements ClusterStateListener, Supplier> { private static final Logger logger = LogManager.getLogger(TransformClusterStateListener.class); @@ -51,61 +43,6 @@ public void clusterChanged(ClusterChangedEvent event) { } clusterState.set(event.state()); - - // The atomic flag prevents multiple simultaneous attempts to run alias creation - // if there is a flurry of cluster state updates in quick succession - if (event.localNodeMaster() && isIndexCreationInProgress.compareAndSet(false, true)) { - createAuditAliasForDataFrameBWC(event.state(), client, ActionListener.wrap(r -> { - isIndexCreationInProgress.set(false); - if (r) { - logger.info("Created alias for deprecated data frame notifications index"); - } else { - logger.debug("Skipped creating alias for deprecated data frame notifications index"); - } - }, e -> { - isIndexCreationInProgress.set(false); - logger.error("Error creating alias for deprecated data frame notifications index", e); - })); - } - } - - private static void 
createAuditAliasForDataFrameBWC(ClusterState state, Client client, final ActionListener finalListener) { - - // check if old audit index exists, no need to create the alias if it does not - if (state.getMetadata().hasIndexAbstraction(TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED) == false) { - finalListener.onResponse(false); - return; - } - - Metadata metadata = state.metadata(); - if (state.getMetadata() - .getIndicesLookup() - .get(TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED) - .getIndices() - .stream() - .anyMatch(name -> metadata.index(name).getAliases().containsKey(TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS))) { - finalListener.onResponse(false); - return; - } - - final IndicesAliasesRequest request = client.admin() - .indices() - .prepareAliases() - .addAliasAction( - IndicesAliasesRequest.AliasActions.add() - .index(TransformInternalIndexConstants.AUDIT_INDEX_DEPRECATED) - .alias(TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS) - .isHidden(true) - ) - .request(); - - executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - TRANSFORM_ORIGIN, - request, - ActionListener.wrap(r -> finalListener.onResponse(r.isAcknowledged()), finalListener::onFailure), - client.admin().indices()::aliases - ); } /** From 03fa2705e7bbf38e886cc095a0e1723e6a524585 Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Wed, 11 Dec 2024 17:49:12 +0200 Subject: [PATCH 06/11] Specialize skip for InputStreamIndexInput (#118436) Skip would previously defer to the default implementation that reads bytes unnecessarily and may be slow. We now specialize it so that it seeks quickly. 
Closes ES-10234 --- .../lucene/store/InputStreamIndexInput.java | 11 ++++++ .../store/InputStreamIndexInputTests.java | 37 ++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java b/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java index 5603a1d4f1ab0..f3a3ec91ee931 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java @@ -88,4 +88,15 @@ public synchronized void reset() throws IOException { indexInput.seek(markPointer); counter = markCounter; } + + @Override + public long skip(long n) throws IOException { + long skipBytes = Math.min(n, Math.min(indexInput.length() - indexInput.getFilePointer(), limit - counter)); + if (skipBytes <= 0) { + return 0; + } + indexInput.skipBytes(skipBytes); + counter += skipBytes; + return skipBytes; + } } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java index a1bcf1b91fa4d..4bea6f50c7c4b 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java @@ -218,7 +218,7 @@ public void testReadMultiFourBytesLimit() throws IOException { assertThat(is.read(read), equalTo(-1)); } - public void testMarkRest() throws Exception { + public void testMarkReset() throws Exception { Directory dir = new ByteBuffersDirectory(); IndexOutput output = dir.createOutput("test", IOContext.DEFAULT); for (int i = 0; i < 3; i++) { @@ -243,6 +243,41 @@ public void testMarkRest() throws Exception { assertThat(is.read(), equalTo(2)); } + public void testSkipBytes() throws Exception { + Directory dir = new 
ByteBuffersDirectory(); + IndexOutput output = dir.createOutput("test", IOContext.DEFAULT); + int bytes = randomIntBetween(10, 100); + for (int i = 0; i < bytes; i++) { + output.writeByte((byte) i); + } + output.close(); + + int limit = randomIntBetween(0, bytes * 2); + int initialReadBytes = randomIntBetween(0, limit); + int skipBytes = randomIntBetween(0, limit); + int seekExpected = Math.min(Math.min(initialReadBytes + skipBytes, limit), bytes); + int skipBytesExpected = Math.max(seekExpected - initialReadBytes, 0); + logger.debug( + "bytes: {}, limit: {}, initialReadBytes: {}, skipBytes: {}, seekExpected: {}, skipBytesExpected: {}", + bytes, + limit, + initialReadBytes, + skipBytes, + seekExpected, + skipBytesExpected + ); + + IndexInput input = dir.openInput("test", IOContext.DEFAULT); + InputStreamIndexInput is = new InputStreamIndexInput(input, limit); + is.readNBytes(initialReadBytes); + assertThat(is.skip(skipBytes), equalTo((long) skipBytesExpected)); + + int remainingBytes = Math.min(bytes, limit) - seekExpected; + for (int i = seekExpected; i < seekExpected + remainingBytes; i++) { + assertThat(is.read(), equalTo(i)); + } + } + public void testReadZeroShouldReturnZero() throws IOException { try (Directory dir = new ByteBuffersDirectory()) { try (IndexOutput output = dir.createOutput("test", IOContext.DEFAULT)) { From 912d37abef76fd741767765adaf731dea4f21984 Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Wed, 11 Dec 2024 17:49:32 +0200 Subject: [PATCH 07/11] No-op reset in SlicedInputStream (#118437) Previously if reset was called at the exact marked offset, it would unnecessarily re-open the current slice and skip bytes. We now detect this situation, and just do nothing in this case. 
Closes ES-10235 --- .../index/snapshots/blobstore/SlicedInputStream.java | 4 ++++ .../index/snapshots/blobstore/SlicedInputStreamTests.java | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java index 1edd69a6443a7..2486cc66fd4c9 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java @@ -171,6 +171,10 @@ public void reset() throws IOException { if (markedSlice < 0 || markedSliceOffset < 0) { throw new IOException("Mark has not been set"); } + if (initialized && nextSlice == markedSlice + 1 && currentSliceOffset == markedSliceOffset) { + // Reset at the marked offset should return immediately without re-opening the slice + return; + } nextSlice = markedSlice; initialized = true; diff --git a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index c31a68f36de71..256d0f269edb4 100644 --- a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -155,9 +155,10 @@ protected InputStream openSlice(int slice) throws IOException { // Mark input.mark(randomNonNegativeInt()); + int slicesOpenedAtMark = streamsOpened.size(); // Read or skip up to another random point - final int moreBytes = randomIntBetween(0, bytes.length - mark); + int moreBytes = randomIntBetween(0, bytes.length - mark); if (moreBytes > 0) { if (randomBoolean()) { final var moreBytesRead = new byte[moreBytes]; @@ -171,11 +172,13 @@ protected InputStream openSlice(int slice) throws IOException { // 
Randomly read to EOF if (randomBoolean()) { - input.readAllBytes(); + moreBytes += input.readAllBytes().length; } // Reset input.reset(); + int slicesOpenedAfterReset = streamsOpened.size(); + assert moreBytes > 0 || mark == 0 || slicesOpenedAfterReset == slicesOpenedAtMark : "Reset at mark should not re-open slices"; // Read all remaining bytes, which should be the bytes from mark up to the end final int remainingBytes = bytes.length - mark; From e0ad97e8d56cd2fee9834fc3bf60e73c074c439a Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 11 Dec 2024 17:03:52 +0100 Subject: [PATCH 08/11] Add QA test module for Lucene N-2 version (#118363) This change introduces a new QA project to test Lucene support for reading indices created in version N-2. The test suite is inspired from the various full-cluster restart suites we already have. It creates a cluster in version N-2 (today 7.17.25), then upgrades the cluster to N-1 (today 8.18.0) and finally upgrades the cluster to the current version (today 9.0), allowing to execute test methods after every upgrade. The test suite has two variants: one for searchable snapshots and one for snapshot restore. The suites demonstrates that Elasticsearch does not allow reading indices written in version N-2 but we hope to make this feasible. 
Also, the tests can be used for investigation and debug with the command `./gradlew ":qa:lucene-index-compatibility:check" --debug-jvm-server` Relates ES-10274 --- .../gradle/internal/BwcVersions.java | 14 ++ qa/lucene-index-compatibility/build.gradle | 25 ++++ ...tractLuceneIndexCompatibilityTestCase.java | 141 ++++++++++++++++++ .../lucene/LuceneCompatibilityIT.java | 114 ++++++++++++++ .../SearchableSnapshotCompatibilityIT.java | 117 +++++++++++++++ 5 files changed, 411 insertions(+) create mode 100644 qa/lucene-index-compatibility/build.gradle create mode 100644 qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java create mode 100644 qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java create mode 100644 qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index 37b28389ad97b..9f7645349e852 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -252,6 +252,20 @@ private List getReleased() { .toList(); } + public List getReadOnlyIndexCompatible() { + // Lucene can read indices in version N-2 + int compatibleMajor = currentVersion.getMajor() - 2; + return versions.stream().filter(v -> v.getMajor() == compatibleMajor).sorted(Comparator.naturalOrder()).toList(); + } + + public void withLatestReadOnlyIndexCompatible(Consumer versionAction) { + var compatibleVersions = getReadOnlyIndexCompatible(); + if (compatibleVersions == null || compatibleVersions.isEmpty()) { + throw new IllegalStateException("No read-only compatible version found."); + } + 
versionAction.accept(compatibleVersions.getLast()); + } + /** * Return versions of Elasticsearch which are index compatible with the current version. */ diff --git a/qa/lucene-index-compatibility/build.gradle b/qa/lucene-index-compatibility/build.gradle new file mode 100644 index 0000000000000..37e5eea85a08b --- /dev/null +++ b/qa/lucene-index-compatibility/build.gradle @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact' +apply plugin: 'elasticsearch.bwc-test' + +buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion -> + tasks.named("javaRestTest").configure { + systemProperty("tests.minimum.index.compatible", bwcVersion) + usesBwcDistribution(bwcVersion) + enabled = true + } +} + +tasks.withType(Test).configureEach { + // CI doesn't like it when there's multiple clusters running at once + maxParallelForks = 1 +} + diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java new file mode 100644 index 0000000000000..c42e879f84892 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.client.Request; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.util.Comparator; +import java.util.Locale; +import java.util.stream.Stream; + +import static org.elasticsearch.test.cluster.util.Version.CURRENT; +import static org.elasticsearch.test.cluster.util.Version.fromString; +import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Test suite for Lucene indices backward compatibility with N-2 versions. The test suite creates a cluster in N-2 version, then upgrades it + * to N-1 version and finally upgrades it to the current version. Test methods are executed after each upgrade. 
+ */ +@TestCaseOrdering(AbstractLuceneIndexCompatibilityTestCase.TestCaseOrdering.class) +public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTestCase { + + protected static final Version VERSION_MINUS_2 = fromString(System.getProperty("tests.minimum.index.compatible")); + protected static final Version VERSION_MINUS_1 = fromString(System.getProperty("tests.minimum.wire.compatible")); + protected static final Version VERSION_CURRENT = CURRENT; + + protected static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(VERSION_MINUS_2) + .nodes(2) + .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) + .apply(() -> clusterConfig) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(REPOSITORY_PATH).around(cluster); + + private static boolean upgradeFailed = false; + + private final Version clusterVersion; + + public AbstractLuceneIndexCompatibilityTestCase(@Name("cluster") Version clusterVersion) { + this.clusterVersion = clusterVersion; + } + + @ParametersFactory + public static Iterable parameters() { + return Stream.of(VERSION_MINUS_2, VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + @Before + public void maybeUpgrade() throws Exception { + // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support + // in V10, so we add a check here to ensure we'll revisit this decision once V10 
exists. + assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7)); + + var currentVersion = clusterVersion(); + if (currentVersion.before(clusterVersion)) { + try { + cluster.upgradeToVersion(clusterVersion); + closeClients(); + initClient(); + } catch (Exception e) { + upgradeFailed = true; + throw e; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + protected String suffix(String name) { + return name + '-' + getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } + + protected static Version clusterVersion() throws Exception { + var response = assertOK(client().performRequest(new Request("GET", "/"))); + var responseBody = createFromResponse(response); + var version = Version.fromString(responseBody.evaluate("version.number").toString()); + assertThat("Failed to retrieve cluster version", version, notNullValue()); + return version; + } + + protected static Version indexLuceneVersion(String indexName) throws Exception { + var response = assertOK(client().performRequest(new Request("GET", "/" + indexName + "/_settings"))); + int id = Integer.parseInt(createFromResponse(response).evaluate(indexName + ".settings.index.version.created")); + return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); + } + + /** + * Execute the test suite with the parameters provided by the {@link #parameters()} in version order. 
+ */ + public static class TestCaseOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + var version1 = (Version) o1.getInstanceArguments().get(0); + var version2 = (Version) o2.getInstanceArguments().get(0); + return version1.compareTo(version2); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java new file mode 100644 index 0000000000000..d6dd949b843d6 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.stream.IntStream; + +import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial"); + } + + public LuceneCompatibilityIT(Version version) { + super(version); + } + + public void testRestoreIndex() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 1234; + + logger.debug("--> registering repository [{}]", repository); + registerRepository( + client(), + repository, + FsRepository.TYPE, + true, + Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() + ); + + if (VERSION_MINUS_2.equals(clusterVersion())) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + final var bulks = new StringBuilder(); + IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" + {"index":{"_id":"%s","_index":"%s"}} + {"test":"test"} + """, n, 
index))); + + var bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulks.toString()); + var bulkResponse = client().performRequest(bulkRequest); + assertOK(bulkResponse); + assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + return; + } + + if (VERSION_MINUS_1.equals(clusterVersion())) { + ensureGreen(index); + + assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + if (VERSION_CURRENT.equals(clusterVersion())) { + var restoredIndex = suffix("index-restored"); + logger.debug("--> restoring index [{}] as archive [{}]", index, restoredIndex); + + // Restoring the archive will fail as Elasticsearch does not support reading N-2 yet + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity(Strings.format(""" + { + "indices": "%s", + "include_global_state": false, + "rename_pattern": "(.+)", + "rename_replacement": "%s", + "include_aliases": false + }""", index, restoredIndex)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); + assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java new file mode 100644 index 0000000000000..4f348b7fb122f --- /dev/null +++ 
b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.stream.IntStream; + +import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); + } + + public SearchableSnapshotCompatibilityIT(Version version) { + super(version); + } + + // TODO Add a test to mount the N-2 index on N-1 and then search it on N + + public void testSearchableSnapshot() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = 
suffix("index"); + final int numDocs = 1234; + + logger.debug("--> registering repository [{}]", repository); + registerRepository( + client(), + repository, + FsRepository.TYPE, + true, + Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() + ); + + if (VERSION_MINUS_2.equals(clusterVersion())) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + final var bulks = new StringBuilder(); + IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" + {"index":{"_id":"%s","_index":"%s"}} + {"test":"test"} + """, n, index))); + + var bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulks.toString()); + var bulkResponse = client().performRequest(bulkRequest); + assertOK(bulkResponse); + assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + return; + } + + if (VERSION_MINUS_1.equals(clusterVersion())) { + ensureGreen(index); + + assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + if (VERSION_CURRENT.equals(clusterVersion())) { + var mountedIndex = suffix("index-mounted"); + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + + // Mounting the index will fail as Elasticsearch does not support reading N-2 yet + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_mount"); + request.addParameter("wait_for_completion", "true"); + var storage = randomBoolean() ? 
"shared_cache" : "full_copy"; + request.addParameter("storage", storage); + request.setJsonEntity(Strings.format(""" + { + "index": "%s", + "renamed_index": "%s" + }""", index, mountedIndex)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); + assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + } + } +} From 9837e782e1a5787ce99afab31426e29be8aa3bc4 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 11 Dec 2024 08:09:21 -0800 Subject: [PATCH 09/11] Rename instrumenter tests (#118462) The "sythetic" tests are the only unit tests for the instrumenter. This commit renames the test suite to be more clear it is the place to put instrumenter tests. --- ...rumenterTests.java => InstrumenterTests.java} | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) rename libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/{SyntheticInstrumenterTests.java => InstrumenterTests.java} (96%) diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java similarity index 96% rename from libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java rename to libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java index 8e0409971ba61..75102b0bf260d 100644 --- a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java @@ -34,8 +34,8 @@ * some 
ad-hoc test cases (e.g. overloaded methods, overloaded targets, multiple instrumentation, etc.) */ @ESTestCase.WithoutSecurityManager -public class SyntheticInstrumenterTests extends ESTestCase { - private static final Logger logger = LogManager.getLogger(SyntheticInstrumenterTests.class); +public class InstrumenterTests extends ESTestCase { + private static final Logger logger = LogManager.getLogger(InstrumenterTests.class); /** * Contains all the virtual methods from {@link TestClassToInstrument}, @@ -137,10 +137,10 @@ public void checkSomeStaticMethod(Class callerClass, int arg, String anotherA @Override public void checkSomeInstanceMethod(Class callerClass, Testable that, int arg, String anotherArg) { checkSomeInstanceMethodCallCount++; - assertSame(SyntheticInstrumenterTests.class, callerClass); + assertSame(InstrumenterTests.class, callerClass); assertThat( that.getClass().getName(), - startsWith("org.elasticsearch.entitlement.instrumentation.impl.SyntheticInstrumenterTests$TestClassToInstrument") + startsWith("org.elasticsearch.entitlement.instrumentation.impl.InstrumenterTests$TestClassToInstrument") ); assertEquals(123, arg); assertEquals("def", anotherArg); @@ -150,14 +150,14 @@ public void checkSomeInstanceMethod(Class callerClass, Testable that, int arg @Override public void checkCtor(Class callerClass) { checkCtorCallCount++; - assertSame(SyntheticInstrumenterTests.class, callerClass); + assertSame(InstrumenterTests.class, callerClass); throwIfActive(); } @Override public void checkCtor(Class callerClass, int arg) { checkCtorIntCallCount++; - assertSame(SyntheticInstrumenterTests.class, callerClass); + assertSame(InstrumenterTests.class, callerClass); assertEquals(123, arg); throwIfActive(); } @@ -374,8 +374,8 @@ public void testInstrumenterWorksWithConstructors() throws Exception { * Testable) which is not what would happen when it's run by the agent. 
*/ private InstrumenterImpl createInstrumenter(Map checkMethods) { - String checkerClass = Type.getInternalName(SyntheticInstrumenterTests.MockEntitlementChecker.class); - String handleClass = Type.getInternalName(SyntheticInstrumenterTests.TestEntitlementCheckerHolder.class); + String checkerClass = Type.getInternalName(InstrumenterTests.MockEntitlementChecker.class); + String handleClass = Type.getInternalName(InstrumenterTests.TestEntitlementCheckerHolder.class); String getCheckerClassMethodDescriptor = Type.getMethodDescriptor(Type.getObjectType(checkerClass)); return new InstrumenterImpl(handleClass, getCheckerClassMethodDescriptor, "_NEW", checkMethods); From 55727779c04f3817ce1d504cc62dceccb60b11d1 Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:21:48 -0500 Subject: [PATCH 10/11] [ML] Fixing streaming tests locale issue (#118481) * Fixing the string locale * Missing a toUpper --- muted-tests.yml | 3 --- .../src/main/java/org/elasticsearch/test/ESTestCase.java | 1 + .../org/elasticsearch/xpack/inference/InferenceCrudIT.java | 7 +++++-- .../mock/TestStreamingCompletionServiceExtension.java | 5 +++-- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index c0e3c217abce2..9416113770d5a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -315,9 +315,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode} issue: https://github.com/elastic/elasticsearch/issues/118273 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testUnifiedCompletionInference - issue: https://github.com/elastic/elasticsearch/issues/118405 - class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT method: testEveryActionIsEitherOperatorOnlyOrNonOperator issue: https://github.com/elastic/elasticsearch/issues/118220 diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 6612f0da0c43f..f678f4af22328 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1210,6 +1210,7 @@ public static String randomAlphaOfLength(int codeUnits) { /** * Generate a random string containing only alphanumeric characters. + * The locale for the string is {@link Locale#ROOT}. * @param length the length of the string to generate * @return the generated string */ diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 90d4f3a8eb33b..fc593a6a8b0fa 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -471,7 +472,7 @@ public void testSupportedStream() throws Exception { var events = streamInferOnMockService(modelId, TaskType.COMPLETION, input); var expectedResponses = Stream.concat( - input.stream().map(String::toUpperCase).map(str -> "{\"completion\":[{\"delta\":\"" + str + "\"}]}"), + input.stream().map(s -> s.toUpperCase(Locale.ROOT)).map(str -> "{\"completion\":[{\"delta\":\"" + str + "\"}]}"), Stream.of("[DONE]") ).iterator(); assertThat(events.size(), equalTo((input.size() + 1) * 2)); @@ -510,7 +511,9 @@ public void testUnifiedCompletionInference() throws Exception { } private static 
Iterator expectedResultsIterator(List input) { - return Stream.concat(input.stream().map(String::toUpperCase).map(InferenceCrudIT::expectedResult), Stream.of("[DONE]")).iterator(); + // The Locale needs to be ROOT to match what the test service is going to respond with + return Stream.concat(input.stream().map(s -> s.toUpperCase(Locale.ROOT)).map(InferenceCrudIT::expectedResult), Stream.of("[DONE]")) + .iterator(); } private static String expectedResult(String input) { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java index f7a05a27354ef..80696a285fb26 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java @@ -43,6 +43,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.Flow; @@ -142,7 +143,7 @@ public void unifiedCompletionInfer( } private StreamingChatCompletionResults makeResults(List input) { - var responseIter = input.stream().map(String::toUpperCase).iterator(); + var responseIter = input.stream().map(s -> s.toUpperCase(Locale.ROOT)).iterator(); return new StreamingChatCompletionResults(subscriber -> { subscriber.onSubscribe(new Flow.Subscription() { @Override @@ -173,7 +174,7 @@ private ChunkedToXContent completionChunk(String delta) { } private StreamingUnifiedChatCompletionResults makeUnifiedResults(UnifiedCompletionRequest request) { - var responseIter = request.messages().stream().map(message -> 
message.content().toString().toUpperCase()).iterator(); + var responseIter = request.messages().stream().map(message -> message.content().toString().toUpperCase(Locale.ROOT)).iterator(); return new StreamingUnifiedChatCompletionResults(subscriber -> { subscriber.onSubscribe(new Flow.Subscription() { @Override From a8a4a7bc2348460bfa79fabc820e7b0a88907af1 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:35:30 -0500 Subject: [PATCH 11/11] Fix testInvalidJSON (#118398) * Fix testInvalidJSON * CURSE YOU SPOTLESS --- muted-tests.yml | 3 - .../service/FileSettingsServiceTests.java | 69 +++++++++---------- 2 files changed, 31 insertions(+), 41 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 9416113770d5a..c07363657b3ec 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -123,9 +123,6 @@ tests: - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT method: testILMDownsampleRollingRestart issue: https://github.com/elastic/elasticsearch/issues/114233 -- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests - method: testInvalidJSON - issue: https://github.com/elastic/elasticsearch/issues/116521 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index ae60a21b6fc22..08d83e48b7152 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -74,8 +74,10 @@ import static org.hamcrest.Matchers.hasEntry; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; +import static 
org.mockito.ArgumentMatchers.contains; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -288,63 +290,54 @@ public void testProcessFileChanges() throws Exception { verifyNoMoreInteractions(healthIndicatorService); } - @SuppressWarnings("unchecked") public void testInvalidJSON() throws Exception { - doAnswer((Answer) invocation -> { - invocation.getArgument(1, XContentParser.class).map(); // Throw if JSON is invalid - ((Consumer) invocation.getArgument(3)).accept(null); - return null; - }).when(controller).process(any(), any(XContentParser.class), any(), any()); - - CyclicBarrier fileChangeBarrier = new CyclicBarrier(2); - fileSettingsService.addFileChangedListener(() -> awaitOrBust(fileChangeBarrier)); + // Chop off the functionality so we don't run too much of the actual cluster logic that we're not testing + doNothing().when(controller).updateErrorState(any()); + doAnswer( + (Answer) invocation -> { throw new AssertionError("Parse error should happen before this process method is called"); } + ).when(controller).process(any(), any(ReservedStateChunk.class), any(), any()); + // Don't really care about the initial state Files.createDirectories(fileSettingsService.watchedFileDir()); - // contents of the JSON don't matter, we just need a file to exist - writeTestFile(fileSettingsService.watchedFile(), "{}"); + doNothing().when(fileSettingsService).processInitialFileMissing(); + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + // Now break the JSON and wait + CyclicBarrier fileChangeBarrier = new CyclicBarrier(2); doAnswer((Answer) invocation -> { - boolean returnedNormally = false; try { - var result = invocation.callRealMethod(); - returnedNormally = true; - 
return result; - } catch (XContentParseException e) { - // We're expecting a parse error. processFileChanges specifies that this is supposed to throw ExecutionException. - throw new ExecutionException(e); - } catch (Throwable e) { - throw new AssertionError("Unexpected exception", e); + return invocation.callRealMethod(); } finally { - if (returnedNormally == false) { - // Because of the exception, listeners aren't notified, so we need to activate the barrier ourselves - awaitOrBust(fileChangeBarrier); - } + awaitOrBust(fileChangeBarrier); } }).when(fileSettingsService).processFileChanges(); - - // Establish the initial valid JSON - fileSettingsService.start(); - fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - awaitOrBust(fileChangeBarrier); - - // Now break the JSON writeTestFile(fileSettingsService.watchedFile(), "test_invalid_JSON"); awaitOrBust(fileChangeBarrier); - verify(fileSettingsService, times(1)).processFileOnServiceStart(); // The initial state - verify(fileSettingsService, times(1)).processFileChanges(); // The changed state verify(fileSettingsService, times(1)).onProcessFileChangesException( - argThat(e -> e instanceof ExecutionException && e.getCause() instanceof XContentParseException) + argThat(e -> unwrapException(e) instanceof XContentParseException) ); // Note: the name "processFileOnServiceStart" is a bit misleading because it is not // referring to fileSettingsService.start(). Rather, it is referring to the initialization // of the watcher thread itself, which occurs asynchronously when clusterChanged is first called. 
- verify(healthIndicatorService, times(2)).changeOccurred(); - verify(healthIndicatorService, times(1)).successOccurred(); - verify(healthIndicatorService, times(1)).failureOccurred(argThat(s -> s.startsWith(IllegalArgumentException.class.getName()))); - verifyNoMoreInteractions(healthIndicatorService); + verify(healthIndicatorService).failureOccurred(contains(XContentParseException.class.getName())); + } + + /** + * Looks for the ultimate cause of {@code e} by stripping off layers of bookkeeping exception wrappers. + */ + private Throwable unwrapException(Throwable e) { + while (e != null) { + if (e instanceof ExecutionException || e instanceof IllegalStateException) { + e = e.getCause(); + } else { + break; + } + } + return e; } private static void awaitOrBust(CyclicBarrier barrier) {