diff --git a/docs/changelog/119054.yaml b/docs/changelog/119054.yaml new file mode 100644 index 0000000000000..720f2e0ab02ed --- /dev/null +++ b/docs/changelog/119054.yaml @@ -0,0 +1,6 @@ +pr: 119054 +summary: "[Security Solution] allows `kibana_system` user to manage .reindexed-v8-*\ + \ Security Solution indices" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/119233.yaml b/docs/changelog/119233.yaml new file mode 100644 index 0000000000000..ef89c011ce4f6 --- /dev/null +++ b/docs/changelog/119233.yaml @@ -0,0 +1,5 @@ +pr: 119233 +summary: Fixing `GetDatabaseConfigurationAction` response serialization +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/119474.yaml b/docs/changelog/119474.yaml new file mode 100644 index 0000000000000..e37561277d220 --- /dev/null +++ b/docs/changelog/119474.yaml @@ -0,0 +1,5 @@ +pr: 119474 +summary: "Add ES|QL cross-cluster query telemetry collection" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119495.yaml b/docs/changelog/119495.yaml new file mode 100644 index 0000000000000..b3e8f7e79d984 --- /dev/null +++ b/docs/changelog/119495.yaml @@ -0,0 +1,5 @@ +pr: 119495 +summary: Add mapping for `event_name` for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 4a7a54a5b290d..f078fd2b7f2ee 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -25,7 +25,6 @@ Returns cluster statistics. * If the {es} {security-features} are enabled, you must have the `monitor` or `manage` <> to use this API. - [[cluster-stats-api-desc]] ==== {api-description-title} @@ -1397,7 +1396,7 @@ as a human-readable string. `_search`::: -(object) Contains the information about the <> usage in the cluster. +(object) Contains information about <> usage. + .Properties of `_search` [%collapsible%open] @@ -1528,7 +1527,11 @@ This may include requests where partial results were returned, but not requests ======= + ====== +`_esql`::: +(object) Contains information about <> usage. +The structure of the object is the same as the `_search` object above. ===== diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 5b152ecf177ec..90c4a6952446e 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -61,7 +61,7 @@ applying the templates, do one or more of the following: - Use a non-overlapping index pattern. -- Assign templates with an overlapping pattern a `priority` higher than `200`. +- Assign templates with an overlapping pattern a `priority` higher than `500`. For example, if you don't use {fleet} or {agent} and want to create a template for the `logs-*` index pattern, assign your template a priority of `500`. This ensures your template is applied instead of the built-in template for diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 19e3f7a5dcffe..8870fbed357a6 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -9,8 +9,11 @@ For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[ Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. 
-NOTE: If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. - +[NOTE] +==== +* Your {es} deployment contains <>, so you only need to create the endpoints using the API if you want to customize the settings. +* If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. +==== [discrete] [[infer-service-elasticsearch-api-request]] diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 56e56215124af..47aaa58814602 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -10,14 +10,17 @@ For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[ Creates an {infer} endpoint to perform an {infer} task with the `elser` service. You can also deploy ELSER by using the <>. -NOTE: The API request will automatically download and deploy the ELSER model if -it isn't already downloaded. +[NOTE] +==== +* Your {es} deployment contains <>, so you only need to create the endpoint using the API if you want to customize the settings. +* The API request will automatically download and deploy the ELSER model if it isn't already downloaded. +==== [WARNING] .Deprecated in 8.16 ==== -The elser service is deprecated and will be removed in a future release. -Use the <> instead, with model_id included in the service_settings. +The `elser` service is deprecated and will be removed in a future release. +Use the <> instead, with `model_id` included in the `service_settings`. ==== [discrete] diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index 8becc1e50ffcc..67d006868b48d 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -26,6 +26,20 @@ public interface EntitlementChecker { void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status); + // ClassLoader ctor + void check$java_lang_ClassLoader$(Class callerClass); + + void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent); + + void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent); + + // SecureClassLoader ctor + void check$java_security_SecureClassLoader$(Class callerClass); + + void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent); + + void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent); + // URLClassLoader constructors void check$java_net_URLClassLoader$(Class callerClass, URL[] urls); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index aded5344024d3..21fb7c3d3dc2e 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -20,6 +20,7 @@ import
org.elasticsearch.entitlement.instrumentation.Transformer; import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; +import org.elasticsearch.entitlement.runtime.policy.Entitlement; import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; @@ -93,9 +94,17 @@ private static PolicyManager createPolicyManager() throws IOException { // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it var serverPolicy = new Policy( "server", - List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) + List.of( + new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())), + new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())), + new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())) + ) ); - return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver(), ENTITLEMENTS_MODULE); + // agents run without a module, so this is a special hack for the apm agent + // this should be removed once https://github.com/elastic/elasticsearch/issues/109335 is completed + List agentEntitlements = List.of(new CreateClassLoaderEntitlement()); + var resolver = EntitlementBootstrap.bootstrapArgs().pluginResolver(); + return new PolicyManager(serverPolicy, agentEntitlements, pluginPolicies, resolver, ENTITLEMENTS_MODULE); } private static Map createPluginPolicies(Collection pluginData) throws IOException { @@ -120,12 +129,12 @@ private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, Strin // TODO: should this check actually be part of the parser? for (Scope scope : policy.scopes) { - if (moduleNames.contains(scope.name) == false) { + if (moduleNames.contains(scope.moduleName) == false) { throw new IllegalStateException( Strings.format( "Invalid module name in policy: plugin [%s] does not have module [%s]; available modules [%s]; policy file [%s]", pluginName, - scope.name, + scope.moduleName, String.join(", ", moduleNames), policyFile ) diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index 27bf9ea553d87..450786ee57d86 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -27,6 +27,7 @@ * The trampoline module loads this object via SPI. 
*/ public class ElasticsearchEntitlementChecker implements EntitlementChecker { + private final PolicyManager policyManager; public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @@ -43,6 +44,36 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_ClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { policyManager.checkCreateClassLoader(callerClass); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 330c7e59c60c7..a0e13bb3ac93d 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -17,34 +17,31 @@ import java.lang.StackWalker.StackFrame; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.IdentityHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.groupingBy; public class PolicyManager { private static final Logger logger = LogManager.getLogger(PolicyManager.class); - static class ModuleEntitlements { - public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of()); - private final IdentityHashMap, List> entitlementsByType; + record ModuleEntitlements(Map, List> entitlementsByType) { + public static final ModuleEntitlements NONE = new ModuleEntitlements(Map.of()); - ModuleEntitlements(List entitlements) { - this.entitlementsByType = entitlements.stream() - .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> { - a.addAll(b); - return a; - }, IdentityHashMap::new)); + ModuleEntitlements { + entitlementsByType = Map.copyOf(entitlementsByType); + } + + public static ModuleEntitlements from(List entitlements) { + return new ModuleEntitlements(entitlements.stream().collect(groupingBy(Entitlement::getClass))); } public boolean hasEntitlement(Class entitlementClass) { @@ -56,9 +53,10 @@ public Stream 
getEntitlements(Class entitlementCla } } - final Map moduleEntitlementsMap = new HashMap<>(); + final Map moduleEntitlementsMap = new ConcurrentHashMap<>(); protected final Map> serverEntitlements; + protected final List agentEntitlements; protected final Map>> pluginsEntitlements; private final Function, String> pluginResolver; @@ -85,12 +83,14 @@ private static Set findSystemModules() { private final Module entitlementsModule; public PolicyManager( - Policy defaultPolicy, + Policy serverPolicy, + List agentEntitlements, Map pluginPolicies, Function, String> pluginResolver, Module entitlementsModule ) { - this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(defaultPolicy)); + this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(serverPolicy)); + this.agentEntitlements = agentEntitlements; this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); @@ -99,7 +99,7 @@ public PolicyManager( } private static Map> buildScopeEntitlementsMap(Policy policy) { - return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements)); + return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.moduleName, scope -> scope.entitlements)); } public void checkStartProcess(Class callerClass) { @@ -107,7 +107,7 @@ public void checkStartProcess(Class callerClass) { } private void neverEntitled(Class callerClass, String operationDescription) { - var requestingModule = requestingModule(callerClass); + var requestingModule = requestingClass(callerClass); if (isTriviallyAllowed(requestingModule)) { return; } @@ -139,18 +139,18 @@ public void checkSetGlobalHttpsConnectionProperties(Class callerClass) { } private void checkEntitlementPresent(Class callerClass, Class entitlementClass) { - var requestingModule = requestingModule(callerClass); - if (isTriviallyAllowed(requestingModule)) { + var requestingClass = requestingClass(callerClass); + if (isTriviallyAllowed(requestingClass)) { return; } - ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule); + ModuleEntitlements entitlements = getEntitlements(requestingClass); if (entitlements.hasEntitlement(entitlementClass)) { logger.debug( () -> Strings.format( - "Entitled: caller [%s], module [%s], type [%s]", - callerClass, - requestingModule.getName(), + "Entitled: class [%s], module [%s], entitlement [%s]", + requestingClass, + requestingClass.getModule().getName(), entitlementClass.getSimpleName() ) ); @@ -158,30 +158,26 @@ private void checkEntitlementPresent(Class callerClass, Class callerClass, Module requestingModule) { - ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule); - if (cachedEntitlement != null) { - if (cachedEntitlement == ModuleEntitlements.NONE) { - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]"); - } - return cachedEntitlement; - } + ModuleEntitlements getEntitlements(Class requestingClass) { + return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass)); + } + private ModuleEntitlements computeEntitlements(Class requestingClass) { + Module requestingModule = requestingClass.getModule(); if (isServerModule(requestingModule)) { - var scopeName = requestingModule.getName(); - return getModuleEntitlementsOrThrow(callerClass, requestingModule, 
serverEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName()); } // plugins - var pluginName = pluginResolver.apply(callerClass); + var pluginName = pluginResolver.apply(requestingClass); if (pluginName != null) { var pluginEntitlements = pluginsEntitlements.get(pluginName); if (pluginEntitlements != null) { @@ -191,34 +187,30 @@ ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestin } else { scopeName = requestingModule.getName(); } - return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName); } } - moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule)); - } + if (requestingModule.isNamed() == false) { + // agents are the only thing running non-modular + return ModuleEntitlements.from(agentEntitlements); + } - private static String buildModuleNoPolicyMessage(Class callerClass, Module requestingModule) { - return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName()); + logger.warn("No applicable entitlement policy for class [{}]", requestingClass.getName()); + return ModuleEntitlements.NONE; } - private ModuleEntitlements getModuleEntitlementsOrThrow( + private ModuleEntitlements getModuleScopeEntitlements( Class callerClass, - Module module, Map> scopeEntitlements, String moduleName ) { var entitlements = scopeEntitlements.get(moduleName); if (entitlements == null) { - // Module without entitlements - remember we don't have any - moduleEntitlementsMap.put(module, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module)); + logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass); + return ModuleEntitlements.NONE; } - // We have a policy for this module - var classEntitlements = new ModuleEntitlements(entitlements); - moduleEntitlementsMap.put(module, classEntitlements); - return classEntitlements; + return ModuleEntitlements.from(entitlements); } private static boolean isServerModule(Module requestingModule) { @@ -226,25 +218,22 @@ private static boolean isServerModule(Module requestingModule) { } /** - * Walks the stack to determine which module's entitlements should be checked. + * Walks the stack to determine which class should be checked for entitlements. * - * @param callerClass when non-null will be used if its module is suitable; + * @param callerClass when non-null will be returned; * this is a fast-path check that can avoid the stack walk * in cases where the caller class is available. - * @return the requesting module, or {@code null} if the entire call stack + * @return the requesting class, or {@code null} if the entire call stack * comes from the entitlement library itself. 
*/ - Module requestingModule(Class callerClass) { + Class requestingClass(Class callerClass) { if (callerClass != null) { - var callerModule = callerClass.getModule(); - if (callerModule != null && entitlementsModule.equals(callerModule) == false) { - // fast path - return callerModule; - } + // fast path + return callerClass; } - Optional module = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) - .walk(frames -> findRequestingModule(frames.map(StackFrame::getDeclaringClass))); - return module.orElse(null); + Optional> result = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) + .walk(frames -> findRequestingClass(frames.map(StackFrame::getDeclaringClass))); + return result.orElse(null); } /** @@ -253,33 +242,25 @@ Module requestingModule(Class callerClass) { * * @throws NullPointerException if the requesting module is {@code null} */ - Optional findRequestingModule(Stream> classes) { - return classes.map(Objects::requireNonNull) - .map(PolicyManager::moduleOf) - .filter(m -> m != entitlementsModule) // Ignore the entitlements library itself entirely - .skip(1) // Skip the sensitive method itself + Optional> findRequestingClass(Stream> classes) { + return classes.filter(c -> c.getModule() != entitlementsModule) // Ignore the entitlements library + .skip(1) // Skip the sensitive caller method .findFirst(); } - private static Module moduleOf(Class c) { - var result = c.getModule(); - if (result == null) { - throw new NullPointerException("Entitlements system does not support non-modular class [" + c.getName() + "]"); - } else { - return result; - } - } - - private static boolean isTriviallyAllowed(Module requestingModule) { + /** + * @return true if permission is granted regardless of the entitlement + */ + private static boolean isTriviallyAllowed(Class requestingClass) { if (logger.isTraceEnabled()) { logger.trace("Stack trace for upcoming trivially-allowed check", new Exception()); } - if (requestingModule == null) { + if (requestingClass == null) { logger.debug("Entitlement trivially allowed: no caller frames outside the entitlement library"); return true; } - if (systemModules.contains(requestingModule)) { - logger.debug("Entitlement trivially allowed from system module [{}]", requestingModule.getName()); + if (systemModules.contains(requestingClass.getModule())) { + logger.debug("Entitlement trivially allowed from system module [{}]", requestingClass.getModule().getName()); return true; } logger.trace("Entitlement not trivially allowed"); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java index 0fe63eb8da1b7..a75192936e46d 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java @@ -18,11 +18,11 @@ */ public class Scope { - public final String name; + public final String moduleName; public final List entitlements; - public Scope(String name, List entitlements) { - this.name = Objects.requireNonNull(name); + public Scope(String moduleName, List entitlements) { + this.moduleName = moduleName; this.entitlements = Collections.unmodifiableList(Objects.requireNonNull(entitlements)); } @@ -31,16 +31,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Scope scope = (Scope) o; - return Objects.equals(name, scope.name) && Objects.equals(entitlements, 
scope.entitlements); + return Objects.equals(moduleName, scope.moduleName) && Objects.equals(entitlements, scope.entitlements); } @Override public int hashCode() { - return Objects.hash(name, entitlements); + return Objects.hash(moduleName, entitlements); } @Override public String toString() { - return "Scope{" + "name='" + name + '\'' + ", entitlements=" + entitlements + '}'; + return "Scope{" + "name='" + moduleName + '\'' + ", entitlements=" + entitlements + '}'; } } diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index 31e3e62f56bf5..d22c2f598e344 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -import org.elasticsearch.entitlement.runtime.api.NotEntitledException; +import org.elasticsearch.entitlement.runtime.policy.PolicyManager.ModuleEntitlements; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; import org.elasticsearch.test.jar.JarUtils; @@ -31,8 +31,6 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; @@ -58,6 +56,7 @@ public static void beforeClass() { public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("plugin1", createPluginPolicy("plugin.module")), c -> "plugin1", NO_ENTITLEMENTS_MODULE @@ -67,60 +66,44 @@ public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - var ex = assertThrows( - "No policy for the unnamed module", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) - ); + assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertEquals( - "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - var ex = assertThrows( - "No policy for this plugin", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) - ); + assertEquals("No policy for this plugin", ModuleEntitlements.NONE, 
policyManager.getEntitlements(callerClass)); - assertEquals( - "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsFailureIsCached() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); // A second time - var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertThat(ex.getMessage(), endsWith("[CACHED]")); // Nothing new in the map - assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), c -> "plugin2", NO_ENTITLEMENTS_MODULE @@ -128,14 +111,13 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); } public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager(createTestServerPolicy("example"), List.of(), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. 
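The caching behavior these tests exercise comes from the new `PolicyManager.getEntitlements`, shown in the PolicyManager.java hunks earlier in this diff: results are memoized per module in a `ConcurrentHashMap`, and a missing policy now yields the cached `ModuleEntitlements.NONE` sentinel instead of a `NotEntitledException` at lookup time. A minimal standalone sketch of that pattern follows; it is not part of the change itself, `EntitlementsCache` and its simplified types are illustrative stand-ins, while the field and method names follow the diff:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch of the memoization pattern in PolicyManager.getEntitlements;
// EntitlementsCache and the simplified ModuleEntitlements are stand-ins, not the real classes.
class EntitlementsCache {

    // Simplified: the real record maps entitlement classes to lists of entitlement instances.
    record ModuleEntitlements(Map<String, Object> entitlementsByType) {
        static final ModuleEntitlements NONE = new ModuleEntitlements(Map.of());
    }

    private final Map<Module, ModuleEntitlements> moduleEntitlementsMap = new ConcurrentHashMap<>();

    ModuleEntitlements getEntitlements(Class<?> requestingClass) {
        // computeIfAbsent caches positive results and the NONE sentinel alike,
        // so a repeated lookup for the same module never recomputes.
        return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass));
    }

    private ModuleEntitlements computeEntitlements(Class<?> requestingClass) {
        // Stand-in for the real lookup across server scopes, plugin scopes, and agent entitlements.
        return ModuleEntitlements.NONE;
    }
}
```

Because `NONE` is stored like any other value, the second `getEntitlements` call in `testGetEntitlementsFailureIsCached` hits the map without recomputing, and `testGetEntitlementsResultIsCached` can assert `sameInstance` on the cached entry.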
@@ -144,21 +126,19 @@ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotF var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var ex = assertThrows( - "No policy for this module in server", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule) - ); + assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass)); - assertEquals( - "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager( + createTestServerPolicy("jdk.httpserver"), + List.of(), + Map.of(), + c -> null, + NO_ENTITLEMENTS_MODULE + ); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -167,7 +147,7 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockServerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); } @@ -179,6 +159,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOExc var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), c -> "mock-plugin", NO_ENTITLEMENTS_MODULE @@ -188,7 +169,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOExc var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); var requestingModule = mockPluginClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockPluginClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat( entitlements.getEntitlements(FileEntitlement.class).toList(), @@ -199,6 +180,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOExc public void testGetEntitlementsResultIsCached() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), c -> "plugin2", NO_ENTITLEMENTS_MODULE @@ -206,22 +188,21 @@ public void testGetEntitlementsResultIsCached() { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = 
policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); - var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlementsAgain = policyManager.getEntitlements(callerClass); // Nothing new in the map assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); assertThat(entitlementsAgain, sameInstance(cachedResult)); } - public void testRequestingModuleFastPath() throws IOException, ClassNotFoundException { + public void testRequestingClassFastPath() throws IOException, ClassNotFoundException { var callerClass = makeClassInItsOwnModule(); - assertEquals(callerClass.getModule(), policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingModule(callerClass)); + assertEquals(callerClass, policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingClass(callerClass)); } public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException { @@ -232,24 +213,21 @@ public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoun var policyManager = policyManagerWithEntitlementsModule(entitlementsClass.getModule()); - var requestingModule = requestingClass.getModule(); - assertEquals( "Skip entitlement library and the instrumented method", - requestingModule, - policyManager.findRequestingModule(Stream.of(entitlementsClass, instrumentedClass, requestingClass, ignorableClass)) - .orElse(null) + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, instrumentedClass, requestingClass, ignorableClass)).orElse(null) ); assertEquals( "Skip multiple library frames", - requestingModule, - policyManager.findRequestingModule(Stream.of(entitlementsClass, entitlementsClass, instrumentedClass, requestingClass)) + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, entitlementsClass, instrumentedClass, requestingClass)) .orElse(null) ); assertThrows( "Non-modular caller frames are not supported", NullPointerException.class, - () -> policyManager.findRequestingModule(Stream.of(entitlementsClass, null)) + () -> policyManager.findRequestingClass(Stream.of(entitlementsClass, null)) ); } @@ -261,7 +239,7 @@ private static Class makeClassInItsOwnModule() throws IOException, ClassNotFo } private static PolicyManager policyManagerWithEntitlementsModule(Module entitlementsModule) { - return new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "test", entitlementsModule); + return new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "test", entitlementsModule); } private static Policy createEmptyTestServerPolicy() { diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index bee8767fcd900..a8552c5a72359 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -68,7 +68,7 @@ public void testParseCreateClassloader() throws IOException { assertThat( parsedPolicy.scopes, contains( - 
both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( + both(transformedMatch((Scope scope) -> scope.moduleName, equalTo("entitlement-module-name"))).and( transformedMatch(scope -> scope.entitlements, contains(instanceOf(CreateClassLoaderEntitlement.class))) ) ) @@ -87,7 +87,7 @@ public void testParseSetHttpsConnectionProperties() throws IOException { assertThat( parsedPolicy.scopes, contains( - both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( + both(transformedMatch((Scope scope) -> scope.moduleName, equalTo("entitlement-module-name"))).and( transformedMatch(scope -> scope.entitlements, contains(instanceOf(SetHttpsConnectionPropertiesEntitlement.class))) ) ) diff --git a/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml index 30b2bd1978d1b..9c10bafca42f9 100644 --- a/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml +++ b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml @@ -1,2 +1,4 @@ +org.elasticsearch.telemetry.apm: + - create_class_loader elastic.apm.agent: - set_https_connection_properties diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java index 1970883e91b3e..68b3ce279a89d 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.DATABASE; @@ -91,6 +92,11 @@ protected Response(StreamInput in) throws IOException { this.databases = in.readCollectionAsList(DatabaseConfigurationMetadata::new); } + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(databases); + } + @Override protected List readNodesFrom(StreamInput in) throws IOException { return in.readCollectionAsList(NodeResponse::new); @@ -122,6 +128,63 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + /* + * This implementation of equals exists solely for testing the serialization of this object. + */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(databases, response.databases) + && Objects.equals(getClusterName(), response.getClusterName()) + && Objects.equals(equalsHashCodeFailures(), response.equalsHashCodeFailures()) + && Objects.equals(getNodes(), response.getNodes()) + && Objects.equals(equalsHashCodeNodesMap(), response.equalsHashCodeNodesMap()); + } + + /* + * This implementation of hashCode exists solely for testing the serialization of this object. + */ + @Override + public int hashCode() { + return Objects.hash(databases, getClusterName(), equalsHashCodeFailures(), getNodes(), equalsHashCodeNodesMap()); + } + + /* + * FailedNodeException does not implement equals or hashCode, making it difficult to test the serialization of this class. 
This + * helper method wraps the failures() list with a class that does implement equals and hashCode. + */ + private List equalsHashCodeFailures() { + return failures().stream().map(EqualsHashCodeFailedNodeException::new).toList(); + } + + private record EqualsHashCodeFailedNodeException(FailedNodeException failedNodeException) { + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + EqualsHashCodeFailedNodeException other = (EqualsHashCodeFailedNodeException) o; + return Objects.equals(failedNodeException.nodeId(), other.failedNodeException.nodeId()) + && Objects.equals(failedNodeException.getMessage(), other.failedNodeException.getMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(failedNodeException.nodeId(), failedNodeException.getMessage()); + } + } + + /* + * The getNodesMap method changes the value of the nodesMap, causing failures when testing the concurrent serialization and + * deserialization of this class. Since this is a response object, we do not actually care about concurrency, as it will not + * happen in practice. So this helper method synchronizes access to getNodesMap, which can be used from equals and hashCode for + * tests. + */ + private synchronized Map equalsHashCodeNodesMap() { + return getNodesMap(); + } } public static class NodeRequest extends TransportRequest { @@ -186,6 +249,7 @@ public List getDatabases() { @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); out.writeCollection(databases); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java new file mode 100644 index 0000000000000..12fb08a5a1abf --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptySet; + +public class GetDatabaseConfigurationActionNodeResponseTests extends AbstractWireSerializingTestCase< + GetDatabaseConfigurationAction.NodeResponse> { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.NodeResponse::new; + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse createTestInstance() { + return getRandomDatabaseConfigurationActionNodeResponse(); + } + + static GetDatabaseConfigurationAction.NodeResponse getRandomDatabaseConfigurationActionNodeResponse() { + return new GetDatabaseConfigurationAction.NodeResponse(randomDiscoveryNode(), getRandomDatabaseConfigurationMetadata()); + } + + private static DiscoveryNode randomDiscoveryNode() { + return DiscoveryNodeUtils.builder(randomAlphaOfLength(6)).roles(emptySet()).build(); + } + + static List getRandomDatabaseConfigurationMetadata() { + return randomList( + 0, + 20, + () -> new DatabaseConfigurationMetadata( + new DatabaseConfiguration( + randomAlphaOfLength(20), + randomAlphaOfLength(20), + randomFrom( + List.of( + new DatabaseConfiguration.Local(randomAlphaOfLength(10)), + new DatabaseConfiguration.Web(), + new DatabaseConfiguration.Ipinfo(), + new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10)) + ) + ) + ), + randomNonNegativeLong(), + randomNonNegativeLong() + ) + ); + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse mutateInstance(GetDatabaseConfigurationAction.NodeResponse instance) + throws IOException { + return null; + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java new file mode 100644 index 0000000000000..1b48a409d7876 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +public class GetDatabaseConfigurationActionResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.Response::new; + } + + @Override + protected GetDatabaseConfigurationAction.Response createTestInstance() { + return new GetDatabaseConfigurationAction.Response( + GetDatabaseConfigurationActionNodeResponseTests.getRandomDatabaseConfigurationMetadata(), + getTestClusterName(), + getTestNodeResponses(), + getTestFailedNodeExceptions() + ); + } + + @Override + protected GetDatabaseConfigurationAction.Response mutateInstance(GetDatabaseConfigurationAction.Response instance) throws IOException { + return null; + } + + private ClusterName getTestClusterName() { + return new ClusterName(randomAlphaOfLength(30)); + } + + private List getTestNodeResponses() { + return randomList(0, 20, GetDatabaseConfigurationActionNodeResponseTests::getRandomDatabaseConfigurationActionNodeResponse); + } + + private List getTestFailedNodeExceptions() { + return randomList( + 0, + 5, + () -> new FailedNodeException( + randomAlphaOfLength(10), + randomAlphaOfLength(20), + new ElasticsearchException(randomAlphaOfLength(10)) + ) + ); + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git a/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..b05e6e3a7bf7c --- /dev/null +++ b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.script.expression: + - create_class_loader diff --git a/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..d7e4ad872fc32 --- /dev/null +++ b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.painless: + - create_class_loader diff --git a/muted-tests.yml 
b/muted-tests.yml index fc93134be9c99..e5fae9116c9f0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -226,8 +226,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116777 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 -- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT - issue: https://github.com/elastic/elasticsearch/issues/118414 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/search/search-your-data/retrievers-examples/line_98} issue: https://github.com/elastic/elasticsearch/issues/119155 @@ -243,9 +241,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/data_frame_analytics_cat_apis/Test cat data frame analytics all jobs with header} issue: https://github.com/elastic/elasticsearch/issues/119332 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {lookup-join.MvJoinKeyOnTheDataNode ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/119179 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/119191 - class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT @@ -254,9 +249,15 @@ tests: - class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT method: testMatchAllQuery issue: https://github.com/elastic/elasticsearch/issues/119432 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {lookup-join.MvJoinKeyOnTheDataNode SYNC} - issue: https://github.com/elastic/elasticsearch/issues/119446 +- class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT + method: testTermsQuery + issue: https://github.com/elastic/elasticsearch/issues/119486 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_start_stop/Test start/stop/start transform} + issue: https://github.com/elastic/elasticsearch/issues/119508 +- class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT + method: testEsqlSource + issue: https://github.com/elastic/elasticsearch/issues/119510 # Examples: # diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index 0cd2823080b9b..808aec92fb35d 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -206,13 +206,32 @@ public static void waitForElasticsearchToStart() { ps output: %s - stdout(): + Stdout: %s Stderr: + %s + + Thread dump: %s\ - """, psOutput, dockerLogs.stdout(), dockerLogs.stderr())); + """, psOutput, dockerLogs.stdout(), dockerLogs.stderr(), getThreadDump())); + } + } + + /** + * @return output of jstack for currently running Java process + */ + private static String getThreadDump() { + try { + String pid = dockerShell.run("/usr/share/elasticsearch/jdk/bin/jps | grep -v 'Jps' | awk '{print $1}'").stdout(); + if (pid.isEmpty() == false) { + return dockerShell.run("/usr/share/elasticsearch/jdk/bin/jstack " + Integer.parseInt(pid)).stdout(); + } + } catch (Exception e) { + logger.error("Failed to get thread dump", e); } + + return ""; } /** diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index 9c1daccd2cc9e..ab79fd7ba1813 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -40,18 +40,12 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.SkipUnavailableRule; +import org.elasticsearch.test.SkipUnavailableRule.NotSkipped; import org.elasticsearch.usage.UsageService; import org.junit.Assert; import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Arrays; + import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -59,8 +53,6 @@ import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.MRT_FEATURE; @@ -498,7 +490,7 @@ public void testRemoteOnlyTimesOut() throws Exception { assertThat(perCluster.get(REMOTE2), equalTo(null)); } - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteTimesOutFailure() throws Exception { Map testClusterInfo = setupClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); @@ -528,7 +520,7 @@ public void testRemoteTimesOutFailure() throws Exception { /** * Search when all the remotes failed and not skipped */ - @SkipOverride(aliases = { REMOTE1, REMOTE2 }) + @NotSkipped(aliases = { REMOTE1, REMOTE2 }) public void testFailedAllRemotesSearch() throws Exception { Map testClusterInfo = setupClusters(); String localIndex = (String) testClusterInfo.get("local.index"); @@ -577,7 +569,7 @@ public void testRemoteHasNoIndex() throws Exception { /** * Test that we're still counting remote search even if remote cluster has no such index */ - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteHasNoIndexFailure() throws Exception { SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":no_such_index"); CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); @@ -695,40 +687,4 @@ private void indexDocs(Client client, String index, ActionListener listene bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).execute(listener.safeMap(r -> null)); } - /** - * Annotation to mark specific cluster in a test as not to be skipped when unavailable - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.METHOD) - @interface SkipOverride { - String[] aliases(); - } - - /** - * Test rule to process skip annotations - */ - static class SkipUnavailableRule implements TestRule { - private final Map skipMap; - - SkipUnavailableRule(String... 
clusterAliases) { - this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); - } - - public Map getMap() { - return skipMap; - } - - @Override - public Statement apply(Statement base, Description description) { - // Check for annotation named "SkipOverride" and set the overrides accordingly - var aliases = description.getAnnotation(SkipOverride.class); - if (aliases != null) { - for (String alias : aliases.aliases()) { - skipMap.put(alias, false); - } - } - return base; - } - - } } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index bff1fa4015acd..1cee9ca7bd495 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -148,6 +148,7 @@ static TransportVersion def(int id) { public static final TransportVersion SIMULATE_IGNORED_FIELDS = def(8_813_00_0); public static final TransportVersion TRANSFORMS_UPGRADE_MODE = def(8_814_00_0); public static final TransportVersion NODE_SHUTDOWN_EPHEMERAL_ID_ADDED = def(8_815_00_0); + public static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java index 3bbaa80ec200e..8500302e4f755 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java @@ -41,7 +41,6 @@ *
*/ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment { - public static final String CCS_TELEMETRY_FIELD_NAME = "_search"; private long totalCount; private long successCount; private final Map failureReasons; @@ -66,6 +65,9 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment private final Map clientCounts; private final Map byRemoteCluster; + // Whether we should use per-MRT (minimize roundtrips) metrics. + // ES|QL does not have "minimize_roundtrips" option, so we don't collect those metrics for ES|QL usage. + private boolean useMRT = true; /** * Creates a new stats instance with the provided info. @@ -191,6 +193,11 @@ public Map getByRemoteCluster() { return Collections.unmodifiableMap(byRemoteCluster); } + public CCSTelemetrySnapshot setUseMRT(boolean useMRT) { + this.useMRT = useMRT; + return this; + } + public static class PerClusterCCSTelemetry implements Writeable, ToXContentFragment { private long count; private long skippedCount; @@ -270,6 +277,11 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(count, skippedCount, took); } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } /** @@ -291,8 +303,10 @@ public void add(CCSTelemetrySnapshot stats) { stats.featureCounts.forEach((k, v) -> featureCounts.merge(k, v, Long::sum)); stats.clientCounts.forEach((k, v) -> clientCounts.merge(k, v, Long::sum)); took.add(stats.took); - tookMrtTrue.add(stats.tookMrtTrue); - tookMrtFalse.add(stats.tookMrtFalse); + if (useMRT) { + tookMrtTrue.add(stats.tookMrtTrue); + tookMrtFalse.add(stats.tookMrtFalse); + } remotesPerSearchMax = Math.max(remotesPerSearchMax, stats.remotesPerSearchMax); if (totalCount > 0 && oldCount > 0) { // Weighted average @@ -328,30 +342,28 @@ private static void publishLatency(XContentBuilder builder, String name, LongMet @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(CCS_TELEMETRY_FIELD_NAME); - { - builder.field("total", totalCount); - builder.field("success", successCount); - builder.field("skipped", skippedRemotes); - publishLatency(builder, "took", took); + builder.field("total", totalCount); + builder.field("success", successCount); + builder.field("skipped", skippedRemotes); + publishLatency(builder, "took", took); + if (useMRT) { publishLatency(builder, "took_mrt_true", tookMrtTrue); publishLatency(builder, "took_mrt_false", tookMrtFalse); - builder.field("remotes_per_search_max", remotesPerSearchMax); - builder.field("remotes_per_search_avg", remotesPerSearchAvg); - builder.field("failure_reasons", failureReasons); - builder.field("features", featureCounts); - builder.field("clients", clientCounts); - builder.startObject("clusters"); - { - for (var entry : byRemoteCluster.entrySet()) { - String remoteName = entry.getKey(); - if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) { - remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; - } - builder.field(remoteName, entry.getValue()); + } + builder.field("remotes_per_search_max", remotesPerSearchMax); + builder.field("remotes_per_search_avg", remotesPerSearchAvg); + builder.field("failure_reasons", failureReasons); + builder.field("features", featureCounts); + builder.field("clients", clientCounts); + builder.startObject("clusters"); + { + for (var entry : byRemoteCluster.entrySet()) { + String remoteName = entry.getKey(); + if 
(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) { + remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; } + builder.field(remoteName, entry.getValue()); } - builder.endObject(); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java index 9e58d6d8febef..29a7dcb5d07d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java @@ -10,6 +10,7 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ShardOperationFailedException; @@ -20,6 +21,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import java.util.Arrays; @@ -84,6 +86,15 @@ public Builder setClient(String client) { return this; } + public Builder setClientFromTask(Task task) { + String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER); + if (client != null) { + return setClient(client); + } else { + return this; + } + } + public Builder skippedRemote(String remote) { this.skippedRemotes.add(remote); return this; @@ -133,6 +144,10 @@ public static Result getFailureType(Exception e) { if (ExceptionsHelper.unwrapCorruption(e) != null) { return Result.CORRUPTION; } + ElasticsearchStatusException se = (ElasticsearchStatusException) ExceptionsHelper.unwrap(e, ElasticsearchStatusException.class); + if (se != null && se.getDetailedMessage().contains("license")) { + return Result.LICENSE; + } // This is kind of last resort check - if we still don't know the reason but all shard failures are remote, // we assume it's remote's fault somehow. if (e instanceof SearchPhaseExecutionException spe) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java index 6c8178282d3c3..3f04eceed7eb5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java @@ -47,6 +47,7 @@ public enum Result { TIMEOUT("timeout"), CORRUPTION("corruption"), SECURITY("security"), + LICENSE("license"), // May be helpful if there's a lot of other reasons, and it may be hard to calculate the unknowns for some clients. UNKNOWN("other"); @@ -106,8 +107,14 @@ public String getName() { private final Map clientCounts; private final Map byRemoteCluster; + // Should we calculate separate metrics per MRT? 
+ private final boolean useMRT; public CCSUsageTelemetry() { + this(true); + } + + public CCSUsageTelemetry(boolean useMRT) { this.byRemoteCluster = new ConcurrentHashMap<>(); totalCount = new LongAdder(); successCount = new LongAdder(); @@ -119,6 +126,7 @@ public CCSUsageTelemetry() { skippedRemotes = new LongAdder(); featureCounts = new ConcurrentHashMap<>(); clientCounts = new ConcurrentHashMap<>(); + this.useMRT = useMRT; } public void updateUsage(CCSUsage ccsUsage) { @@ -134,10 +142,12 @@ private void doUpdate(CCSUsage ccsUsage) { if (isSuccess(ccsUsage)) { successCount.increment(); took.record(searchTook); - if (isMRT(ccsUsage)) { - tookMrtTrue.record(searchTook); - } else { - tookMrtFalse.record(searchTook); + if (useMRT) { + if (isMRT(ccsUsage)) { + tookMrtTrue.record(searchTook); + } else { + tookMrtFalse.record(searchTook); + } } ccsUsage.getPerClusterUsage().forEach((r, u) -> byRemoteCluster.computeIfAbsent(r, PerClusterCCSTelemetry::new).update(u)); } else { @@ -243,6 +253,6 @@ public CCSTelemetrySnapshot getCCSTelemetrySnapshot() { Collections.unmodifiableMap(Maps.transformValues(featureCounts, LongAdder::longValue)), Collections.unmodifiableMap(Maps.transformValues(clientCounts, LongAdder::longValue)), Collections.unmodifiableMap(Maps.transformValues(byRemoteCluster, PerClusterCCSTelemetry::getSnapshot)) - ); + ).setUseMRT(useMRT); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index abeb73e5d8c3e..48b4e967742cd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -31,7 +31,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; private final RepositoryUsageStats repositoryUsageStats; - private final CCSTelemetrySnapshot ccsMetrics; + private final CCSTelemetrySnapshot searchCcsMetrics; + private final CCSTelemetrySnapshot esqlCcsMetrics; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -46,10 +47,15 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats = RepositoryUsageStats.readFrom(in); - ccsMetrics = new CCSTelemetrySnapshot(in); + searchCcsMetrics = new CCSTelemetrySnapshot(in); } else { repositoryUsageStats = RepositoryUsageStats.EMPTY; - ccsMetrics = new CCSTelemetrySnapshot(); + searchCcsMetrics = new CCSTelemetrySnapshot(); + } + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) { + esqlCcsMetrics = new CCSTelemetrySnapshot(in); + } else { + esqlCcsMetrics = new CCSTelemetrySnapshot(); } } @@ -61,7 +67,8 @@ public ClusterStatsNodeResponse( ShardStats[] shardsStats, SearchUsageStats searchUsageStats, RepositoryUsageStats repositoryUsageStats, - CCSTelemetrySnapshot ccsTelemetrySnapshot + CCSTelemetrySnapshot ccsTelemetrySnapshot, + CCSTelemetrySnapshot esqlTelemetrySnapshot ) { super(node); this.nodeInfo = nodeInfo; @@ -70,7 +77,8 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; this.searchUsageStats = Objects.requireNonNull(searchUsageStats); this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); - this.ccsMetrics = 
ccsTelemetrySnapshot; + this.searchCcsMetrics = ccsTelemetrySnapshot; + this.esqlCcsMetrics = esqlTelemetrySnapshot; } public NodeInfo nodeInfo() { @@ -101,8 +109,12 @@ public RepositoryUsageStats repositoryUsageStats() { return repositoryUsageStats; } - public CCSTelemetrySnapshot getCcsMetrics() { - return ccsMetrics; + public CCSTelemetrySnapshot getSearchCcsMetrics() { + return searchCcsMetrics; + } + + public CCSTelemetrySnapshot getEsqlCcsMetrics() { + return esqlCcsMetrics; } @Override @@ -117,8 +129,11 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats.writeTo(out); - ccsMetrics.writeTo(out); + searchCcsMetrics.writeTo(out); } // else just drop these stats, ok for bwc + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) { + esqlCcsMetrics.writeTo(out); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 5f7c45c5807a5..ed8ca2f94a78b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -36,10 +36,14 @@ public class ClusterStatsResponse extends BaseNodesResponse remoteClustersStats; + public static final String CCS_TELEMETRY_FIELD_NAME = "_search"; + public static final String ESQL_TELEMETRY_FIELD_NAME = "_esql"; + public ClusterStatsResponse( long timestamp, String clusterUUID, @@ -58,6 +62,7 @@ public ClusterStatsResponse( nodesStats = new ClusterStatsNodes(nodes); indicesStats = new ClusterStatsIndices(nodes, mappingStats, analysisStats, versionStats); ccsMetrics = new CCSTelemetrySnapshot(); + esqlMetrics = new CCSTelemetrySnapshot().setUseMRT(false); ClusterHealthStatus status = null; for (ClusterStatsNodeResponse response : nodes) { // only the master node populates the status @@ -66,7 +71,10 @@ public ClusterStatsResponse( break; } } - nodes.forEach(node -> ccsMetrics.add(node.getCcsMetrics())); + nodes.forEach(node -> { + ccsMetrics.add(node.getSearchCcsMetrics()); + esqlMetrics.add(node.getEsqlCcsMetrics()); + }); this.status = status; this.clusterSnapshotStats = clusterSnapshotStats; @@ -147,9 +155,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (remoteClustersStats != null) { builder.field("clusters", remoteClustersStats); } + builder.startObject(CCS_TELEMETRY_FIELD_NAME); ccsMetrics.toXContent(builder, params); builder.endObject(); + if (esqlMetrics.getTotalCount() > 0) { + builder.startObject(ESQL_TELEMETRY_FIELD_NAME); + esqlMetrics.toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 2c20daa5d7afb..6f69def7aa4e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -103,6 +103,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; 
private final CCSUsageTelemetry ccsUsageHolder; + private final CCSUsageTelemetry esqlUsageHolder; private final Executor clusterStateStatsExecutor; private final MetadataStatsCache mappingStatsCache; @@ -135,6 +136,7 @@ public TransportClusterStatsAction( this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.ccsUsageHolder = usageService.getCcsUsageHolder(); + this.esqlUsageHolder = usageService.getEsqlUsageHolder(); this.clusterStateStatsExecutor = threadPool.executor(ThreadPool.Names.MANAGEMENT); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); @@ -293,6 +295,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); final CCSTelemetrySnapshot ccsTelemetry = ccsUsageHolder.getCCSTelemetrySnapshot(); + final CCSTelemetrySnapshot esqlTelemetry = esqlUsageHolder.getCCSTelemetrySnapshot(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -302,7 +305,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq shardsStats.toArray(new ShardStats[shardsStats.size()]), searchUsageStats, repositoryUsageStats, - ccsTelemetry + ccsTelemetry, + esqlTelemetry ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index ae27406bf396d..70a7f4c8cad0c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -388,10 +388,7 @@ void executeRequest( if (original.pointInTimeBuilder() != null) { tl.setFeature(CCSUsageTelemetry.PIT_FEATURE); } - String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER); - if (client != null) { - tl.setClient(client); - } + tl.setClient(task); // Check if any of the index patterns are wildcard patterns var localIndices = resolvedIndices.getLocalIndices(); if (localIndices != null && Arrays.stream(localIndices.indices()).anyMatch(Regex::isSimpleMatchPattern)) { @@ -508,6 +505,7 @@ void executeRequest( } } }); + final SearchSourceBuilder source = original.source(); if (shouldOpenPIT(source)) { // disabling shard reordering for request @@ -1883,7 +1881,7 @@ private interface TelemetryListener { void setFeature(String feature); - void setClient(String client); + void setClient(Task task); } private class SearchResponseActionListener extends DelegatingActionListener @@ -1917,8 +1915,8 @@ public void setFeature(String feature) { } @Override - public void setClient(String client) { - usageBuilder.setClient(client); + public void setClient(Task task) { + usageBuilder.setClientFromTask(task); } @Override diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index c04fbae05ee2c..da71b8f0ec2f7 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -29,7 +29,7 @@ public class FeatureService { /** * A feature indicating that node features are supported. 
*/ - public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported"); + public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported", true); public static final NodeFeature TEST_FEATURES_ENABLED = new NodeFeature("test_features_enabled"); private static final Logger logger = LogManager.getLogger(FeatureService.class); diff --git a/server/src/main/java/org/elasticsearch/features/NodeFeature.java b/server/src/main/java/org/elasticsearch/features/NodeFeature.java index 961b386d62802..ad270540274b9 100644 --- a/server/src/main/java/org/elasticsearch/features/NodeFeature.java +++ b/server/src/main/java/org/elasticsearch/features/NodeFeature.java @@ -17,7 +17,7 @@ * @param id The feature id. Must be unique in the node. * @param assumedAfterNextCompatibilityBoundary * {@code true} if this feature is removed at the next compatibility boundary (ie next major version), - * and so should be assumed to be true for all nodes after that boundary. + * and so should be assumed to be met by all nodes after that boundary, even if they don't publish it. */ public record NodeFeature(String id, boolean assumedAfterNextCompatibilityBoundary) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 7a5cd97e5a3a3..8d6404e0530e5 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -133,6 +133,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0); public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT = def(8_522_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_9_12_1 = def(8_523_00_0, parseUnchecked("9.12.1")); + public static final IndexVersion INFERENCE_METADATA_FIELDS_BACKPORT = def(8_524_00_0, parseUnchecked("9.12.1")); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java index 6051aafb9f742..80fee58e93110 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java @@ -86,8 +86,12 @@ public abstract ValueFetcher valueFetcher( * @return {@code true} if the new format is enabled; {@code false} otherwise */ public static boolean isEnabled(Settings settings) { - return IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(IndexVersions.INFERENCE_METADATA_FIELDS) - && USE_LEGACY_SEMANTIC_TEXT_FORMAT.get(settings) == false; + var version = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings); + if (version.before(IndexVersions.INFERENCE_METADATA_FIELDS) + && version.between(IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT, IndexVersions.UPGRADE_TO_LUCENE_10_0_0) == false) { + return false; + } + return USE_LEGACY_SEMANTIC_TEXT_FORMAT.get(settings) == false; } /** diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java index 63bd4523f9bd1..690f3155971ca 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java @@ -33,7 +33,8 @@ public class RestClusterStatsAction extends BaseRestHandler { "human-readable-total-docs-size", "verbose-dense-vector-mapping-stats", "ccs-stats", - "retrievers-usage-stats" + "retrievers-usage-stats", + "esql-stats" ); private static final Set SUPPORTED_QUERY_PARAMETERS = Set.of("include_remotes", "nodeId", REST_TIMEOUT_PARAM); diff --git a/server/src/main/java/org/elasticsearch/usage/UsageService.java b/server/src/main/java/org/elasticsearch/usage/UsageService.java index dd4895eb4bdc2..5b4fa0f27bf48 100644 --- a/server/src/main/java/org/elasticsearch/usage/UsageService.java +++ b/server/src/main/java/org/elasticsearch/usage/UsageService.java @@ -26,11 +26,13 @@ public class UsageService { private final Map handlers; private final SearchUsageHolder searchUsageHolder; private final CCSUsageTelemetry ccsUsageHolder; + private final CCSUsageTelemetry esqlUsageHolder; public UsageService() { this.handlers = new HashMap<>(); this.searchUsageHolder = new SearchUsageHolder(); this.ccsUsageHolder = new CCSUsageTelemetry(); + this.esqlUsageHolder = new CCSUsageTelemetry(false); } /** @@ -89,4 +91,8 @@ public SearchUsageHolder getSearchUsageHolder() { public CCSUsageTelemetry getCcsUsageHolder() { return ccsUsageHolder; } + + public CCSUsageTelemetry getEsqlUsageHolder() { + return esqlUsageHolder; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java index a72630c327ea2..6444caf08f831 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java @@ -352,4 +352,20 @@ public void testRanges() throws IOException { assertThat(value2Read.count(), equalTo(count1 + count2)); assertThat(value2Read.max(), equalTo(max1)); } + + public void testUseMRTFalse() { + CCSTelemetrySnapshot empty = new CCSTelemetrySnapshot(); + // Ignore MRT data + empty.setUseMRT(false); + + var randomWithMRT = randomValueOtherThanMany( + v -> v.getTookMrtTrue().count() == 0 || v.getTookMrtFalse().count() == 0, + this::randomCCSTelemetrySnapshot + ); + + empty.add(randomWithMRT); + assertThat(empty.getTook().count(), equalTo(randomWithMRT.getTook().count())); + assertThat(empty.getTookMrtFalse().count(), equalTo(0L)); + assertThat(empty.getTookMrtTrue().count(), equalTo(0L)); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java index c4a2fdee1111e..5eb2224ec5f8e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java @@ -340,4 +340,23 @@ public void testConcurrentUpdates() throws InterruptedException { CCSTelemetrySnapshot expectedSnapshot = ccsUsageHolder.getCCSTelemetrySnapshot(); 
assertThat(snapshot, equalTo(expectedSnapshot)); } + + public void testUseMRTFalse() { + // Ignore MRT counters if instructed. + CCSUsageTelemetry ccsUsageHolder = new CCSUsageTelemetry(false); + + CCSUsage.Builder builder = new CCSUsage.Builder(); + builder.took(10L).setRemotesCount(1).setClient("kibana"); + builder.setFeature(MRT_FEATURE); + ccsUsageHolder.updateUsage(builder.build()); + + builder = new CCSUsage.Builder(); + builder.took(11L).setRemotesCount(1).setClient("kibana"); + ccsUsageHolder.updateUsage(builder.build()); + + CCSTelemetrySnapshot snapshot = ccsUsageHolder.getCCSTelemetrySnapshot(); + assertThat(snapshot.getTook().count(), equalTo(2L)); + assertThat(snapshot.getTookMrtFalse().count(), equalTo(0L)); + assertThat(snapshot.getTookMrtTrue().count(), equalTo(0L)); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 9bf4ad7c3cb64..fbd6e0916eefe 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -130,6 +130,7 @@ public void testCreation() { new ShardStats[] { shardStats }, new SearchUsageStats(), RepositoryUsageStats.EMPTY, + null, null ); diff --git a/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json b/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json index fe9c77cb2a183..a92bab739b37d 100644 --- a/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json +++ b/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json @@ -1,5 +1,4 @@ { - "_search" : { "total" : 10, "success" : 20, "skipped" : 5, @@ -63,5 +62,4 @@ } } } - } -} \ No newline at end of file +} diff --git a/test/fixtures/krb5kdc-fixture/Dockerfile b/test/fixtures/krb5kdc-fixture/Dockerfile index e862c7a71f2ba..47fc05d5aaf5b 100644 --- a/test/fixtures/krb5kdc-fixture/Dockerfile +++ b/test/fixtures/krb5kdc-fixture/Dockerfile @@ -1,9 +1,12 @@ -FROM ubuntu:14.04 -ADD . 
/fixture +FROM alpine:3.21.0 + +ADD src/main/resources /fixture +RUN apk update && apk add --no-cache python3 krb5 krb5-server + RUN echo kerberos.build.elastic.co > /etc/hostname -RUN bash /fixture/src/main/resources/provision/installkdc.sh +RUN sh /fixture/provision/installkdc.sh EXPOSE 88 EXPOSE 88/udp -CMD sleep infinity +CMD ["sleep", "infinity"] diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index c9540011d80dd..887d6a2b68761 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'elasticsearch.deploy-test-fixtures' dockerFixtures { krb5dc { dockerContext = projectDir - version = "1.0" - baseImages = ["ubuntu:14.04"] + version = "1.1" + baseImages = ["alpine:3.21.0"] } } diff --git a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java index cb1f86de51b1f..f44058d0ebcc4 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java +++ b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java @@ -29,7 +29,7 @@ import java.util.List; public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer { - public static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/krb5dc-fixture:1.0"; + public static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/krb5dc-fixture:1.1"; private final TemporaryFolder temporaryFolder = new TemporaryFolder(); private final ProvisioningId provisioningId; private Path krb5ConfFile; @@ -39,14 +39,14 @@ public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer public enum ProvisioningId { HDFS( "hdfs", - "/fixture/src/main/resources/provision/hdfs.sh", + "/fixture/provision/hdfs.sh", "/fixture/build/keytabs/hdfs_hdfs.build.elastic.co.keytab", "/fixture/build/keytabs/elasticsearch.keytab", "hdfs/hdfs.build.elastic.co@BUILD.ELASTIC.CO" ), PEPPA( "peppa", - "/fixture/src/main/resources/provision/peppa.sh", + "/fixture/provision/peppa.sh", "/fixture/build/keytabs/peppa.keytab", "/fixture/build/keytabs/HTTP_localhost.keytab", "peppa@BUILD.ELASTIC.CO" ) @@ -94,7 +94,7 @@ public Krb5kDcContainer(ProvisioningId provisioningId) { withNetworkAliases("kerberos.build.elastic.co", "build.elastic.co"); withCopyFileToContainer(MountableFile.forHostPath("/dev/urandom"), "/dev/random"); withExtraHost("kerberos.build.elastic.co", "127.0.0.1"); - withCommand("bash", provisioningId.scriptPath); + withCommand("sh", provisioningId.scriptPath); } @Override @@ -122,7 +122,7 @@ public String getConf() { .findFirst(); String hostPortSpec = bindings.get().getHostPortSpec(); String s = copyFileFromContainer("/fixture/build/krb5.conf.template", i -> IOUtils.toString(i, StandardCharsets.UTF_8)); - return s.replace("${MAPPED_PORT}", hostPortSpec); + return s.replace("#KDC_DOCKER_HOST", "kdc = 127.0.0.1:" + hostPortSpec); } public Path getKeytab() { diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index 44bd7a841dedb..553bd8f85f70c 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -1,4 +1,4 @@ 
-#!/bin/bash +#!/bin/sh # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one # or more contributor license agreements. Licensed under the "Elastic License @@ -24,7 +24,7 @@ PASSWD="$2" USER=$(echo $PRINC | tr "/" "_") VDIR=/fixture -RESOURCES=$VDIR/src/main/resources +RESOURCES=$VDIR PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties BUILD_DIR=$VDIR/build @@ -45,16 +45,16 @@ USER_KTAB=$LOCALSTATEDIR/$USER.keytab if [ -f $USER_KTAB ] && [ -z "$PASSWD" ]; then echo "Principal '${PRINC}@${REALM}' already exists. Re-copying keytab..." - sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab + cp $USER_KTAB $KEYTAB_DIR/$USER.keytab else if [ -z "$PASSWD" ]; then echo "Provisioning '${PRINC}@${REALM}' principal and keytab..." - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN" - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN" - sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN" + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN" + cp $USER_KTAB $KEYTAB_DIR/$USER.keytab else echo "Provisioning '${PRINC}@${REALM}' principal with password..." - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" fi fi diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh index de08a52df3306..cf2eb5a1b7233 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh set -e diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh index 428747075ff36..a364349c56c68 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one # or more contributor license agreements. Licensed under the "Elastic License @@ -12,8 +12,7 @@ set -e # KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html # and helpful input from https://help.ubuntu.com/community/Kerberos -VDIR=/fixture -RESOURCES=$VDIR/src/main/resources +RESOURCES=/fixture PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties LOCALSTATEDIR=/etc @@ -49,33 +48,11 @@ touch $LOGDIR/kadmin.log touch $LOGDIR/krb5kdc.log touch $LOGDIR/krb5lib.log -# Update package manager -apt-get update -qqy - -# Installation asks a bunch of questions via debconf. 
Set the answers ahead of time -debconf-set-selections <<< "krb5-config krb5-config/read_conf boolean true" -debconf-set-selections <<< "krb5-config krb5-config/kerberos_servers string $KDC_NAME" -debconf-set-selections <<< "krb5-config krb5-config/add_servers boolean true" -debconf-set-selections <<< "krb5-config krb5-config/admin_server string $KDC_NAME" -debconf-set-selections <<< "krb5-config krb5-config/add_servers_realm string $REALM_NAME" -debconf-set-selections <<< "krb5-config krb5-config/default_realm string $REALM_NAME" -debconf-set-selections <<< "krb5-admin-server krb5-admin-server/kadmind boolean true" -debconf-set-selections <<< "krb5-admin-server krb5-admin-server/newrealm note" -debconf-set-selections <<< "krb5-kdc krb5-kdc/debconf boolean true" -debconf-set-selections <<< "krb5-kdc krb5-kdc/purge_data_too boolean false" - -# Install krb5 packages -apt-get install -qqy krb5-{admin-server,kdc} - -# /dev/random produces output very slowly on Ubuntu VM's. Install haveged to increase entropy. -apt-get install -qqy haveged -haveged - # Create kerberos database with stash file and garbage password kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876 # Set up admin acls -cat << EOF > /etc/krb5kdc/kadm5.acl +cat << EOF > /var/lib/krb5kdc/kadm5.acl */admin@$REALM_NAME * */*@$REALM_NAME i EOF diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index b66709968839a..e79caecbcf334 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -6,6 +6,7 @@ # License v3.0 only", or the "Server Side Public License, v 1". [libdefaults] + spake_preauth_groups = edwards25519 default_realm = ${REALM_NAME} dns_canonicalize_hostname = false dns_lookup_kdc = false @@ -25,7 +26,7 @@ [realms] ${REALM_NAME} = { kdc = 127.0.0.1:88 - kdc = 127.0.0.1:${MAPPED_PORT} + #KDC_DOCKER_HOST admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} } diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh index da6480d891af7..24179da5882c7 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh set -e diff --git a/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java b/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java new file mode 100644 index 0000000000000..d5ce943b4d8fe --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test; + +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Test rule to process skip_unavailable override annotations + */ +public class SkipUnavailableRule implements TestRule { + private final Map<String, Boolean> skipMap; + + public SkipUnavailableRule(String... clusterAliases) { + this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); + } + + public Map<String, Boolean> getMap() { + return skipMap; + } + + @Override + public Statement apply(Statement base, Description description) { + // Check for the "NotSkipped" annotation and set the overrides accordingly + var aliases = description.getAnnotation(NotSkipped.class); + if (aliases != null) { + for (String alias : aliases.aliases()) { + skipMap.put(alias, false); + } + } + return base; + } + + /** + * Annotation to mark specific clusters in a test as not to be skipped when unavailable + */ + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + public @interface NotSkipped { + String[] aliases(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index d8b4b15307c47..ee9c8adc47adc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -225,11 +225,19 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices("logs-fleet_server*").privileges("read", "delete_index").build(), // Legacy "Alerts as data" used in Security Solution. // Kibana user creates these indices; reads / writes to them. - RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8) + .privileges("all") + .build(), // Used in Security Solution for value lists. // Kibana user creates these indices; reads / writes to them. 
RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("all") .build(), // "Alerts as data" internal backing indices used in Security Solution, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index e43ae2d1b360b..3ab9bcc024614 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -43,6 +43,7 @@ public class ReservedRolesStore implements BiConsumer, ActionListener> { /** "Security Solutions" only legacy signals index */ public static final String ALERTS_LEGACY_INDEX = ".siem-signals*"; + public static final String ALERTS_LEGACY_INDEX_REINDEXED_V8 = ".reindexed-v8-siem-signals*"; /** Alerts, Rules, Cases (RAC) index used by multiple solutions */ public static final String ALERTS_BACKING_INDEX = ".internal.alerts*"; @@ -60,9 +61,11 @@ public class ReservedRolesStore implements BiConsumer, ActionListene /** "Security Solutions" only lists index for value lists for detections */ public static final String LISTS_INDEX = ".lists-*"; + public static final String LISTS_INDEX_REINDEXED_V8 = ".reindexed-v8-lists-*"; /** "Security Solutions" only lists index for value list items for detections */ public static final String LISTS_ITEMS_INDEX = ".items-*"; + public static final String LISTS_ITEMS_INDEX_REINDEXED_V8 = ".reindexed-v8-items-*"; /** Index pattern for Universal Profiling */ public static final String UNIVERSAL_PROFILING_ALIASES = "profiling-*"; @@ -829,7 +832,14 @@ private static RoleDescriptor buildViewerRoleDescriptor() { .build(), // Security RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.ALERTS_LEGACY_INDEX, + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("read", "view_index_metadata") .build(), // Alerts-as-data @@ -880,7 +890,14 @@ private static RoleDescriptor buildEditorRoleDescriptor() { .build(), // Security RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.ALERTS_LEGACY_INDEX, + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("read", "view_index_metadata", "write", "maintenance") .build(), // Alerts-as-data diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index a96b26ddcb1eb..141b9a7092337 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -613,6 +613,7 @@ public void testKibanaSystemRole() { ".apm-custom-link", ".apm-source-map", ReservedRolesStore.ALERTS_LEGACY_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_BACKING_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_BACKING_INDEX_REINDEXED + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), @@ -620,7 +621,9 @@ public void testKibanaSystemRole() { ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_REINDEXED + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.LISTS_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.LISTS_ITEMS_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ".slo-observability." + randomAlphaOfLength(randomIntBetween(0, 13)) ).forEach(index -> assertAllIndicesAccessAllowed(kibanaRole, index)); diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java index 452f40baa34a8..c93b6404863e8 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java @@ -12,6 +12,7 @@ import org.apache.http.HttpHost; import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -37,9 +38,11 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.ccq.Clusters.REMOTE_CLUSTER_NAME; import static org.hamcrest.Matchers.any; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; @ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class MultiClustersIT extends ESRestTestCase { @@ -395,6 +398,38 @@ public void testIndexPattern() throws Exception { } } + @SuppressWarnings("unchecked") + public void testStats() throws IOException { + Request caps = new Request("GET", "_capabilities?method=GET&path=_cluster/stats&capabilities=esql-stats"); + Response capsResponse = client().performRequest(caps); + Map capsResult = entityAsMap(capsResponse.getEntity()); + assumeTrue("esql stats capability missing", capsResult.get("supported").equals(true)); + + run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data) BY color | SORT color", includeCCSMetadata()); + Request stats = new Request("GET", 
"_cluster/stats"); + Response statsResponse = client().performRequest(stats); + Map result = entityAsMap(statsResponse.getEntity()); + assertThat(result, hasKey("ccs")); + Map ccs = (Map) result.get("ccs"); + assertThat(ccs, hasKey("_esql")); + Map esql = (Map) ccs.get("_esql"); + assertThat(esql, hasKey("total")); + assertThat(esql, hasKey("success")); + assertThat(esql, hasKey("took")); + assertThat(esql, hasKey("remotes_per_search_max")); + assertThat(esql, hasKey("remotes_per_search_avg")); + assertThat(esql, hasKey("failure_reasons")); + assertThat(esql, hasKey("features")); + assertThat(esql, hasKey("clusters")); + Map clusters = (Map) esql.get("clusters"); + assertThat(clusters, hasKey(REMOTE_CLUSTER_NAME)); + assertThat(clusters, hasKey("(local)")); + Map clusterData = (Map) clusters.get(REMOTE_CLUSTER_NAME); + assertThat(clusterData, hasKey("total")); + assertThat(clusterData, hasKey("skipped")); + assertThat(clusterData, hasKey("took")); + } + private RestClient remoteClusterClient() throws IOException { var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); return buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 9b1356438141c..95119cae95590 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -419,6 +419,7 @@ FROM employees | EVAL language_code = emp_no % 10 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code | SORT emp_no +| EVAL language_name = MV_SORT(language_name) | KEEP emp_no, language_code, language_name ; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java new file mode 100644 index 0000000000000..ffbddd52b2551 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.SkipUnavailableRule; +import org.elasticsearch.usage.UsageService; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + +public abstract class AbstractCrossClustersUsageTelemetryIT extends AbstractMultiClustersTestCase { + private static final Logger LOGGER = LogManager.getLogger(AbstractCrossClustersUsageTelemetryIT.class); + protected static final String REMOTE1 = "cluster-a"; + protected static final String REMOTE2 = "cluster-b"; + protected static final String LOCAL_INDEX = "logs-1"; + protected static final String REMOTE_INDEX = "logs-2"; + // We want to send the search to a specific node (we don't care which one) so that we can + // collect the CCS telemetry from it later + protected String queryNode; + + @Before + public void setupQueryNode() { + // The tests are set up in a way that all queries within a single test are sent to the same node, + // thus enabling incremental collection of telemetry data, but the node is random for each test. + queryNode = cluster(LOCAL_CLUSTER).getRandomNodeName(); + } + + protected CCSTelemetrySnapshot getTelemetryFromQuery(String query, String client) throws ExecutionException, InterruptedException { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + return getTelemetryFromQuery(request, client); + } + + protected CCSTelemetrySnapshot getTelemetryFromQuery(EsqlQueryRequest request, String client) throws ExecutionException, + InterruptedException { + // We don't care too much about the response here; we just want to trigger the telemetry collection. + // So we check it's not null and leave the rest to other tests. 
+ if (client != null) { + assertResponse( + cluster(LOCAL_CLUSTER).client(queryNode) + .filterWithHeader(Map.of(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, client)) + .execute(EsqlQueryAction.INSTANCE, request), + Assert::assertNotNull + ); + + } else { + assertResponse(cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request), Assert::assertNotNull); + } + return getTelemetrySnapshot(queryNode); + } + + protected CCSTelemetrySnapshot getTelemetryFromAsyncQuery(String query) throws Exception { + EsqlQueryRequest request = EsqlQueryRequest.asyncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + request.waitForCompletionTimeout(TimeValue.timeValueMillis(100)); + request.keepOnCompletion(false); + return getTelemetryFromAsyncQuery(request); + } + + protected CCSTelemetrySnapshot getTelemetryFromAsyncQuery(EsqlQueryRequest request) throws Exception { + AtomicReference asyncExecutionId = new AtomicReference<>(); + assertResponse(cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request), resp -> { + if (resp.isRunning()) { + assertNotNull("async execution id is null", resp.asyncExecutionId()); + asyncExecutionId.set(resp.asyncExecutionId().get()); + } + }); + if (asyncExecutionId.get() != null) { + assertBusy(() -> { + var getResultsRequest = new GetAsyncResultRequest(asyncExecutionId.get()).setWaitForCompletionTimeout(timeValueMillis(1)); + try ( + var resp = cluster(LOCAL_CLUSTER).client(queryNode) + .execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest) + .actionGet(30, TimeUnit.SECONDS) + ) { + assertFalse(resp.isRunning()); + } + }); + } + return getTelemetrySnapshot(queryNode); + } + + protected CCSTelemetrySnapshot getTelemetryFromFailedQuery(String query) throws Exception { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + + ExecutionException ee = expectThrows( + ExecutionException.class, + cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request)::get + ); + assertNotNull(ee.getCause()); + + return getTelemetrySnapshot(queryNode); + } + + private CCSTelemetrySnapshot getTelemetrySnapshot(String nodeName) { + var usage = cluster(LOCAL_CLUSTER).getInstance(UsageService.class, nodeName); + return usage.getEsqlUsageHolder().getCCSTelemetrySnapshot(); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected List remoteClusterAlias() { + return List.of(REMOTE1, REMOTE2); + } + + @Rule + public SkipUnavailableRule skipOverride = new SkipUnavailableRule(REMOTE1, REMOTE2); + + protected Map setupClusters() { + int numShardsLocal = randomIntBetween(1, 5); + populateLocalIndices(LOCAL_INDEX, numShardsLocal); + + int numShardsRemote = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE1, REMOTE_INDEX, numShardsRemote); + + Map clusterInfo = new HashMap<>(); + clusterInfo.put("local.num_shards", numShardsLocal); + clusterInfo.put("local.index", LOCAL_INDEX); + clusterInfo.put("remote.num_shards", numShardsRemote); + clusterInfo.put("remote.index", REMOTE_INDEX); + + int numShardsRemote2 = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE2, REMOTE_INDEX, numShardsRemote2); + clusterInfo.put("remote2.index", REMOTE_INDEX); + 
clusterInfo.put("remote2.num_shards", numShardsRemote2); + + return clusterInfo; + } + + void populateLocalIndices(String indexName, int numShards) { + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + localClient.prepareIndex(indexName).setSource("id", "local-" + i, "tag", "local", "v", i).get(); + } + localClient.admin().indices().prepareRefresh(indexName).get(); + } + + void populateRemoteIndices(String clusterAlias, String indexName, int numShards) { + Client remoteClient = client(clusterAlias); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex(indexName).setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh(indexName).get(); + } + + @Override + protected Map skipUnavailableForRemoteClusters() { + var map = skipOverride.getMap(); + LOGGER.info("Using skip_unavailable map: [{}]", map); + return map; + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java new file mode 100644 index 0000000000000..33d868e7a69eb --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java @@ -0,0 +1,231 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.SkipUnavailableRule; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; +import static org.hamcrest.Matchers.equalTo; + +public class CrossClustersUsageTelemetryIT extends AbstractCrossClustersUsageTelemetryIT { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) { + List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); + return plugins; + } + + public void assertPerClusterCount(CCSTelemetrySnapshot.PerClusterCCSTelemetry perCluster, long count) { + assertThat(perCluster.getCount(), equalTo(count)); + assertThat(perCluster.getSkippedCount(), equalTo(0L)); + assertThat(perCluster.getTook().count(), equalTo(count)); + } + + public void testLocalRemote() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromQuery("from logs-*,c*:logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(null)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + + telemetry = getTelemetryFromQuery("from logs-*,c*:logs-* | stats sum (v)", "kibana"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(2L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 2L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 2L); + } + + public void testLocalOnly() throws Exception { + setupClusters(); + // Should not produce any usage info since it's a local search + var telemetry = getTelemetryFromQuery("from logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + @SkipUnavailableRule.NotSkipped(aliases = REMOTE1) + public void testFailed() throws Exception { + setupClusters(); + // Should not produce any usage info since it's a local search + var telemetry = getTelemetryFromFailedQuery("from no_such_index | 
stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + + // One remote is skipped, one is not + telemetry = getTelemetryFromFailedQuery("from logs-*,c*:no_such_index | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + Map expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + // cluster-b should be skipped + assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getSkippedCount(), equalTo(1L)); + + // this is only for cluster-a so no skipped remotes + telemetry = getTelemetryFromFailedQuery("from logs-*,cluster-a:no_such_index | stats sum (v)"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 2L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + } + + // TODO: enable when skip-up patch is merged + // public void testSkipAllRemotes() throws Exception { + // var telemetry = getTelemetryFromQuery("from logs-*,c*:no_such_index | stats sum (v)", "unknown"); + // + // assertThat(telemetry.getTotalCount(), equalTo(1L)); + // assertThat(telemetry.getSuccessCount(), equalTo(1L)); + // assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + // assertThat(telemetry.getTook().count(), equalTo(1L)); + // assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + // assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + // assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + // assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + // assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + // assertThat(telemetry.getClientCounts().size(), equalTo(0)); + // + // var perCluster = telemetry.getByRemoteCluster(); + // assertThat(perCluster.size(), equalTo(3)); + // for (String clusterAlias : remoteClusterAlias()) { + // var clusterData = perCluster.get(clusterAlias); + // assertThat(clusterData.getCount(), equalTo(0L)); + // assertThat(clusterData.getSkippedCount(), equalTo(1L)); + // assertThat(clusterData.getTook().count(), equalTo(0L)); + // } + // assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + // } + + public void testRemoteOnly() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromQuery("from c*:logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + 
assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(null)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(2)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertThat(telemetry.getByRemoteCluster().size(), equalTo(2)); + } + + public void testAsync() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromAsyncQuery("from logs-*,c*:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(0)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(1L)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + + // do it again + telemetry = getTelemetryFromAsyncQuery("from logs-*,c*:logs-* | stats sum (v)"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(2L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 2L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 2L); + } + + public void testNoSuchCluster() throws Exception { + setupClusters(); + // This is not recognized as a cross-cluster search + var telemetry = getTelemetryFromFailedQuery("from c*:logs*, nocluster:nomatch | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + @SkipUnavailableRule.NotSkipped(aliases = REMOTE1) + public void testDisconnect() throws Exception { + setupClusters(); + // Disconnect remote1 + cluster(REMOTE1).close(); + var telemetry = getTelemetryFromFailedQuery("from logs-*,cluster-a:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + Map expectedFailure = Map.of(CCSUsageTelemetry.Result.REMOTES_UNAVAILABLE.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + } + +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java new file mode 100644 index 0000000000000..2b993e9474062 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; +import org.elasticsearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CrossClustersUsageTelemetryNoLicenseIT extends AbstractCrossClustersUsageTelemetryIT { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) { + List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithNonEnterpriseOrExpiredLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); + return plugins; + } + + public void testLicenseFailure() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromFailedQuery("from logs-*,c*:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getTook().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + Map<String, Long> expectedFailure = Map.of(CCSUsageTelemetry.Result.LICENSE.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java index 9b21efc069e9f..c1afa728bc37b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java @@ -206,6 +206,10 @@ public Cluster getCluster(String clusterAlias) { return clusterInfo.get(clusterAlias); } + public Map<String, Cluster> getClusters() { + return clusterInfo; + } + /** * Utility to swap a Cluster object. Guidelines for the remapping function: *
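The new getClusters() accessor above is what lets telemetry code walk every cluster entry of a query. As a rough, self-contained sketch of that bookkeeping (hypothetical stand-in types and names, not the Elasticsearch API; in ES the local cluster lives under RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, modeled here as the empty string):

import java.util.Map;

public class ClusterUsageSketch {
    enum Status { SUCCESSFUL, SKIPPED }

    public static void main(String[] args) {
        // Hypothetical stand-in for EsqlExecutionInfo.getClusters(): alias -> status.
        Map<String, Status> clusters = Map.of(
            "", Status.SUCCESSFUL,          // "" models the local-cluster key
            "cluster-a", Status.SUCCESSFUL,
            "cluster-b", Status.SKIPPED
        );
        int remotes = 0;
        int skipped = 0;
        for (Map.Entry<String, Status> e : clusters.entrySet()) {
            if (e.getKey().isEmpty()) {
                continue; // the local cluster is not a remote
            }
            remotes++;
            if (e.getValue() == Status.SKIPPED) {
                skipped++;
            }
        }
        System.out.println("remotes=" + remotes + " skipped=" + skipped); // remotes=2 skipped=1
    }
}

Keying the map by cluster alias, with the local cluster under a reserved key, lets one pass over the same map yield both the remote count and the skipped-remote count, which is the shape of the accounting exercised by the tests above.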
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index dad63d25046d9..974f029eab2ef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -80,7 +80,8 @@ public void esql( ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); - session.execute(request, executionInfo, planRunner, wrap(x -> { + + ActionListener<Result> executeListener = wrap(x -> { planningMetricsManager.publish(planningMetrics, true); listener.onResponse(x); }, ex -> { @@ -88,7 +89,10 @@ public void esql( metrics.failed(clientId); planningMetricsManager.publish(planningMetrics, false); listener.onFailure(ex); - })); + }); + // Wrap the execution in a listener so that if any exception is thrown during execution, the listener picks it up + // and all the metrics are properly updated + ActionListener.run(executeListener, l -> session.execute(request, executionInfo, planRunner, l)); } public IndexResolver indexResolver() {
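The ActionListener.run wrapping added above matters because session.execute can throw synchronously during planning; routing that exception into the listener is what guarantees the failure-side metrics still fire. A minimal sketch of the pattern (hypothetical Listener type, not the Elasticsearch ActionListener API):

import java.util.function.Consumer;

public class ListenerRunSketch {
    interface Listener<T> {
        void onResponse(T result);
        void onFailure(Exception e);
    }

    // Models ActionListener.run(...): any exception thrown synchronously by the
    // body is handed to the listener's failure handler instead of escaping, so
    // failure-side bookkeeping (metrics publishing) always runs.
    static <T> void run(Listener<T> listener, Consumer<Listener<T>> body) {
        try {
            body.accept(listener);
        } catch (Exception e) {
            listener.onFailure(e); // the exception becomes a normal failure callback
        }
    }

    public static void main(String[] args) {
        Listener<String> metricsAware = new Listener<>() {
            public void onResponse(String r) { System.out.println("success metric, result=" + r); }
            public void onFailure(Exception e) { System.out.println("failure metric: " + e.getMessage()); }
        };
        run(metricsAware, l -> { throw new IllegalStateException("boom during planning"); });
    }
}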
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 50d5819688e46..b44e249e38006 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -9,6 +9,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.admin.cluster.stats.CCSUsage; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -20,16 +22,20 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.Nullable; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.usage.UsageService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; @@ -52,6 +58,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; @@ -71,6 +78,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction<EsqlQueryRequest, EsqlQueryResponse> private final AsyncTaskManagementService<EsqlQueryRequest, EsqlQueryResponse, EsqlQueryTask> asyncTaskManagementService; private final RemoteClusterService remoteClusterService; private final QueryBuilderResolver queryBuilderResolver; + private final UsageService usageService; @Inject @SuppressWarnings("this-escape") @@ -86,8 +94,8 @@ public TransportEsqlQueryAction( BlockFactory blockFactory, Client client, NamedWriteableRegistry registry, - IndexNameExpressionResolver indexNameExpressionResolver - + IndexNameExpressionResolver indexNameExpressionResolver, + UsageService usageService ) { // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); @@ -126,6 +134,7 @@ public TransportEsqlQueryAction( ); this.remoteClusterService = transportService.getRemoteClusterService(); this.queryBuilderResolver = new QueryBuilderResolver(searchService, clusterService, transportService, indexNameExpressionResolver); + this.usageService = usageService; } @Override @@ -197,8 +206,65 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) { - ActionListener.map(listener, result -> toResponse(task, request, configuration, result)) + ActionListener.wrap(result -> { + recordCCSTelemetry(task, executionInfo, request, null); + listener.onResponse(toResponse(task, request, configuration, result)); + }, ex -> { + recordCCSTelemetry(task, executionInfo, request, ex); + listener.onFailure(ex); + }) ); + + } + + private void recordCCSTelemetry(Task task, EsqlExecutionInfo executionInfo, EsqlQueryRequest request, @Nullable Exception exception) { + if (executionInfo.isCrossClusterSearch() == false) { + return; + } + + CCSUsage.Builder usageBuilder = new CCSUsage.Builder(); + usageBuilder.setClientFromTask(task); + if (exception != null) { + if (exception instanceof VerificationException ve) { + CCSUsageTelemetry.Result failureType = classifyVerificationException(ve); + if (failureType != CCSUsageTelemetry.Result.UNKNOWN) { + usageBuilder.setFailure(failureType); + } else { + usageBuilder.setFailure(exception); + } + } else { + usageBuilder.setFailure(exception); + } + } + var took = executionInfo.overallTook(); + if (took != null) { + usageBuilder.took(took.getMillis()); + } + if (request.async()) { + usageBuilder.setFeature(CCSUsageTelemetry.ASYNC_FEATURE); + } + + AtomicInteger remotesCount = new AtomicInteger(); + executionInfo.getClusters().forEach((clusterAlias, cluster) -> { + if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + usageBuilder.skippedRemote(clusterAlias); + } else { + usageBuilder.perClusterUsage(clusterAlias, cluster.getTook()); + } + if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false) { + remotesCount.getAndIncrement(); + } + }); + assert remotesCount.get() > 0 : "Got cross-cluster search telemetry without any remote clusters"; + usageBuilder.setRemotesCount(remotesCount.get()); + usageService.getEsqlUsageHolder().updateUsage(usageBuilder.build()); + } + + private CCSUsageTelemetry.Result classifyVerificationException(VerificationException exception) { + if (exception.getDetailedMessage().contains("Unknown index")) { + return CCSUsageTelemetry.Result.NOT_FOUND; + } + return CCSUsageTelemetry.Result.UNKNOWN; } private EsqlExecutionInfo getOrCreateExecutionInfo(Task task, EsqlQueryRequest request) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index bd3b3bdb3483c..eb5e8206e9e6f 100644 ---
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -312,7 +312,7 @@ public void analyzedPlan( .collect(Collectors.toSet()); final List<TableInfo> indices = preAnalysis.indices; - EsqlSessionCCSUtils.checkForCcsLicense(indices, indicesExpressionGrouper, verifier.licenseState()); + EsqlSessionCCSUtils.checkForCcsLicense(executionInfo, indices, indicesExpressionGrouper, verifier.licenseState()); final Set<String> targetClusters = enrichPolicyResolver.groupIndicesPerCluster( indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 662572c466511..95f7a37ce4d62 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -308,6 +308,7 @@ static void updateExecutionInfoAtEndOfPlanning(EsqlExecutionInfo execInfo) { * @throws org.elasticsearch.ElasticsearchStatusException if the license is not valid (or present) for ES|QL CCS search. */ public static void checkForCcsLicense( + EsqlExecutionInfo executionInfo, List<TableInfo> indices, IndicesExpressionGrouper indicesGrouper, XPackLicenseState licenseState @@ -326,6 +327,17 @@ public static void checkForCcsLicense( // check if it is a cross-cluster query if (groupedIndices.size() > 1 || groupedIndices.containsKey(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY) == false) { if (EsqlLicenseChecker.isCcsAllowed(licenseState) == false) { + // initialize the cluster entries in EsqlExecutionInfo before throwing the invalid license error + // so that the CCS telemetry handler can recognize that this error is CCS-related + for (Map.Entry<String, OriginalIndices> entry : groupedIndices.entrySet()) { + executionInfo.swapCluster( + entry.getKey(), + (k, v) -> new EsqlExecutionInfo.Cluster( + entry.getKey(), + Strings.arrayToCommaDelimitedString(entry.getValue().indices()) + ) + ); + } throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index 1000c05282fdb..6b01010ffa5f4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -644,6 +644,7 @@ public void testMissingIndicesIsFatal() { public void testCheckForCcsLicense() { final TestIndicesExpressionGrouper indicesGrouper = new TestIndicesExpressionGrouper(); + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); // this seems to be used only for tracking usage of features, not for checking if a license is expired final LongSupplier currTime = () -> System.currentTimeMillis(); @@ -671,22 +672,22 @@ public void testCheckForCcsLicense() { List<TableInfo> indices = new ArrayList<>(); indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*")))); - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); - checkForCcsLicense(indices, indicesGrouper, platinumLicenseValid); -
checkForCcsLicense(indices, indicesGrouper, goldLicenseValid); - checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); - checkForCcsLicense(indices, indicesGrouper, basicLicenseValid); - checkForCcsLicense(indices, indicesGrouper, standardLicenseValid); - checkForCcsLicense(indices, indicesGrouper, missingLicense); - checkForCcsLicense(indices, indicesGrouper, nullLicense); - - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, platinumLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, goldLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, trialLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, basicLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, standardLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, missingLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, platinumLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, goldLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, basicLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, standardLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, missingLicense); + checkForCcsLicense(executionInfo, indices, indicesGrouper, nullLicense); + + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, platinumLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, goldLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, basicLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, standardLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, missingLicenseInactive); } // cross-cluster search requires a valid (active, non-expired) enterprise license OR a valid trial license @@ -701,8 +702,8 @@ public void testCheckForCcsLicense() { } // licenses that work - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); - checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseValid); // all others fail --- @@ -739,9 +740,10 @@ private void assertLicenseCheckFails( XPackLicenseState licenseState, String expectedErrorMessageSuffix ) { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, - () -> checkForCcsLicense(indices, indicesGrouper, licenseState) + () -> checkForCcsLicense(executionInfo, indices, indicesGrouper, licenseState) ); assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 876ff01812064..62c302e97815d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -9,6 +9,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; @@ -48,7 +49,8 @@ public Set<NodeFeature> getTestFeatures() { SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, SEMANTIC_TEXT_HIGHLIGHTER, SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED, - SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED + SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED, + SemanticInferenceMetadataFieldsMapper.EXPLICIT_NULL_FIXES ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java index 22d6157b335ca..f4aa49bad1648 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java @@ -39,6 +39,7 @@ import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceError; import org.elasticsearch.xpack.inference.mapper.SemanticTextField; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; @@ -50,6 +51,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -67,6 +69,8 @@ */ public class ShardBulkInferenceActionFilter implements MappedActionFilter { protected static final int DEFAULT_BATCH_SIZE = 512; + private static final Object EXPLICIT_NULL = new Object(); + private static final ChunkedInference EMPTY_CHUNKED_INFERENCE = new EmptyChunkedInference(); private final ClusterService clusterService; private final InferenceServiceRegistry inferenceServiceRegistry; @@ -393,11 +397,22 @@ private void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons for (var entry : response.responses.entrySet()) { var fieldName = entry.getKey(); var responses = entry.getValue(); - var model = responses.get(0).model(); + Model model = null; + + InferenceFieldMetadata inferenceFieldMetadata = fieldInferenceMap.get(fieldName); + if (inferenceFieldMetadata == null) { + throw new IllegalStateException("No inference field metadata for field [" + fieldName + "]"); + } + // ensure that the order in the original field is consistent in case of multiple inputs Collections.sort(responses, Comparator.comparingInt(FieldInferenceResponse::inputOrder)); Map<String, List<SemanticTextField.Chunk>> chunkMap = new LinkedHashMap<>(); + for (var resp : responses) { + // Get the first non-null model from the response list + if (model == null) { + model = resp.model; + } + var lst = chunkMap.computeIfAbsent(resp.sourceField, k -> new ArrayList<>()); + lst.addAll( + SemanticTextField.toSemanticTextFieldChunks( @@ -409,21 +424,26 @@ private void
applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons ) ); } + List inputs = responses.stream() .filter(r -> r.sourceField().equals(fieldName)) .map(r -> r.input) .collect(Collectors.toList()); + + // The model can be null if we are only processing update requests that clear inference results. This is ok because we will + // merge in the field's existing model settings on the data node. var result = new SemanticTextField( useLegacyFormat, fieldName, useLegacyFormat ? inputs : null, new SemanticTextField.InferenceResult( - model.getInferenceEntityId(), - new SemanticTextField.ModelSettings(model), + inferenceFieldMetadata.getInferenceId(), + model != null ? new SemanticTextField.ModelSettings(model) : null, chunkMap ), indexRequest.getContentType() ); + if (useLegacyFormat) { SemanticTextUtils.insertValue(fieldName, newDocMap, result); } else { @@ -490,7 +510,8 @@ private Map> createFieldInferenceRequests(Bu } else { var inferenceMetadataFieldsValue = XContentMapValues.extractValue( InferenceMetadataFieldsMapper.NAME + "." + field, - docMap + docMap, + EXPLICIT_NULL ); if (inferenceMetadataFieldsValue != null) { // Inference has already been computed @@ -500,9 +521,22 @@ private Map> createFieldInferenceRequests(Bu int order = 0; for (var sourceField : entry.getSourceFields()) { - // TODO: Detect when the field is provided with an explicit null value - var valueObj = XContentMapValues.extractValue(sourceField, docMap); - if (valueObj == null) { + var valueObj = XContentMapValues.extractValue(sourceField, docMap, EXPLICIT_NULL); + if (useLegacyFormat == false && isUpdateRequest && valueObj == EXPLICIT_NULL) { + /** + * It's an update request, and the source field is explicitly set to null, + * so we need to propagate this information to the inference fields metadata + * to overwrite any inference previously computed on the field. + * This ensures that the field is treated as intentionally cleared, + * preventing any unintended carryover of prior inference results. 
+ */ + var slot = ensureResponseAccumulatorSlot(itemIndex); + slot.addOrUpdateResponse( + new FieldInferenceResponse(field, sourceField, null, order++, 0, null, EMPTY_CHUNKED_INFERENCE) + ); + continue; + } + if (valueObj == null || valueObj == EXPLICIT_NULL) { + if (isUpdateRequest && useLegacyFormat) { + addInferenceResponseFailure( + item.id(), @@ -552,4 +586,11 @@ static IndexRequest getIndexRequestOrNull(DocWriteRequest<?> docWriteRequest) { return null; } } + + private static class EmptyChunkedInference implements ChunkedInference { + @Override + public Iterator<Chunk> chunksAsMatchedTextAndByteReference(XContent xcontent) { + return Collections.emptyIterator(); + } + } }
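The EXPLICIT_NULL sentinel threaded through this filter is the standard trick for telling "key absent" apart from "key present but set to null", which is what decides whether stale inference results get cleared. A minimal self-contained sketch of the idea (hypothetical helper, not the XContentMapValues API):

import java.util.HashMap;
import java.util.Map;

public class ExplicitNullSketch {
    // A private shared object can never collide with real document values.
    private static final Object EXPLICIT_NULL = new Object();

    static Object extract(Map<String, Object> doc, String key, Object onExplicitNull) {
        if (doc.containsKey(key) == false) {
            return null;                               // absent: leave existing inference untouched
        }
        Object value = doc.get(key);
        return value == null ? onExplicitNull : value; // explicit null: caller should clear results
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("sparse_field", null); // the user explicitly cleared this field
        System.out.println(extract(doc, "sparse_field", EXPLICIT_NULL) == EXPLICIT_NULL); // true
        System.out.println(extract(doc, "dense_field", EXPLICIT_NULL) == null);           // true
    }
}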
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java index 7a1a9b056d0a1..3f49973d6e35f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; @@ -38,6 +39,8 @@ public class SemanticInferenceMetadataFieldsMapper extends InferenceMetadataFieldsMapper { private static final SemanticInferenceMetadataFieldsMapper INSTANCE = new SemanticInferenceMetadataFieldsMapper(); + public static final NodeFeature EXPLICIT_NULL_FIXES = new NodeFeature("semantic_text.inference_metadata_fields.explicit_null_fixes"); + public static final TypeParser PARSER = new FixedTypeParser( c -> InferenceMetadataFieldsMapper.isEnabled(c.getSettings()) ? INSTANCE : null ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java index cfd05cb29ca03..fddff17dab4cf 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java @@ -338,16 +338,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws static { SEMANTIC_TEXT_FIELD_PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TEXT_FIELD)); - SEMANTIC_TEXT_FIELD_PARSER.declareObject( - constructorArg(), - (p, c) -> INFERENCE_RESULT_PARSER.parse(p, c), - new ParseField(INFERENCE_FIELD) - ); + SEMANTIC_TEXT_FIELD_PARSER.declareObject(constructorArg(), INFERENCE_RESULT_PARSER, new ParseField(INFERENCE_FIELD)); INFERENCE_RESULT_PARSER.declareString(constructorArg(), new ParseField(INFERENCE_ID_FIELD)); - INFERENCE_RESULT_PARSER.declareObject( + INFERENCE_RESULT_PARSER.declareObjectOrNull( constructorArg(), (p, c) -> MODEL_SETTINGS_PARSER.parse(p, null), + null, new ParseField(MODEL_SETTINGS_FIELD) ); INFERENCE_RESULT_PARSER.declareField(constructorArg(), (p, c) -> { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index b47c55c302273..690a136c566e0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -384,6 +384,17 @@ void parseCreateFieldFromContext(DocumentParserContext context, SemanticTextFiel mapper = this; } + if (mapper.fieldType().getModelSettings() == null) { + for (var chunkList : field.inference().chunks().values()) { + if (chunkList.isEmpty() == false) { + throw new DocumentParsingException( + xContentLocation, + "[" + MODEL_SETTINGS_FIELD + "] must be set for field [" + fullFieldName + "] when chunks are provided" + ); + } + } + } + var chunksField = mapper.fieldType().getChunksField(); var embeddingsField = mapper.fieldType().getEmbeddingsField(); var offsetsField = mapper.fieldType().getOffsetsField(); @@ -895,7 +906,7 @@ private static boolean canMergeModelSettings( if (Objects.equals(previous, current)) { return true; } - if (previous == null) { + if (previous == null || current == null) { return true; } conflicts.addConflict("model_settings", ""); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 478c81f7c5a32..0432a2ff3fc9e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilterChain; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import
org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -67,6 +68,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter.DEFAULT_BATCH_SIZE; import static org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter.getIndexRequestOrNull; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getChunksFieldName; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getOriginalTextFieldName; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingSparse; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomSemanticText; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomSemanticTextInput; @@ -75,12 +78,15 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class ShardBulkInferenceActionFilterTests extends ESTestCase { + private static final Object EXPLICIT_NULL = new Object(); + private final boolean useLegacyFormat; private ThreadPool threadPool; @@ -205,6 +211,11 @@ public void testItemFailures() throws Exception { XContentMapValues.extractValue(useLegacyFormat ? "field1.text" : "field1", actualRequest.sourceAsMap()), equalTo("I am a success") ); + if (useLegacyFormat == false) { + assertNotNull( + XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME + ".field1", actualRequest.sourceAsMap()) + ); + } // item 2 is a failure assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse()); @@ -232,6 +243,79 @@ public void testItemFailures() throws Exception { awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); } + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void testExplicitNull() throws Exception { + StaticModel model = StaticModel.createRandomInstance(); + model.putResult("I am a failure", new ChunkedInferenceError(new IllegalArgumentException("boom"))); + model.putResult("I am a success", randomChunkedInferenceEmbeddingSparse(List.of("I am a success"))); + + ShardBulkInferenceActionFilter filter = createFilter( + threadPool, + Map.of(model.getInferenceEntityId(), model), + randomIntBetween(1, 10), + useLegacyFormat + ); + + CountDownLatch chainExecuted = new CountDownLatch(1); + ActionFilterChain actionFilterChain = (task, action, request, listener) -> { + try { + BulkShardRequest bulkShardRequest = (BulkShardRequest) request; + assertNull(bulkShardRequest.getInferenceFieldMap()); + assertThat(bulkShardRequest.items().length, equalTo(5)); + + // item 0 + assertNull(bulkShardRequest.items()[0].getPrimaryResponse()); + IndexRequest actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[0].request()); + assertThat(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL), is(EXPLICIT_NULL)); + assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL)); + + // item 1 is a success + assertNull(bulkShardRequest.items()[1].getPrimaryResponse()); + actualRequest = 
getIndexRequestOrNull(bulkShardRequest.items()[1].request()); + assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", "I am a success", 1); + + // item 2 is a failure + assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse()); + assertTrue(bulkShardRequest.items()[2].getPrimaryResponse().isFailed()); + var failure = bulkShardRequest.items()[2].getPrimaryResponse().getFailure(); + assertThat(failure.getCause().getCause().getMessage(), containsString("boom")); + + // item 3 + assertNull(bulkShardRequest.items()[3].getPrimaryResponse()); + actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[3].request()); + assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", EXPLICIT_NULL, 0); + + // item 4 + assertNull(bulkShardRequest.items()[4].getPrimaryResponse()); + actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[4].request()); + assertNull(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL)); + assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL)); + } finally { + chainExecuted.countDown(); + } + }; + ActionListener actionListener = mock(ActionListener.class); + Task task = mock(Task.class); + + Map inferenceFieldMap = Map.of( + "obj.field1", + new InferenceFieldMetadata("obj.field1", model.getInferenceEntityId(), new String[] { "obj.field1" }) + ); + Map sourceWithNull = new HashMap<>(); + sourceWithNull.put("field1", null); + + BulkItemRequest[] items = new BulkItemRequest[5]; + items[0] = new BulkItemRequest(0, new IndexRequest("index").source(Map.of("obj", sourceWithNull))); + items[1] = new BulkItemRequest(1, new IndexRequest("index").source("obj.field1", "I am a success")); + items[2] = new BulkItemRequest(2, new IndexRequest("index").source("obj.field1", "I am a failure")); + items[3] = new BulkItemRequest(3, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("obj", sourceWithNull)))); + items[4] = new BulkItemRequest(4, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("field2", "value")))); + BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items); + request.setInferenceFieldMap(inferenceFieldMap); + filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain); + awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); + } + @SuppressWarnings({ "unchecked", "rawtypes" }) public void testManyRandomDocs() throws Exception { Map inferenceModelMap = new HashMap<>(); @@ -435,6 +519,53 @@ private static BulkItemRequest[] randomBulkItemRequest( new BulkItemRequest(requestId, new IndexRequest("index").source(expectedDocMap, requestContentType)) }; } + @SuppressWarnings({ "unchecked" }) + private static void assertInferenceResults( + boolean useLegacyFormat, + IndexRequest request, + String fieldName, + Object expectedOriginalValue, + int expectedChunkCount + ) { + final Map requestMap = request.sourceAsMap(); + if (useLegacyFormat) { + assertThat( + XContentMapValues.extractValue(getOriginalTextFieldName(fieldName), requestMap, EXPLICIT_NULL), + equalTo(expectedOriginalValue) + ); + + List chunks = (List) XContentMapValues.extractValue(getChunksFieldName(fieldName), requestMap); + if (expectedChunkCount > 0) { + assertNotNull(chunks); + assertThat(chunks.size(), equalTo(expectedChunkCount)); + } else { + // If the expected chunk count is 0, we expect that no inference has been performed. 
In this case, the source should not be + // transformed, and thus the semantic text field structure should not be created. + assertNull(chunks); + } + } else { + assertThat(XContentMapValues.extractValue(fieldName, requestMap, EXPLICIT_NULL), equalTo(expectedOriginalValue)); + + Map inferenceMetadataFields = (Map) XContentMapValues.extractValue( + InferenceMetadataFieldsMapper.NAME, + requestMap, + EXPLICIT_NULL + ); + assertNotNull(inferenceMetadataFields); + + // When using the inference metadata fields format, chunks are mapped by source field. We handle clearing inference results for + // a field by emitting an empty chunk list for it. This is done to prevent the clear operation from clearing inference results + // for other source fields. + List chunks = (List) XContentMapValues.extractValue( + getChunksFieldName(fieldName) + "." + fieldName, + inferenceMetadataFields, + EXPLICIT_NULL + ); + assertNotNull(chunks); + assertThat(chunks.size(), equalTo(expectedChunkCount)); + } + } + private static class StaticModel extends TestModel { private final Map resultMap; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java index 6504ccc4dd39f..8fcc0df0093ce 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java @@ -9,10 +9,15 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xpack.inference.InferencePlugin; import java.util.Collection; @@ -24,6 +29,32 @@ protected Collection getPlugins() { return Collections.singletonList(new InferencePlugin(Settings.EMPTY)); } + public void testIsEnabled() { + var settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(true)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), true) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(true)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), false) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(false)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), true) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(false)) + 
.put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), false) + .build(); + assertTrue(InferenceMetadataFieldsMapper.isEnabled(settings)); + } + @Override public void testFieldHasValue() { assertTrue( @@ -42,4 +73,26 @@ public void testFieldHasValueWithEmptyFieldInfos() { public MappedFieldType getMappedFieldType() { return new SemanticInferenceMetadataFieldsMapper.FieldType(); } + + static IndexVersion getRandomCompatibleIndexVersion(boolean useLegacyFormat) { + if (useLegacyFormat) { + if (randomBoolean()) { + return IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.UPGRADE_TO_LUCENE_10_0_0, + IndexVersionUtils.getPreviousVersion(IndexVersions.INFERENCE_METADATA_FIELDS) + ); + } + return IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT); + } else { + if (randomBoolean()) { + return IndexVersionUtils.randomVersionBetween(random(), IndexVersions.INFERENCE_METADATA_FIELDS, IndexVersion.current()); + } + return IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT, + IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + ); + } + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index 11362c3cedd06..e6d68c8343d8b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.Strings; @@ -112,6 +113,10 @@ protected Collection getPlugins() { private MapperService createMapperService(XContentBuilder mappings, boolean useLegacyFormat) throws IOException { var settings = Settings.builder() + .put( + IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), + SemanticInferenceMetadataFieldMapperTests.getRandomCompatibleIndexVersion(useLegacyFormat) + ) .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), useLegacyFormat) .build(); return createMapperService(settings, mappings); @@ -770,6 +775,35 @@ public void testDenseVectorElementType() throws IOException { assertMapperService.accept(byteMapperService, DenseVectorFieldMapper.ElementType.BYTE); } + public void testModelSettingsRequiredWithChunks() throws IOException { + // Create inference results where model settings are set to null and chunks are provided + Model model = TestModel.createRandomInstance(TaskType.SPARSE_EMBEDDING); + SemanticTextField randomSemanticText = randomSemanticText(useLegacyFormat, "field", model, List.of("a"), XContentType.JSON); + SemanticTextField inferenceResults = new SemanticTextField( + randomSemanticText.useLegacyFormat(), + randomSemanticText.fieldName(), + randomSemanticText.originalValues(), + new SemanticTextField.InferenceResult( + randomSemanticText.inference().inferenceId(), + null, + randomSemanticText.inference().chunks() + ), + randomSemanticText.contentType() 
+ ); + + MapperService mapperService = createMapperService( + mapping(b -> addSemanticTextMapping(b, "field", model.getInferenceEntityId(), null)), + useLegacyFormat + ); + SourceToParse source = source(b -> addSemanticTextInferenceResults(useLegacyFormat, b, List.of(inferenceResults))); + DocumentParsingException ex = expectThrows( + DocumentParsingException.class, + DocumentParsingException.class, + () -> mapperService.documentMapper().parse(source) + ); + assertThat(ex.getMessage(), containsString("[model_settings] must be set for field [field] when chunks are provided")); + } + private MapperService mapperServiceForFieldWithModelSettings( String fieldName, String inferenceId, diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml index 660d3e37f4242..27c405f6c23bf 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml @@ -819,84 +819,210 @@ setup: - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 30 } -# TODO: Uncomment this test once we implement a fix -#--- -#"Bypass inference on bulk update operation": -# # Update as upsert -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", "non_inference_field": "non inference test" }, "doc_as_upsert": true}' -# -# - match: { errors: false } -# - match: { items.0.update.result: "created" } -# -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "non_inference_field": "another value" }, "doc_as_upsert": true}' -# refresh: true -# -# - match: { errors: false } -# - match: { items.0.update.result: "updated" } -# -# - do: -# search: -# index: test-index -# body: -# fields: [ _inference_fields ] -# query: -# match_all: { } -# -# - match: { hits.total.value: 1 } -# - match: { hits.total.relation: eq } -# -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 } -# - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings -# - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 } -# - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 } -# -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 } -# - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings -# - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } -# - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 } -# -# - match: { hits.hits.0._source.sparse_field: "inference test" } -# - match: { 
hits.hits.0._source.dense_field: "another inference test" } -# - match: { hits.hits.0._source.non_inference_field: "another value" } -# -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "sparse_field": null, "dense_field": null, "non_inference_field": "updated value" }, "doc_as_upsert": true}' -# refresh: true -# -# - match: { errors: false } -# - match: { items.0.update.result: "updated" } -# -# - do: -# search: -# index: test-index -# body: -# fields: [ _inference_fields ] -# query: -# match_all: { } -# -# - match: { hits.total.value: 1 } -# - match: { hits.total.relation: eq } -# -# # TODO: BUG! Setting sparse_field & dense_field to null does not clear _inference_fields -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 0 } -# -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 0 } -# -# - not_exists: hits.hits.0._source.sparse_field -# - not_exists: hits.hits.0._source.dense_field -# - match: { hits.hits.0._source.non_inference_field: "updated value" } +--- +"Bypass inference on bulk update operation": + # Update as upsert + - do: + bulk: + body: + - '{"update": {"_index": "test-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", "non_inference_field": "non inference test" }, "doc_as_upsert": true}' + + - match: { errors: false } + - match: { items.0.update.result: "created" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-index", "_id": "doc_1"}}' + - '{"doc": { "non_inference_field": "another value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + search: + index: test-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 } + - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 } + + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 } + - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 } + + - match: { hits.hits.0._source.sparse_field: "inference test" } + - match: { hits.hits.0._source.dense_field: "another inference test" } + - match: { hits.hits.0._source.non_inference_field: "another value" } + +--- +"Explicit nulls clear inference results on bulk update operation": + - requires: + cluster_features: "semantic_text.inference_metadata_fields.explicit_null_fixes" + reason: 
+  - do:
+      indices.create:
+        index: test-copy-to-index
+        body:
+          settings:
+            index:
+              mapping:
+                semantic_text:
+                  use_legacy_format: false
+          mappings:
+            properties:
+              sparse_field:
+                type: semantic_text
+                inference_id: sparse-inference-id
+              sparse_source_field:
+                type: text
+                copy_to: sparse_field
+              dense_field:
+                type: semantic_text
+                inference_id: dense-inference-id
+              dense_source_field:
+                type: text
+                copy_to: dense_field
+              non_inference_field:
+                type: text
+
+  - do:
+      index:
+        index: test-copy-to-index
+        id: doc_1
+        body:
+          sparse_field: "inference test"
+          sparse_source_field: "sparse source test"
+          dense_field: "another inference test"
+          dense_source_field: "dense source test"
+          non_inference_field: "non inference test"
+        refresh: true
+
+  - do:
+      headers:
+        # Force JSON content type so that we use a parser that interprets the embeddings as doubles
+        Content-Type: application/json
+      search:
+        index: test-copy-to-index
+        body:
+          fields: [ _inference_fields ]
+          query:
+            match_all: { }
+
+  - match: { hits.total.value: 1 }
+  - match: { hits.total.relation: eq }
+
+  - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 2 }
+  - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 }
+  - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings
+  - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 }
+  - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 }
+  - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field: 1 }
+  - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings
+  - set: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings: sparse_source_field_embeddings }
+  - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.start_offset: 0 }
+  - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.end_offset: 18 }
+
+  - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 2 }
+  - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 }
+  - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings
+  - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 }
+  - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 }
+  - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field: 1 }
+  - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings
+  - set: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings: dense_source_field_embeddings }
+  - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.start_offset: 0 }
+  - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.end_offset: 17 }
+
+  - match: { hits.hits.0._source.sparse_field: "inference test" }
+  - match: { hits.hits.0._source.sparse_source_field: "sparse source test" }
+  - match: { hits.hits.0._source.dense_field: "another inference test" }
+  - match: { hits.hits.0._source.dense_source_field: "dense source test" }
+  - match: { hits.hits.0._source.non_inference_field: "non inference test" }
+
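+  # Null out the semantic_text fields directly; the chunks generated from the
+  # copy_to source fields are expected to survive the update.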
"sparse source test" } + - match: { hits.hits.0._source.dense_field: "another inference test" } + - match: { hits.hits.0._source.dense_source_field: "dense source test" } + - match: { hits.hits.0._source.non_inference_field: "non inference test" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-copy-to-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_field": null, "dense_field": null, "non_inference_field": "updated value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the embeddings as doubles + Content-Type: application/json + search: + index: test-copy-to-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field: 1 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings: $sparse_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.end_offset: 18 } + + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field: 1 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings: $dense_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.end_offset: 17 } + + - not_exists: hits.hits.0._source.sparse_field + - match: { hits.hits.0._source.sparse_source_field: "sparse source test" } + - not_exists: hits.hits.0._source.dense_field + - match: { hits.hits.0._source.dense_source_field: "dense source test" } + - match: { hits.hits.0._source.non_inference_field: "updated value" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-copy-to-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_source_field": null, "dense_source_field": null, "non_inference_field": "another value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + search: + index: test-copy-to-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - not_exists: hits.hits.0._source._inference_fields + - not_exists: hits.hits.0._source.sparse_field + - not_exists: hits.hits.0._source.sparse_source_field + - not_exists: hits.hits.0._source.dense_field + - not_exists: hits.hits.0._source.dense_source_field + - match: { hits.hits.0._source.non_inference_field: "another value" } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml index 6b494d531b2d1..912cdb5a85d35 100644 --- 
+  - do:
+      search:
+        index: test-copy-to-index
+        body:
+          fields: [ _inference_fields ]
+          query:
+            match_all: { }
+
+  - match: { hits.total.value: 1 }
+  - match: { hits.total.relation: eq }
+
+  - not_exists: hits.hits.0._source._inference_fields
+  - not_exists: hits.hits.0._source.sparse_field
+  - not_exists: hits.hits.0._source.sparse_source_field
+  - not_exists: hits.hits.0._source.dense_field
+  - not_exists: hits.hits.0._source.dense_source_field
+  - match: { hits.hits.0._source.non_inference_field: "another value" }
diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml
index 6b494d531b2d1..912cdb5a85d35 100644
--- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml
+++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml
@@ -632,6 +632,31 @@ setup:
   - match: { _source.dense_field.inference.chunks.0.text: "another inference test" }
   - match: { _source.non_inference_field: "another value" }
 
+---
+"Explicit nulls clear inference results on bulk update operation":
+  # Update as upsert
+  - do:
+      bulk:
+        body:
+          - '{"update": {"_index": "test-index", "_id": "doc_1"}}'
+          - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", "non_inference_field": "non inference test" }, "doc_as_upsert": true}'
+
+  - match: { errors: false }
+  - match: { items.0.update.result: "created" }
+
+  - do:
+      get:
+        index: test-index
+        id: doc_1
+
+  - match: { _source.sparse_field.text: "inference test" }
+  - exists: _source.sparse_field.inference.chunks.0.embeddings
+  - match: { _source.sparse_field.inference.chunks.0.text: "inference test" }
+  - match: { _source.dense_field.text: "another inference test" }
+  - exists: _source.dense_field.inference.chunks.0.embeddings
+  - match: { _source.dense_field.inference.chunks.0.text: "another inference test" }
+  - match: { _source.non_inference_field: "non inference test" }
+
   - do:
       bulk:
         body:
diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml
index 5f4dcbd416720..9f19e2e04d2ca 100644
--- a/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml
+++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml
@@ -39,6 +39,8 @@ template:
       log.level:
         type: alias
        path: severity_text
+      event_name:
+        type: keyword
       body:
         type: object
         properties:
diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml
index 95a42b137df52..635ba386f739c 100644
--- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml
+++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml
@@ -105,6 +105,7 @@ Event body:
       service.name: my-service
       attributes:
         event.name: foo
+      event_name: foo
       body:
         structured:
           foo:
@@ -119,6 +120,7 @@ Event body:
         index: $datastream-backing-index
   - is_true: $datastream-backing-index
   - match: { .$datastream-backing-index.mappings.properties.body.properties.structured.properties.foo\.bar.type: "keyword" }
+  - match: { .$datastream-backing-index.mappings.properties.event_name.type: "keyword" }
 ---
 Structured log body:
   - do: