From 67231ab0d8ad9fbe940276ff42063baf6c981174 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 11 Nov 2024 17:05:38 +0100 Subject: [PATCH 01/12] Adding full CompatibilityVersions to NodeInfo (#116582) Co-authored-by: Elastic Machine --- .../org/elasticsearch/TransportVersions.java | 2 ++ .../admin/cluster/node/info/NodeInfo.java | 35 ++++++++++++++----- .../elasticsearch/node/NodeConstruction.java | 3 +- .../org/elasticsearch/node/NodeService.java | 9 +++-- .../cluster/node/info/NodeInfoTests.java | 3 +- .../remote/RemoteClusterNodesActionTests.java | 5 +-- .../cluster/stats/ClusterStatsNodesTests.java | 3 +- .../ingest/ReservedPipelineActionTests.java | 3 +- .../TransportVersionsFixupListenerTests.java | 32 +++++++++++++---- .../nodesinfo/NodeInfoStreamingTests.java | 3 +- .../action/cat/RestPluginsActionTests.java | 3 +- .../AutoscalingNodesInfoServiceTests.java | 3 +- .../TransportNodeEnrollmentActionTests.java | 3 +- ...InternalEnrollmentTokenGeneratorTests.java | 5 +-- 14 files changed, 81 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index dc63d6827af2..c9e5cdbad850 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -175,6 +175,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion 
REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); @@ -190,6 +191,7 @@ static TransportVersion def(int id) { public static final TransportVersion LOGSDB_TELEMETRY_STATS = def(8_785_00_0); public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_788_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 71e3185329ed..a7d92682b763 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -15,6 +15,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -22,6 +23,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsInfo; @@ -42,7 +44,7 @@ public class NodeInfo extends BaseNodeResponse { private final String version; - private final TransportVersion transportVersion; + private final CompatibilityVersions compatibilityVersions; private final IndexVersion indexVersion; private final Map componentVersions; 
private final Build build; @@ -64,15 +66,20 @@ public NodeInfo(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { version = in.readString(); - transportVersion = TransportVersion.readVersion(in); + if (in.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || in.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions = CompatibilityVersions.readVersion(in); + } else { + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions + } indexVersion = IndexVersion.readVersion(in); } else { Version legacyVersion = Version.readVersion(in); version = legacyVersion.toString(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - transportVersion = TransportVersion.readVersion(in); + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions } else { - transportVersion = TransportVersion.fromId(legacyVersion.id); + compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(legacyVersion.id), Map.of()); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { indexVersion = IndexVersion.readVersion(in); @@ -114,7 +121,7 @@ public NodeInfo(StreamInput in) throws IOException { public NodeInfo( String version, - TransportVersion transportVersion, + CompatibilityVersions compatibilityVersions, IndexVersion indexVersion, Map componentVersions, Build build, @@ -134,7 +141,7 @@ public NodeInfo( ) { super(node); this.version = version; - this.transportVersion = transportVersion; + this.compatibilityVersions = compatibilityVersions; this.indexVersion = indexVersion; this.componentVersions = componentVersions; this.build = build; @@ -171,7 +178,7 @@ public String getVersion() { * The most recent transport version that can be 
used by this node */ public TransportVersion getTransportVersion() { - return transportVersion; + return compatibilityVersions.transportVersion(); } /** @@ -188,6 +195,13 @@ public Map getComponentVersions() { return componentVersions; } + /** + * A map of system index names to versions for their mappings supported by this node. + */ + public Map getCompatibilityVersions() { + return compatibilityVersions.systemIndexMappingsVersion(); + } + /** * The build version of the node. */ @@ -240,8 +254,11 @@ public void writeTo(StreamOutput out) throws IOException { } else { Version.writeVersion(Version.fromString(version), out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - TransportVersion.writeVersion(transportVersion, out); + if (out.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || out.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions.writeTo(out); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + TransportVersion.writeVersion(compatibilityVersions.transportVersion(), out); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { IndexVersion.writeVersion(indexVersion, out); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 9e5eff382de2..eba24c407e4d 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1078,7 +1078,8 @@ private void construct( searchTransportService, indexingLimits, searchModule.getValuesSourceRegistry().getUsageService(), - repositoriesService + repositoriesService, + compatibilityVersions ); final TimeValue metricsInterval = settings.getAsTime("telemetry.agent.metrics_interval", TimeValue.timeValueSeconds(10)); diff --git 
a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 9310849ba811..7c71487ed68c 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -10,7 +10,6 @@ package org.elasticsearch.node; import org.elasticsearch.Build; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -19,6 +18,7 @@ import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeValue; @@ -65,6 +65,7 @@ public class NodeService implements Closeable { private final Coordinator coordinator; private final RepositoriesService repositoriesService; private final Map componentVersions; + private final CompatibilityVersions compatibilityVersions; NodeService( Settings settings, @@ -84,7 +85,8 @@ public class NodeService implements Closeable { SearchTransportService searchTransportService, IndexingPressure indexingPressure, AggregationUsageService aggregationUsageService, - RepositoriesService repositoriesService + RepositoriesService repositoriesService, + CompatibilityVersions compatibilityVersions ) { this.settings = settings; this.threadPool = threadPool; @@ -104,6 +106,7 @@ public class NodeService implements Closeable { this.aggregationUsageService = aggregationUsageService; this.repositoriesService = repositoriesService; this.componentVersions = findComponentVersions(pluginService); + this.compatibilityVersions = compatibilityVersions; 
clusterService.addStateApplier(ingestService); } @@ -124,7 +127,7 @@ public NodeInfo info( return new NodeInfo( // TODO: revert to Build.current().version() when Kibana is updated Version.CURRENT.toString(), - TransportVersion.current(), + compatibilityVersions, IndexVersion.current(), componentVersions, Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java index c0bf8f7c3bf1..af65a452c61f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -40,7 +41,7 @@ public class NodeInfoTests extends ESTestCase { public void testGetInfo() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index 3eb0ff9fae67..6a9d6973a004 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; 
import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -78,7 +79,7 @@ public void testDoExecuteForRemoteServerNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, @@ -156,7 +157,7 @@ public void testDoExecuteForRemoteNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 44ceb94b392e..627c57e07a1f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -327,7 +328,7 @@ private static NodeInfo createNodeInfo(String nodeId, String transportType, Stri } return new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), 
Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 9729b653ae3d..c45b7618a629 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -105,7 +106,7 @@ public void setup() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index f9d3b7fcc920..9eec8309bbb8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -34,11 +35,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; 
+import java.util.Set; import java.util.concurrent.Executor; +import static java.util.Map.entry; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.same; @@ -77,7 +81,7 @@ private static Map versions(T... versions) { return tvs; } - private static NodesInfoResponse getResponse(Map responseData) { + private static NodesInfoResponse getResponse(Map responseData) { return new NodesInfoResponse( ClusterName.DEFAULT, responseData.entrySet() @@ -207,10 +211,19 @@ public void testVersionsAreFixed() { argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), action.capture() ); - action.getValue().onResponse(getResponse(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + action.getValue() + .onResponse( + getResponse( + Map.ofEntries( + entry("node1", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())), + entry("node2", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())) + ) + ) + ); verify(taskQueue).submitTask(anyString(), task.capture(), any()); - assertThat(task.getValue().results(), equalTo(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + assertThat(task.getValue().results().keySet(), equalTo(Set.of("node1", "node2"))); + assertThat(task.getValue().results().values(), everyItem(equalTo(NEXT_TRANSPORT_VERSION))); } public void testConcurrentChangesDoNotOverlap() { @@ -259,12 +272,17 @@ public void testFailedRequestsAreRetried() { Scheduler scheduler = mock(Scheduler.class); Executor executor = mock(Executor.class); + var compatibilityVersions = new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new 
SystemIndexDescriptor.MappingsVersion(1, 1234)) + ); ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) + .nodes(node(Version.CURRENT, Version.CURRENT, Version.CURRENT)) .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) + Map.ofEntries( + entry("node0", compatibilityVersions), + entry("node1", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())), + entry("node2", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())) ) ) .build(); diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index fd839999edf2..33801dfb9841 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -241,7 +242,7 @@ private static NodeInfo createNodeInfo() { } return new NodeInfo( randomAlphaOfLengthBetween(6, 32), - TransportVersionUtils.randomVersion(random()), + new CompatibilityVersions(TransportVersionUtils.randomVersion(random()), Map.of()), IndexVersionUtils.randomVersion(random()), componentVersions, build, diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java 
b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java index 766fefbeddb0..0994f9bf2303 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Table; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.PluginDescriptor; @@ -66,7 +67,7 @@ private Table buildTable(List pluginDescriptor) { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 9658db911f6d..85cd41510212 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -452,7 +453,7 @@ private static 
org.elasticsearch.action.admin.cluster.node.info.NodeInfo infoFor OsInfo osInfo = new OsInfo(randomLong(), processors, Processors.of((double) processors), null, null, null, null); return new org.elasticsearch.action.admin.cluster.node.info.NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java index 26bb64eb0b07..87a20dcd7a12 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; @@ -103,7 +104,7 @@ public void testDoExecute() throws Exception { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 0a1f5f801143..ea4a340ffddf 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -236,7 +237,7 @@ public Answer answerNullHttpInfo(InvocationOnMock invocationO List.of( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, @@ -271,7 +272,7 @@ private Answer answerWithInfo(InvocationOnMock invocationOnMo List.of( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, From c8134bf787d70a82f10460c821cb201c2cda3763 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 11:41:21 -0500 Subject: [PATCH 02/12] Document new ip geolocation fields (#116603) (#116606) --- docs/changelog/114193.yaml | 5 +++++ docs/changelog/114268.yaml | 5 +++++ docs/changelog/114521.yaml | 5 +++++ docs/reference/ingest/processors/geoip.asciidoc | 16 ++++++++++------ 4 files changed, 25 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/114193.yaml create mode 100644 docs/changelog/114268.yaml create mode 100644 docs/changelog/114521.yaml diff --git a/docs/changelog/114193.yaml b/docs/changelog/114193.yaml new file mode 100644 index 000000000000..f18f9359007b --- /dev/null +++ b/docs/changelog/114193.yaml @@ -0,0 +1,5 @@ +pr: 114193 +summary: Add postal_code 
support to the City and Enterprise databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114268.yaml b/docs/changelog/114268.yaml new file mode 100644 index 000000000000..5e4457005d7d --- /dev/null +++ b/docs/changelog/114268.yaml @@ -0,0 +1,5 @@ +pr: 114268 +summary: Support more maxmind fields in the geoip processor +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114521.yaml b/docs/changelog/114521.yaml new file mode 100644 index 000000000000..c3a9c7cdd084 --- /dev/null +++ b/docs/changelog/114521.yaml @@ -0,0 +1,5 @@ +pr: 114521 +summary: Add support for registered country fields for maxmind geoip databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 3a9ba58dedbf..2eff56f87e82 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -51,10 +51,12 @@ field instead. *Depends on what is available in `database_file`: * If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`. +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, and `continent_name`. The fields actually added depend on what has been found +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured @@ -70,10 +72,12 @@ The fields actually added depend on what has been found and which properties wer `organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and -`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. 
Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] From 1c299fd7e9e10562f57ed5960aeb9a4f75a1a002 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Mon, 11 Nov 2024 18:58:47 +0100 Subject: [PATCH 03/12] Synonyms test fix - update number of shards (#116224) (#116609) (cherry picked from commit e59407251b66a50b5bd5aaec8767cf909fef8fda) --- .../test/synonyms/90_synonyms_reloading_for_synset.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index dc94b3622240..efaef80ee093 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -34,7 +34,6 @@ settings: index: number_of_shards: 1 - number_of_replicas: 0 analysis: filter: my_synonym_filter: @@ -68,7 +67,6 @@ settings: index: number_of_shards: 1 - number_of_replicas: 0 analysis: filter: my_synonym_filter: @@ -95,7 +93,6 @@ - '{"index": {"_index": "my_index2", "_id": "2"}}' - '{"my_field": "goodbye"}' - # An update of synonyms_set1 must trigger auto-reloading of analyzers only for synonyms_set1 - do: synonyms.put_synonym: @@ -105,8 +102,9 @@ - synonyms: "hello, salute" - synonyms: "ciao => goodbye" - match: { result: "updated" } - - match: { reload_analyzers_details._shards.total: 2 } # shard requests are still sent to 2 indices - - match: { reload_analyzers_details._shards.successful: 2 } + - gt: { reload_analyzers_details._shards.total: 0 } + - gt: { reload_analyzers_details._shards.successful: 0 } + - match: { reload_analyzers_details._shards.failed: 0 } - length: { 
reload_analyzers_details.reload_details: 1 } # reload details contain only a single index - match: { reload_analyzers_details.reload_details.0.index: "my_index1" } - match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" } From a9003c9586a8bddc1630a90e3e73392cebef7e35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Mon, 11 Nov 2024 19:21:19 +0100 Subject: [PATCH 04/12] [8.x] Fix NPE in EnrichLookupService on mixed clusters with <8.14 versions (#116583) (#116607) * Fix NPE in EnrichLookupService on mixed clusters with <8.14 versions (#116583) Fixes https://github.com/elastic/elasticsearch/issues/116529 Fixes https://github.com/elastic/elasticsearch/issues/116544 * Changed switch type for 8.x --- docs/changelog/116583.yaml | 7 +++++++ .../xpack/esql/enrich/AbstractLookupService.java | 8 ++++++++ .../xpack/esql/enrich/EnrichLookupService.java | 6 +++--- 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/116583.yaml diff --git a/docs/changelog/116583.yaml b/docs/changelog/116583.yaml new file mode 100644 index 000000000000..3dc8337fe5b8 --- /dev/null +++ b/docs/changelog/116583.yaml @@ -0,0 +1,7 @@ +pr: 116583 +summary: Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions +area: ES|QL +type: bug +issues: + - 116529 + - 116544 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 2419aa83845a..d2f90bd6c1e1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -45,6 +45,7 @@ import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; import org.elasticsearch.compute.operator.lookup.QueryList; import 
org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -182,6 +183,9 @@ protected static QueryList termQueryList( Block block, DataType inputDataType ) { + if (inputDataType == null) { + return QueryList.rawTermQueryList(field, searchExecutionContext, block); + } return switch (inputDataType) { case IP -> QueryList.ipTermQueryList(field, searchExecutionContext, (BytesRefBlock) block); case DATETIME -> QueryList.dateTermQueryList(field, searchExecutionContext, (LongBlock) block); @@ -459,6 +463,10 @@ abstract static class Request { abstract static class TransportRequest extends org.elasticsearch.transport.TransportRequest implements IndicesRequest { final String sessionId; final ShardId shardId; + /** + * For mixed clusters with nodes <8.14, this will be null. + */ + @Nullable final DataType inputDataType; final Page inputPage; final List extractFields; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index f24a16bb6369..2d85b46e33a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -127,9 +127,9 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro TaskId parentTaskId = TaskId.readFromStream(in); String sessionId = in.readString(); ShardId shardId = new ShardId(in); - DataType inputDataType = DataType.fromTypeName( - (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown" - ); + DataType inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) + ? 
DataType.fromTypeName(in.readString()) + : null; String matchType = in.readString(); String matchField = in.readString(); Page inputPage; From d698e72af3dfe53d9679f4ef069007c302eae196 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 11 Nov 2024 19:45:10 +0100 Subject: [PATCH 05/12] [8.x] Add a cluster listener to fix missing system index mappings after upgrade (#115771) This PR modifies `TransportVersionsFixupListener` to include all of compatibility versions (not only TransportVersion) in the fixup. `TransportVersionsFixupListener` spots the instances when the master has been upgraded to the most recent code version, along with non-master nodes, but some nodes are missing a "proper" (non-inferred) Transport version. This PR adds another check to also ensure that we have real (non-empty) system index mapping versions. To do so, it modifies NodeInfo so it carries all of CompatibilityVersions (TransportVersion + SystemIndexDescriptor.MappingVersions). This was initially done via a separate fixup listener + ad-hoc transport action, but the 2 listeners "raced" to update ClusterState on the same CompatibilityVersions structure; it just made sense to do it at the same time. 
The fixup is very similar to https://github.com/elastic/elasticsearch/pull/110710, which does the same for cluster features; plus, it adds a CI test to cover the bug raised in https://github.com/elastic/elasticsearch/issues/112694 Closes https://github.com/elastic/elasticsearch/issues/112694 --- .../upgrades/SystemIndexMappingUpgradeIT.java | 168 ++++++++++++++++ ...> CompatibilityVersionsFixupListener.java} | 117 ++++++++--- .../cluster/service/TransportFeatures.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 4 +- ...patibilityVersionsFixupListenerTests.java} | 186 ++++++++++++++++-- 5 files changed, 428 insertions(+), 49 deletions(-) create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndexMappingUpgradeIT.java rename server/src/main/java/org/elasticsearch/cluster/service/{TransportVersionsFixupListener.java => CompatibilityVersionsFixupListener.java} (59%) rename server/src/test/java/org/elasticsearch/cluster/service/{TransportVersionsFixupListenerTests.java => CompatibilityVersionsFixupListenerTests.java} (60%) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndexMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndexMappingUpgradeIT.java new file mode 100644 index 000000000000..042d0cc9023b --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndexMappingUpgradeIT.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Build; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; + +public class SystemIndexMappingUpgradeIT extends ESRestTestCase { + + private static final String OLD_CLUSTER_VERSION = System.getProperty("tests.old_cluster_version"); + private static final TemporaryFolder repoDirectory = new TemporaryFolder(); + + private static final ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(Version.fromString(OLD_CLUSTER_VERSION)) + .nodes(3) + .setting("path.repo", new Supplier<>() { + @Override + @SuppressForbidden(reason = "TemporaryFolder only has io.File methods, not nio.File") + public String get() { + return repoDirectory.getRoot().getPath(); + } + }) + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = 
RuleChain.outerRule(repoDirectory).around(cluster); + + private String testRestCluster = cluster.getHttpAddresses(); + + @Override + protected String getTestRestCluster() { + return testRestCluster; + } + + @Override + protected final Settings restClientSettings() { + return Settings.builder() + .put(super.restClientSettings()) + // increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") + .build(); + } + + private static String getNodesAttribute(ObjectPath objectPath, String nodeId, String attribute) { + try { + return objectPath.evaluate("nodes." + nodeId + "." + attribute); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static List getUpgradedNodesAddresses() throws IOException { + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + return nodesAsMap.keySet() + .stream() + .filter(id -> getNodesAttribute(objectPath, id, "version").equals(Build.current().version())) // nodes on current version + .map(id -> getNodesAttribute(objectPath, id, "http.publish_address")) + .toList(); + } + + @SuppressWarnings("unchecked") + private List> fieldAsObjectList(Map objectMap, String field) { + return (List>) (objectMap.get(field)); + } + + @SuppressWarnings("unchecked") + private Map fieldAsObject(Map objectMap, String field) { + return (Map) (objectMap.get(field)); + } + + @UpdateForV9() + public void testGrowShrinkUpgradeUpdatesSystemIndexMapping() throws Exception { + /* + * From 8.11, CompatibilityVersions holds a map of system index names to their mappings versions, alongside the transport version. 
+ * See https://github.com/elastic/elasticsearch/pull/99307 + * The problem was fixed in 8.16.1/8.17, so we want to test upgrade from a cluster pre-8.11 to a cluster post-8.16.0 + * For testing the first condition, we use a synthetic cluster feature. The second condition is implied, as the fix and this test + * are applied only to post-8.16.0 + */ + assumeFalse( + "Testing upgrades from before CompatibilityVersions held mapping versions in cluster state", + testFeatureService.clusterHasFeature("gte_v8.11.0") + ); + + // Upgrade node 0 and 1 to the current version, leave node 2 to the BwC version + logger.info("Upgrading node 0 to version {}", Version.CURRENT); + cluster.upgradeNodeToVersion(0, Version.CURRENT); + + logger.info("Upgrading node 1 to version {}", Version.CURRENT); + cluster.upgradeNodeToVersion(1, Version.CURRENT); + + // Query the nodes, ensure we do _not_ have node versions in the answer, or if we do, mappings are empty + Map nodesVersions = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + assertThat( + nodesVersions, + anyOf( + not(hasKey("nodes_versions")), + transformedMatch( + x -> fieldAsObjectList(x, "nodes_versions"), + everyItem(transformedMatch(item -> fieldAsObject(item, "mappings_versions"), anEmptyMap())) + ) + ) + ); + + var upgradedNodes = getUpgradedNodesAddresses(); + + // Stop the last "old" node + cluster.stopNode(2, false); + + // Ensure we talk only to the 2 live, upgraded nodes + closeClients(); + testRestCluster = String.join(",", upgradedNodes); + initClient(); + + assertBusy(() -> { + var newNodesVersions = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + assertThat( + newNodesVersions, + allOf( + hasKey("nodes_versions"), + transformedMatch( + x -> fieldAsObjectList(x, "nodes_versions"), + everyItem(transformedMatch(item -> fieldAsObject(item, "mappings_versions"), not(anEmptyMap()))) + ) + ) + ); + }); + } +} diff --git 
a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListener.java similarity index 59% rename from server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java rename to server/src/main/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListener.java index 93f0d8b80190..5cd538245391 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListener.java @@ -10,9 +10,9 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.internal.ClusterAdminClient; @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.set.Sets; @@ -34,39 +35,46 @@ import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.Function; import 
java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.cluster.ClusterState.INFERRED_TRANSPORT_VERSION; /** - * This fixes up the transport version from pre-8.8.0 cluster state that was inferred as the minimum possible, + * This fixes up the compatibility versions (transport version and system index mapping versions) in cluster state. + * Transport version from pre-8.8.0 cluster state was inferred as the minimum possible, due to the master node not understanding cluster state with transport versions added in 8.8.0. - * Any nodes with the inferred placeholder cluster state is then refreshed with their actual transport version + * Any nodes with the inferred placeholder cluster state are then refreshed with their actual transport version. + * Same for system index mapping versions: when upgraded from pre-8.11.0, cluster state holds an empty map of system index mapping + * versions. Any nodes with an empty system index mapping versions map in cluster state are refreshed with their actual versions.
*/ @UpdateForV9 // this can be removed in v9 -public class TransportVersionsFixupListener implements ClusterStateListener { +public class CompatibilityVersionsFixupListener implements ClusterStateListener { - private static final Logger logger = LogManager.getLogger(TransportVersionsFixupListener.class); + private static final Logger logger = LogManager.getLogger(CompatibilityVersionsFixupListener.class); static final NodeFeature FIX_TRANSPORT_VERSION = new NodeFeature("transport.fix_transport_version"); private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); - private final MasterServiceTaskQueue taskQueue; + private final MasterServiceTaskQueue taskQueue; private final ClusterAdminClient client; private final Scheduler scheduler; private final Executor executor; private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); private final FeatureService featureService; - public TransportVersionsFixupListener( + public CompatibilityVersionsFixupListener( ClusterService service, ClusterAdminClient client, FeatureService featureService, @@ -83,8 +91,8 @@ public TransportVersionsFixupListener( ); } - TransportVersionsFixupListener( - MasterServiceTaskQueue taskQueue, + CompatibilityVersionsFixupListener( + MasterServiceTaskQueue taskQueue, ClusterAdminClient client, FeatureService featureService, Scheduler scheduler, @@ -97,42 +105,54 @@ public TransportVersionsFixupListener( this.executor = executor; } - class NodeTransportVersionTask implements ClusterStateTaskListener { - private final Map results; + class NodeCompatibilityVersionsTask implements ClusterStateTaskListener { + private final Map results; private final int retryNum; - NodeTransportVersionTask(Map results, int retryNum) { + NodeCompatibilityVersionsTask(Map results, int retryNum) { this.results = results; this.retryNum = retryNum; } @Override public void onFailure(Exception e) { - logger.error("Could not apply transport version for nodes {} to cluster state", 
results.keySet(), e); + logger.error("Could not apply compatibility versions for nodes {} to cluster state", results.keySet(), e); scheduleRetry(results.keySet(), retryNum); } - public Map results() { + public Map results() { return results; } } - private static class TransportVersionUpdater implements ClusterStateTaskExecutor { + static class TransportVersionUpdater implements ClusterStateTaskExecutor { @Override - public ClusterState execute(BatchExecutionContext context) throws Exception { + public ClusterState execute(BatchExecutionContext context) throws Exception { ClusterState.Builder builder = ClusterState.builder(context.initialState()); boolean modified = false; for (var c : context.taskContexts()) { for (var e : c.getTask().results().entrySet()) { // this node's transport version might have been updated already/node has gone away var cvMap = builder.compatibilityVersions(); - TransportVersion recordedTv = Optional.ofNullable(cvMap.get(e.getKey())) + var currentCompatibilityVersions = cvMap.get(e.getKey()); + + TransportVersion recordedTv = Optional.ofNullable(currentCompatibilityVersions) .map(CompatibilityVersions::transportVersion) .orElse(null); - assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) + + assert (currentCompatibilityVersions != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) : "Node " + e.getKey() + " is in the cluster but does not have an associated transport version recorded"; - if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { - builder.putCompatibilityVersions(e.getKey(), e.getValue(), Map.of()); // unknown mappings versions + + var systemIndexMappingsVersion = Optional.ofNullable(currentCompatibilityVersions) + .map(CompatibilityVersions::systemIndexMappingsVersion) + .orElse(Map.of()); + + if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION) || systemIndexMappingsVersion.isEmpty()) { + builder.putCompatibilityVersions( + e.getKey(), + 
e.getValue().transportVersion(), + e.getValue().systemIndexMappingsVersion() + ); modified = true; } } @@ -151,6 +171,9 @@ private static Map getCompatibilityVersions(Clust public void clusterChanged(ClusterChangedEvent event) { if (event.localNodeMaster() == false) return; // only if we're master + List> queries = new ArrayList<>(); + Map compatibilityVersions = getCompatibilityVersions(event.state()); + // if the min node version > 8.8.0, and the cluster state has some transport versions == 8.8.0, // then refresh all inferred transport versions to their real versions // now that everything should understand cluster state with transport versions @@ -158,13 +181,44 @@ public void clusterChanged(ClusterChangedEvent event) { && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes - Set nodes = getCompatibilityVersions(event.state()).entrySet() + queries.add( + compatibilityVersions.entrySet() + .stream() + .filter(e -> e.getValue().transportVersion().equals(INFERRED_TRANSPORT_VERSION)) + .map(Map.Entry::getKey) + ); + } + + /* + * Also check if there are any nodes that should have SystemIndex mapping versions in cluster state, but don't. + * This can happen if the master was upgraded from before 8.11, and one or more non-master nodes + * were already upgraded. They don't re-join the cluster with the new master, so never get their CompatibilityVersions + * updated into cluster state. + * So we need to do a separate transport call to get the node SystemIndex mapping versions and add them to cluster state. + * We can't use features to determine when this should happen, as that is unreliable for upgrades (same issue, + * see NodeFeaturesFixupListener). + * We also can't use transport version, as that is unreliable for upgrades from versions before 8.8 + * (see TransportVersionFixupListener). + * So the only thing we can use is release version. 
This is ok here, as Serverless will never hit this case, so the node + * feature fetch action will never be called on Serverless. + * The problem affects 8.11+, but as we fixed the problem in 8.16.1 and 8.17.0 (by adding systemIndexMappingsVersion to + * NodesInfoResponse), we can fix only nodes with version > 8.16.0. + * This whole class will be removed in ES v9. + */ + queries.add( + event.state() + .nodes() .stream() - .filter(e -> e.getValue().transportVersion().equals(INFERRED_TRANSPORT_VERSION)) - .map(Map.Entry::getKey) - .collect(Collectors.toSet()); + .filter(n -> n.getVersion().after(Version.V_8_16_0)) + .map(DiscoveryNode::getId) + .filter(n -> compatibilityVersions.getOrDefault(n, CompatibilityVersions.EMPTY).systemIndexMappingsVersion().isEmpty()) + ); + + Set queryNodes = queries.stream().flatMap(Function.identity()).collect(Collectors.toSet()); - updateTransportVersions(nodes, 0); + if (queryNodes.isEmpty() == false) { + logger.debug("Fetching actual compatibility versions for nodes {}", queryNodes); + updateTransportVersions(queryNodes, 0); } } @@ -201,7 +255,7 @@ public void onResponse(NodesInfoResponse response) { @Override public void onFailure(Exception e) { pendingNodes.removeAll(outstandingNodes); - logger.warn("Could not read transport versions for nodes {}", outstandingNodes, e); + logger.warn("Could not read nodes info for nodes {}", outstandingNodes, e); scheduleRetry(outstandingNodes, retryNum); } }); @@ -218,12 +272,17 @@ private void handleResponse(NodesInfoResponse response, int retryNum) { } // carry on and read what we can - Map results = response.getNodes() + Map results = response.getNodes() .stream() - .collect(Collectors.toUnmodifiableMap(n -> n.getNode().getId(), NodeInfo::getTransportVersion)); + .collect( + Collectors.toUnmodifiableMap( + n -> n.getNode().getId(), + n -> new CompatibilityVersions(n.getTransportVersion(), n.getCompatibilityVersions()) + ) + ); if (results.isEmpty() == false) { - 
taskQueue.submitTask("update-transport-version", new NodeTransportVersionTask(results, retryNum), null); + taskQueue.submitTask("update-transport-version", new NodeCompatibilityVersionsTask(results, retryNum), null); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java index 6e0a8afd6cf8..a4bad7c2db4f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java @@ -20,6 +20,6 @@ public class TransportFeatures implements FeatureSpecification { public Map getHistoricalFeatures() { // transport version was introduced in 8.8.0, but we need to wait until all nodes are >8.8.0 // to properly detect when we need to fix transport versions - return Map.of(TransportVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); + return Map.of(CompatibilityVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index eba24c407e4d..7b9a26a4e7da 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -62,7 +62,7 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitor; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.TransportVersionsFixupListener; +import org.elasticsearch.cluster.service.CompatibilityVersionsFixupListener; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; @@ -787,7 +787,7 @@ private void construct( if 
(DiscoveryNode.isMasterNode(settings)) { clusterService.addListener(new SystemIndexMappingUpdateService(systemIndices, client)); clusterService.addListener( - new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) + new CompatibilityVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) ); clusterService.addListener(new NodeFeaturesFixupListener(clusterService, client.admin().cluster(), threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListenerTests.java similarity index 60% rename from server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java rename to server/src/test/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListenerTests.java index 9eec8309bbb8..9fed50f127b9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/CompatibilityVersionsFixupListenerTests.java @@ -20,13 +20,16 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.TransportVersionsFixupListener.NodeTransportVersionTask; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.cluster.service.CompatibilityVersionsFixupListener.NodeCompatibilityVersionsTask; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import 
org.elasticsearch.features.FeatureService; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; @@ -37,9 +40,12 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Map.entry; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -53,13 +59,13 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.hamcrest.MockitoHamcrest.argThat; -public class TransportVersionsFixupListenerTests extends ESTestCase { +public class CompatibilityVersionsFixupListenerTests extends ESTestCase { private static final Version NEXT_VERSION = Version.V_8_8_1; private static final TransportVersion NEXT_TRANSPORT_VERSION = TransportVersion.fromId(NEXT_VERSION.id); @SuppressWarnings("unchecked") - private static MasterServiceTaskQueue newMockTaskQueue() { + private static MasterServiceTaskQueue newMockTaskQueue() { return mock(MasterServiceTaskQueue.class); } @@ -72,6 +78,20 @@ private static DiscoveryNodes node(Version... versions) { return builder.build(); } + private static DiscoveryNodes node(VersionInformation... versions) { + var builder = DiscoveryNodes.builder(); + for (int i = 0; i < versions.length; i++) { + builder.add( + DiscoveryNodeUtils.builder("node" + i) + .address(new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i)) + .version(versions[i]) + .build() + ); + } + builder.localNodeId("node0").masterNodeId("node0"); + return builder.build(); + } + @SafeVarargs private static Map versions(T... 
versions) { Map tvs = new HashMap<>(); @@ -114,7 +134,7 @@ private static NodesInfoResponse getResponse(Map } public void testNothingFixedWhenNothingToInfer() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -122,7 +142,7 @@ public void testNothingFixedWhenNothingToInfer() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), @@ -135,7 +155,7 @@ public void testNothingFixedWhenNothingToInfer() { } public void testNothingFixedWhenOnNextVersion() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -143,7 +163,7 @@ public void testNothingFixedWhenOnNextVersion() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), @@ -156,7 +176,7 @@ public void testNothingFixedWhenOnNextVersion() { } public void testNothingFixedWhenOnPreviousVersion() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); ClusterState testState = 
ClusterState.builder(ClusterState.EMPTY_STATE) @@ -169,7 +189,41 @@ public void testNothingFixedWhenOnPreviousVersion() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); + listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + + verify(taskQueue, never()).submitTask(anyString(), any(), any()); + } + + public void testNothingFixedWhenVersionMappingsAlreadyPresent() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + + var nodes = node(Version.CURRENT, Version.CURRENT); + var versions = nodes.stream() + .map(DiscoveryNode::getId) + .collect( + Collectors.toUnmodifiableMap( + Function.identity(), + x -> new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) + ) + ) + ); + + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes) + .nodeIdsToCompatibilityVersions(versions) + .build(); + + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), @@ -183,7 +237,7 @@ public void testNothingFixedWhenOnPreviousVersion() { @SuppressWarnings("unchecked") public void testVersionsAreFixed() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -197,9 +251,9 @@ public void testVersionsAreFixed() { .build(); ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); - ArgumentCaptor task = 
ArgumentCaptor.forClass(NodeTransportVersionTask.class); + ArgumentCaptor task = ArgumentCaptor.forClass(NodeCompatibilityVersionsTask.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), @@ -223,11 +277,109 @@ public void testVersionsAreFixed() { verify(taskQueue).submitTask(anyString(), task.capture(), any()); assertThat(task.getValue().results().keySet(), equalTo(Set.of("node1", "node2"))); - assertThat(task.getValue().results().values(), everyItem(equalTo(NEXT_TRANSPORT_VERSION))); + assertThat( + task.getValue().results().values(), + everyItem(transformedMatch(CompatibilityVersions::transportVersion, equalTo(NEXT_TRANSPORT_VERSION))) + ); + } + + public void testMappingVersionsFixedAfterNewMaster() throws Exception { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + + var compatibilityVersions = new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) + ); + + var nodes = node(Version.CURRENT, Version.CURRENT, Version.CURRENT); + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes) + .nodeIdsToCompatibilityVersions( + Map.ofEntries( + entry("node0", compatibilityVersions), + entry("node1", CompatibilityVersions.EMPTY), + entry("node2", CompatibilityVersions.EMPTY) + ) + ) + .build(); + + ArgumentCaptor> action = ArgumentCaptor.captor(); + ArgumentCaptor task = ArgumentCaptor.captor(); + + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); + listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + 
verify(client).nodesInfo( + argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), + action.capture() + ); + + action.getValue().onResponse(getResponse(Map.of("node1", compatibilityVersions, "node2", compatibilityVersions))); + verify(taskQueue).submitTask(anyString(), task.capture(), any()); + + ClusterState newState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + testState, + new CompatibilityVersionsFixupListener.TransportVersionUpdater(), + List.of(task.getValue()) + ); + + assertThat( + newState.compatibilityVersions().values(), + everyItem( + transformedMatch( + CompatibilityVersions::systemIndexMappingsVersion, + equalTo(compatibilityVersions.systemIndexMappingsVersion()) + ) + ) + ); + } + + public void testMappingVerisionsFetchedOnlyForUpdatedNodes() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + + var compatibilityVersions = new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) + ); + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes( + node( + VersionInformation.CURRENT, + VersionInformation.CURRENT, + new VersionInformation(Version.V_8_15_0, IndexVersion.current(), IndexVersion.current()) + ) + ) + .nodeIdsToCompatibilityVersions( + Map.ofEntries( + entry("node0", compatibilityVersions), + entry("node1", CompatibilityVersions.EMPTY), + entry("node2", CompatibilityVersions.EMPTY) + ) + ) + .build(); + + ArgumentCaptor> action = ArgumentCaptor.captor(); + + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); + listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + 
verify(client).nodesInfo(argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContaining("node1"))), action.capture()); } public void testConcurrentChangesDoNotOverlap() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) @@ -240,7 +392,7 @@ public void testConcurrentChangesDoNotOverlap() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), @@ -267,7 +419,7 @@ public void testConcurrentChangesDoNotOverlap() { @SuppressWarnings("unchecked") public void testFailedRequestsAreRetried() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); Scheduler scheduler = mock(Scheduler.class); Executor executor = mock(Executor.class); @@ -290,7 +442,7 @@ public void testFailedRequestsAreRetried() { ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + CompatibilityVersionsFixupListener listeners = new CompatibilityVersionsFixupListener( taskQueue, client, new FeatureService(List.of(new TransportFeatures())), From 807d988c9df32432e66f8df37b44ed6cb20f1d9f Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 14:24:17 -0500 Subject: [PATCH 06/12] Document new ip_location APIs (#116611) (#116615) --- docs/changelog/114548.yaml | 5 + ...c => delete-ip-location-database.asciidoc} | 28 +++--- ...idoc => get-ip-location-database.asciidoc} | 30 +++--- docs/reference/ingest/apis/index.asciidoc | 18 
++-- .../ingest/apis/put-geoip-database.asciidoc | 72 --------------- .../apis/put-ip-location-database.asciidoc | 92 +++++++++++++++++++ docs/reference/redirects.asciidoc | 17 +++- 7 files changed, 150 insertions(+), 112 deletions(-) create mode 100644 docs/changelog/114548.yaml rename docs/reference/ingest/apis/{delete-geoip-database.asciidoc => delete-ip-location-database.asciidoc} (52%) rename docs/reference/ingest/apis/{get-geoip-database.asciidoc => get-ip-location-database.asciidoc} (65%) delete mode 100644 docs/reference/ingest/apis/put-geoip-database.asciidoc create mode 100644 docs/reference/ingest/apis/put-ip-location-database.asciidoc diff --git a/docs/changelog/114548.yaml b/docs/changelog/114548.yaml new file mode 100644 index 000000000000..b9692bcb2d10 --- /dev/null +++ b/docs/changelog/114548.yaml @@ -0,0 +1,5 @@ +pr: 114548 +summary: Support IPinfo database configurations +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/apis/delete-geoip-database.asciidoc b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc similarity index 52% rename from docs/reference/ingest/apis/delete-geoip-database.asciidoc rename to docs/reference/ingest/apis/delete-ip-location-database.asciidoc index 957e59f0f0de..c3a10a914d2f 100644 --- a/docs/reference/ingest/apis/delete-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc @@ -1,30 +1,30 @@ -[[delete-geoip-database-api]] -=== Delete geoip database configuration API +[[delete-ip-location-database-api]] +=== Delete IP geolocation database configuration API ++++ -Delete geoip database configuration +Delete IP geolocation database configuration ++++ -Deletes a geoip database configuration. +Deletes an IP geolocation database configuration. 
[source,console] ---- -DELETE /_ingest/geoip/database/my-database-id +DELETE /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[delete-geoip-database-api-request]] +[[delete-ip-location-database-api-request]] ==== {api-request-title} -`DELETE /_ingest/geoip/database/` +`DELETE /_ingest/ip_location/database/` -[[delete-geoip-database-api-prereqs]] +[[delete-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API. -[[delete-geoip-database-api-path-params]] +[[delete-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -35,21 +35,21 @@ DELETE /_ingest/geoip/database/my-database-id -- -[[delete-geoip-database-api-query-params]] +[[delete-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[delete-geoip-database-api-example]] +[[delete-ip-location-database-api-example]] ==== {api-examples-title} -[[delete-geoip-database-api-specific-ex]] -===== Delete a specific geoip database configuration +[[delete-ip-location-database-api-specific-ex]] +===== Delete a specific IP geolocation database configuration [source,console] ---- -DELETE /_ingest/geoip/database/example-database-id +DELETE /_ingest/ip_location/database/example-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] 
diff --git a/docs/reference/ingest/apis/get-geoip-database.asciidoc b/docs/reference/ingest/apis/get-ip-location-database.asciidoc similarity index 65% rename from docs/reference/ingest/apis/get-geoip-database.asciidoc rename to docs/reference/ingest/apis/get-ip-location-database.asciidoc index f055e3e759db..26e9ddc1eee5 100644 --- a/docs/reference/ingest/apis/get-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/get-ip-location-database.asciidoc @@ -1,33 +1,33 @@ -[[get-geoip-database-api]] -=== Get geoip database configuration API +[[get-ip-location-database-api]] +=== Get IP geolocation database configuration API ++++ -Get geoip database configuration +Get IP geolocation database configuration ++++ -Returns information about one or more geoip database configurations. +Returns information about one or more IP geolocation database configurations. [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[get-geoip-database-api-request]] +[[get-ip-location-database-api-request]] ==== {api-request-title} -`GET /_ingest/geoip/database/` +`GET /_ingest/ip_location/database/` -`GET /_ingest/geoip/database` +`GET /_ingest/ip_location/database` -[[get-geoip-database-api-prereqs]] +[[get-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API. -[[get-geoip-database-api-path-params]] +[[get-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -38,22 +38,22 @@ supported. To get all database configurations, omit this parameter or use `*`. 
-[[get-geoip-database-api-query-params]] +[[get-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] -[[get-geoip-database-api-example]] +[[get-ip-location-database-api-example]] ==== {api-examples-title} -[[get-geoip-database-api-specific-ex]] -===== Get information for a specific geoip database configuration +[[get-ip-location-database-api-specific-ex]] +===== Get information for a specific IP geolocation database configuration [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index e068f99ea0ad..35adc4782197 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -25,16 +25,14 @@ Use the following APIs to get statistics about ingest processing: the <>. [discrete] -[[ingest-geoip-database-apis]] -=== Ingest GeoIP Database APIs - -preview::["The commercial IP geolocation database download management APIs are in technical preview and may be changed or removed in a future release. 
Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] +[[ingest-ip-location-database-apis]] +=== Ingest IP Location Database APIs Use the following APIs to configure and manage commercial IP geolocation database downloads: -* <> to create or update a database configuration -* <> to retrieve a database configuration -* <> to delete a database configuration +* <> to create or update a database configuration +* <> to retrieve a database configuration +* <> to delete a database configuration include::put-pipeline.asciidoc[] include::get-pipeline.asciidoc[] @@ -42,6 +40,6 @@ include::delete-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] include::simulate-ingest.asciidoc[] include::geoip-stats.asciidoc[] -include::put-geoip-database.asciidoc[] -include::get-geoip-database.asciidoc[] -include::delete-geoip-database.asciidoc[] +include::put-ip-location-database.asciidoc[] +include::get-ip-location-database.asciidoc[] +include::delete-ip-location-database.asciidoc[] diff --git a/docs/reference/ingest/apis/put-geoip-database.asciidoc b/docs/reference/ingest/apis/put-geoip-database.asciidoc deleted file mode 100644 index 311c30300238..000000000000 --- a/docs/reference/ingest/apis/put-geoip-database.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[put-geoip-database-api]] -=== Create or update geoip database configuration API -++++ -Create or update geoip database configuration -++++ - -Creates or updates an IP geolocation database configuration. - -IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, -it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only -one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order -to download from Maxmind. The license key setting does not take effect until all nodes are restarted. 
- -[source,console] ----- -PUT _ingest/geoip/database/my-database-id -{ - "name": "GeoIP2-Domain", - "maxmind": { - "account_id": "1025402" - } -} ----- -// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] - -[[put-geoip-database-api-request]] -==== {api-request-title} - -`PUT /_ingest/geoip/database/` - -[[put-geoip-database-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the -`manage` <> to use this API. - - -[[put-geoip-database-api-path-params]] -==== {api-path-parms-title} - -``:: -+ -__ -(Required, string) ID of the database configuration to create or update. - -[[put-geoip-database-api-query-params]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] - -[[put-geoip-database-api-request-body]] -==== {api-request-body-title} - -// tag::geoip-database-object[] -`name`:: -(Required, string) -The provider-assigned name of the IP geolocation database to download. - -``:: -(Required, a provider object and its associated configuration) -The configuration necessary to identify which IP geolocation provider to use to download -the database, as well as any provider-specific configuration necessary for such downloading. -+ -At present, the only supported provider is `maxmind`, and the maxmind provider -requires that an `account_id` (string) is configured. -// end::geoip-database-object[] - -[[geoip-database-configuration-licensing]] -==== Licensing - -Downloading databases from third party providers is a commercial feature that requires an -appropriate license. For more information, refer to https://www.elastic.co/subscriptions. 
diff --git a/docs/reference/ingest/apis/put-ip-location-database.asciidoc b/docs/reference/ingest/apis/put-ip-location-database.asciidoc new file mode 100644 index 000000000000..e42d84752694 --- /dev/null +++ b/docs/reference/ingest/apis/put-ip-location-database.asciidoc @@ -0,0 +1,92 @@ +[[put-ip-location-database-api]] +=== Create or update IP geolocation database configuration API +++++ +Create or update IP geolocation database configuration +++++ + +Creates or updates an IP geolocation database configuration. + +IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, +it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only +one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order +to download from Maxmind. The license key setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-1 +{ + "name": "GeoIP2-Domain", + "maxmind": { + "account_id": "1234567" + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + +IMPORTANT: The IPinfo configuration shown below requires a token. Because the token is sensitive information, +it is stored as a <> in {es} named `ingest.ip_location.downloader.ipinfo.token`. Only +one IPinfo token is currently allowed per {es} cluster. A valid token must be in the secure settings in order +to download from IPinfo. The token setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-2 +{ + "name": "standard_location", + "ipinfo": { + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] 
+ + +[[put-ip-location-database-api-request]] +==== {api-request-title} + +`PUT /_ingest/ip_location/database/` + +[[put-ip-location-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`manage` <> to use this API. + + +[[put-ip-location-database-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +__ +(Required, string) ID of the database configuration to create or update. + +[[put-ip-location-database-api-query-params]] +==== {api-query-parms-title} + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +[[put-ip-location-database-api-request-body]] +==== {api-request-body-title} + +// tag::ip-location-database-object[] +`name`:: +(Required, string) +The provider-assigned name of the IP geolocation database to download. + +``:: +(Required, a provider object and its associated configuration) +The configuration necessary to identify which IP geolocation provider to use to download +the database, as well as any provider-specific configuration necessary for such downloading. ++ +At present, the only supported providers are `maxmind` and `ipinfo`. The maxmind provider +requires that an `account_id` (string) is configured. The ipinfo provider does not require +additional configuration in the request body. +// end::ip-location-database-object[] + +[[ip-location-database-configuration-licensing]] +==== Licensing + +Downloading databases from third party providers is a commercial feature that requires an +appropriate license. For more information, refer to https://www.elastic.co/subscriptions. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index e0568f500f26..506dff7891ad 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1926,4 +1926,19 @@ Refer to <>. [role="exclude",id="remote-clusters-privileges"] === Configure roles and users for remote clusters -Refer to <>. \ No newline at end of file +Refer to <>. 
+ +[role="exclude",id="put-geoip-database-api"] +=== Create or update geoip database configuration API + +Refer to <>. + +[role="exclude",id="get-geoip-database-api"] +=== Get geoip database configuration + +Refer to <>. + +[role="exclude",id="delete-geoip-database-api"] +=== Delete geoip database configuration API + +Refer to <>. From f9077a09ef9995fb06c033f122d8c874ed7151d6 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 11 Nov 2024 16:58:44 -0500 Subject: [PATCH 07/12] Clarify the vector files utilized for preloading (#116488) (#116622) Adds clarification for vector preloading, what extension is to what storage kind, and that quantized vectors are stored in separate files allowing for individual preload. closes: https://github.com/elastic/elasticsearch/issues/116273 --- docs/reference/how-to/knn-search.asciidoc | 13 ++++++++++--- docs/reference/index-modules/store.asciidoc | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index e884c01dd350..60c32cabdb5c 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -95,13 +95,20 @@ and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some of datasets and configurations that we use for our nightly benchmarks. [discrete] +[[dense-vector-preloading]] include::search-speed.asciidoc[tag=warm-fs-cache] The following file extensions are used for the approximate kNN search: +Each extension is broken down by the quantization types. -* `vec` and `veq` for vector values -* `vex` for HNSW graph -* `vem`, `vemf`, and `vemq` for metadata +* `vex` for the HNSW graph +* `vec` for all non-quantized vector values. This includes all element types: `float`, `byte`, and `bit`. 
+* `veq` for quantized vectors indexed with <>: `int4` or `int8` +* `veb` for binary vectors indexed with <>: `bbq` +* `vem`, `vemf`, `vemq`, and `vemb` for metadata, usually small and not a concern for preloading + +Generally, if you are using a quantized index, you should only preload the relevant quantized values and the HNSW graph. +Preloading the raw vectors is not necessary and might be counterproductive. [discrete] === Reduce the number of index segments diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 9b30ba9dbde3..aba0850c7643 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -143,8 +143,8 @@ terms dictionaries, postings lists and points, which are the most important parts of the index for search and aggregations. For vector search, you use <>, -you might want to set the setting to vector search files: `["vec", "vex", "vem"]` -("vec" is used for vector values, "vex" – for HNSW graph, "vem" – for metadata). +you might want to set the setting to vector search files. See <> for a detailed +list of the files. 
Note that this setting can be dangerous on indices that are larger than the size of the main memory of the host, as it would cause the filesystem cache to be From 7c3d4027cdff322abaad8e1268b604050d158483 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 11 Nov 2024 18:26:32 -0600 Subject: [PATCH 08/12] Adding a deprecation info API warning for data streams with old indices (#116447) (#116626) * Adding a deprecation info API warning for data streams with old indices (#116447) * removing use of a method not available in 8.x --- docs/changelog/116447.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 3 +- .../DataStreamDeprecationChecks.java | 74 +++++++++++ .../xpack/deprecation/DeprecationChecks.java | 6 + .../deprecation/DeprecationInfoAction.java | 39 +++++- .../TransportDeprecationInfoAction.java | 2 + .../DataStreamDeprecationChecksTests.java | 124 ++++++++++++++++++ .../DeprecationInfoActionResponseTests.java | 47 ++++++- 8 files changed, 295 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/116447.yaml create mode 100644 x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java create mode 100644 x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java diff --git a/docs/changelog/116447.yaml b/docs/changelog/116447.yaml new file mode 100644 index 000000000000..8c0cea4b5457 --- /dev/null +++ b/docs/changelog/116447.yaml @@ -0,0 +1,5 @@ +pr: 116447 +summary: Adding a deprecation info API warning for data streams with old indices +area: Data streams +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index c9e5cdbad850..2317ff8256fe 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -191,7 +191,8 @@ static TransportVersion 
def(int id) { public static final TransportVersion LOGSDB_TELEMETRY_STATS = def(8_785_00_0); public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); - public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_788_00_0); + public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java new file mode 100644 index 000000000000..ee029d01427a --- /dev/null +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.List; + +import static java.util.Map.entry; +import static java.util.Map.ofEntries; + +public class DataStreamDeprecationChecks { + static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clusterState) { + List backingIndices = dataStream.getIndices(); + boolean hasOldIndices = backingIndices.stream() + .anyMatch(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)); + if (hasOldIndices) { + long totalIndices = backingIndices.size(); + List oldIndices = backingIndices.stream() + .filter(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)) + .toList(); + long totalOldIndices = oldIndices.size(); + long totalOldSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isSearchableSnapshot()) + .count(); + long totalOldPartiallyMountedSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isPartialSearchableSnapshot()) + .count(); + long totalOldFullyMountedSearchableSnapshots = totalOldSearchableSnapshots - totalOldPartiallyMountedSearchableSnapshots; + return new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + ofEntries( + entry( + "backing_indices", + ofEntries( + entry("count", totalIndices), + entry( + "need_upgrading", + ofEntries( + entry("count", totalOldIndices), + entry( + 
"searchable_snapshots", + ofEntries( + entry("count", totalOldSearchableSnapshots), + entry("fully_mounted", ofEntries(entry("count", totalOldFullyMountedSearchableSnapshots))), + entry( + "partially_mounted", + ofEntries(entry("count", totalOldPartiallyMountedSearchableSnapshots)) + ) + ) + ) + ) + ) + ) + ) + ) + ); + } + return null; + } +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 4329cc65f262..814349424b90 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +17,7 @@ import java.util.List; import java.util.Objects; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -98,6 +100,10 @@ private DeprecationChecks() {} IndexDeprecationChecks::deprecatedCamelCasePattern ); + static List> DATA_STREAM_CHECKS = List.of( + DataStreamDeprecationChecks::oldIndicesCheck + ); + /** * helper utility function to reduce repeat of running a specific {@link List} of checks. 
* diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cb9efd526fb2..cd26e23394e8 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -42,6 +43,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -144,10 +146,11 @@ private static Map> getMergedIssuesToNodesMap( } public static class Response extends ActionResponse implements ToXContentObject { - static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings"); + static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings", "data_streams"); private final List clusterSettingsIssues; private final List nodeSettingsIssues; private final Map> indexSettingsIssues; + private final Map> dataStreamIssues; private final Map> pluginSettingsIssues; public Response(StreamInput in) throws IOException { @@ -155,6 +158,11 @@ public Response(StreamInput in) throws IOException { clusterSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); nodeSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); indexSettingsIssues = 
in.readMapOfLists(DeprecationIssue::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + dataStreamIssues = in.readMapOfLists(DeprecationIssue::new); + } else { + dataStreamIssues = Map.of(); + } if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) { List mlIssues = in.readCollectionAsList(DeprecationIssue::new); pluginSettingsIssues = new HashMap<>(); @@ -168,11 +176,13 @@ public Response( List clusterSettingsIssues, List nodeSettingsIssues, Map> indexSettingsIssues, + Map> dataStreamIssues, Map> pluginSettingsIssues ) { this.clusterSettingsIssues = clusterSettingsIssues; this.nodeSettingsIssues = nodeSettingsIssues; this.indexSettingsIssues = indexSettingsIssues; + this.dataStreamIssues = dataStreamIssues; Set intersection = Sets.intersection(RESERVED_NAMES, pluginSettingsIssues.keySet()); if (intersection.isEmpty() == false) { throw new ElasticsearchStatusException( @@ -205,6 +215,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(clusterSettingsIssues); out.writeCollection(nodeSettingsIssues); out.writeMap(indexSettingsIssues, StreamOutput::writeCollection); + if (out.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + out.writeMap(dataStreamIssues, StreamOutput::writeCollection); + } if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) { out.writeCollection(pluginSettingsIssues.getOrDefault("ml_settings", Collections.emptyList())); } else { @@ -219,6 +232,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .array("node_settings", nodeSettingsIssues.toArray()) .field("index_settings") .map(indexSettingsIssues) + .field("data_streams") + .map(dataStreamIssues) .mapContents(pluginSettingsIssues) .endObject(); } @@ -260,6 +275,7 @@ public static DeprecationInfoAction.Response from( Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, List> 
indexSettingsChecks, + List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, List skipTheseDeprecatedSettings @@ -283,6 +299,19 @@ public static DeprecationInfoAction.Response from( } } + List dataStreamNames = indexNameExpressionResolver.dataStreamNames( + state, + IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN + ); + Map> dataStreamIssues = new HashMap<>(); + for (String dataStreamName : dataStreamNames) { + DataStream dataStream = stateWithSkippedSettingsRemoved.metadata().dataStreams().get(dataStreamName); + List issuesForSingleDataStream = filterChecks(dataStreamChecks, c -> c.apply(dataStream, state)); + if (issuesForSingleDataStream.isEmpty() == false) { + dataStreamIssues.put(dataStreamName, issuesForSingleDataStream); + } + } + // WORKAROUND: move transform deprecation issues into cluster_settings List transformDeprecations = pluginSettingIssues.remove( TransformDeprecationChecker.TRANSFORM_DEPRECATION_KEY @@ -291,7 +320,13 @@ public static DeprecationInfoAction.Response from( clusterSettingsIssues.addAll(transformDeprecations); } - return new DeprecationInfoAction.Response(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues, pluginSettingIssues); + return new DeprecationInfoAction.Response( + clusterSettingsIssues, + nodeSettingsIssues, + indexSettingsIssues, + dataStreamIssues, + pluginSettingIssues + ); } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index 91e77762870b..683c29815399 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -36,6 +36,7 @@ import java.util.stream.Collectors; import static 
org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class TransportDeprecationInfoAction extends TransportMasterNodeReadAction< @@ -134,6 +135,7 @@ protected final void masterOperation( request, response, INDEX_SETTINGS_CHECKS, + DATA_STREAM_CHECKS, CLUSTER_SETTINGS_CHECKS, deprecationIssues, skipTheseDeprecations diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java new file mode 100644 index 000000000000..c391e852014f --- /dev/null +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; +import static org.hamcrest.Matchers.equalTo; + +public class DataStreamDeprecationChecksTests extends ESTestCase { + + public void testOldIndicesCheck() { + long oldIndexCount = randomIntBetween(1, 100); + long newIndexCount = randomIntBetween(1, 100); + long oldSearchableSnapshotCount = 0; + long oldFullyManagedSearchableSnapshotCount = 0; + long oldPartiallyManagedSearchableSnapshotCount = 0; + List allIndices = new ArrayList<>(); + Map nameToIndexMetadata = new HashMap<>(); + for (int i = 0; i < oldIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.fromId(7170099)); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + if (randomBoolean()) { + oldFullyManagedSearchableSnapshotCount++; + } else { + settingsBuilder.put("index.store.snapshot.partial", true); + oldPartiallyManagedSearchableSnapshotCount++; + } + oldSearchableSnapshotCount++; + } + IndexMetadata oldIndexMetadata = IndexMetadata.builder("old-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + 
.build(); + allIndices.add(oldIndexMetadata.getIndex()); + nameToIndexMetadata.put(oldIndexMetadata.getIndex().getName(), oldIndexMetadata); + } + for (int i = 0; i < newIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.current()); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + } + IndexMetadata newIndexMetadata = IndexMetadata.builder("new-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + allIndices.add(newIndexMetadata.getIndex()); + nameToIndexMetadata.put(newIndexMetadata.getIndex().getName(), newIndexMetadata); + } + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + allIndices, + randomNonNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + Metadata metadata = Metadata.builder().indices(nameToIndexMetadata).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + DeprecationIssue expected = new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + Map.of( + "backing_indices", + Map.of( + "count", + oldIndexCount + newIndexCount, + "need_upgrading", + Map.of( + "count", + oldIndexCount, + "searchable_snapshots", + Map.of( + "count", + oldSearchableSnapshotCount, + "fully_mounted", + Map.of("count", oldFullyManagedSearchableSnapshotCount), + "partially_mounted", + Map.of("count", oldPartiallyManagedSearchableSnapshotCount) + ) + ) + ) + ) + ); + List issues = 
DeprecationChecks.filterChecks(DATA_STREAM_CHECKS, c -> c.apply(dataStream, clusterState)); + assertThat(issues, equalTo(singletonList(expected))); + } +} diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 480ac2103fbf..5750daa8e367 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -36,7 +37,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -63,6 +66,13 @@ protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); indexIssues.put(randomAlphaOfLength(10), perIndexIssues); } + Map> dataStreamIssues = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 10); i++) { + List perDataStreamIssues = Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) + .limit(randomIntBetween(0, 10)) + .collect(Collectors.toList()); + dataStreamIssues.put(randomAlphaOfLength(10), perDataStreamIssues); + } Map> pluginIssues = new HashMap<>(); for (int i = 0; i < 
randomIntBetween(0, 10); i++) { List perPluginIssues = Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) @@ -70,7 +80,7 @@ protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); pluginIssues.put(randomAlphaOfLength(10), perPluginIssues); } - return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, pluginIssues); + return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, dataStreamIssues, pluginIssues); } @Override @@ -104,9 +114,13 @@ public void testFrom() throws IOException { boolean clusterIssueFound = randomBoolean(); boolean nodeIssueFound = randomBoolean(); boolean indexIssueFound = randomBoolean(); + boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> dataStreamChecks = List.of( + (ds, cs) -> dataStreamIssueFound ? 
foundIssue : null + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -125,6 +139,7 @@ public void testFrom() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -197,6 +212,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); List> indexSettingsChecks = List.of((idx) -> null); + List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -214,6 +230,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -239,8 +256,15 @@ public void testRemoveSkippedSettings() throws IOException { settingsBuilder.put("some.undeprecated.property", "someValue3"); settingsBuilder.putList("some.undeprecated.list.property", List.of("someValue4", "someValue5")); Settings inputSettings = settingsBuilder.build(); + IndexMetadata dataStreamIndexMetadata = IndexMetadata.builder("ds-test-index-1") + .settings(inputSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(inputSettings).numberOfShards(1).numberOfReplicas(0)) + .put(dataStreamIndexMetadata, true) + .put(DataStream.builder("ds-test", List.of(dataStreamIndexMetadata.getIndex())).build()) .persistentSettings(inputSettings) .build(); @@ -256,6 +280,13 @@ public void testRemoveSkippedSettings() throws IOException { visibleIndexSettings.set(idx.getSettings()); return null; })); + AtomicInteger 
backingIndicesCount = new AtomicInteger(0); + List> dataStreamChecks = Collections.unmodifiableList( + Arrays.asList((ds, cs) -> { + backingIndicesCount.set(ds.getIndices().size()); + return null; + }) + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -270,6 +301,7 @@ public void testRemoveSkippedSettings() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), List.of("some.deprecated.property", "some.other.*.deprecated.property") @@ -288,19 +320,30 @@ public void testRemoveSkippedSettings() throws IOException { Assert.assertTrue(resultIndexSettings.getAsList("some.undeprecated.list.property").equals(List.of("someValue4", "someValue5"))); Assert.assertFalse(resultIndexSettings.hasValue("some.deprecated.property")); Assert.assertFalse(resultIndexSettings.hasValue("some.other.bad.deprecated.property")); + + assertThat(backingIndicesCount.get(), equalTo(1)); } public void testCtorFailure() { Map> indexNames = Stream.generate(() -> randomAlphaOfLength(10)) .limit(10) .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); + Map> dataStreamNames = Stream.generate(() -> randomAlphaOfLength(10)) + .limit(10) + .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); Set shouldCauseFailure = new HashSet<>(RESERVED_NAMES); for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { Map> pluginSettingsIssues = randomSubsetOf(3, shouldCauseFailure).stream() .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); expectThrows( ElasticsearchStatusException.class, - () -> new DeprecationInfoAction.Response(Collections.emptyList(), Collections.emptyList(), indexNames, pluginSettingsIssues) + () -> new DeprecationInfoAction.Response( + Collections.emptyList(), + Collections.emptyList(), + indexNames, + dataStreamNames, + pluginSettingsIssues + ) 
); } } From 7427eb97b6050af4234626240cd45f1ee3d2ce5e Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 20:20:52 -0500 Subject: [PATCH 09/12] Document new ip_location processor (#116623) (#116630) --- docs/reference/ingest/processors.asciidoc | 6 +- .../ingest/processors/geoip.asciidoc | 28 +-- .../ingest/processors/ip-location.asciidoc | 225 ++++++++++++++++++ 3 files changed, 243 insertions(+), 16 deletions(-) create mode 100644 docs/reference/ingest/processors/ip-location.asciidoc diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 8f7cef06d12a..f4fcc0fc84d0 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -77,7 +77,10 @@ Computes a hash of the document’s content. Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. <>:: -Adds information about the geographical location of an IPv4 or IPv6 address. +Adds information about the geographical location of an IPv4 or IPv6 address from a Maxmind database. + +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address from an ip geolocation database. <>:: Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. 
@@ -245,6 +248,7 @@ include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] include::processors/html_strip.asciidoc[] include::processors/inference.asciidoc[] +include::processors/ip-location.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 2eff56f87e82..78ebe3f5b5ee 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -13,7 +13,7 @@ ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[ CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: * `ingest.geoip.downloader.eager.download` is set to true -* your cluster has at least one pipeline with a `geoip` processor +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor {es} automatically downloads updates for these databases from the Elastic GeoIP endpoint: @@ -25,10 +25,10 @@ If your cluster can't connect to the Elastic GeoIP endpoint or you want to manage your own updates, see <>. If you would like to have {es} download database files directly from Maxmind using your own provided -license key, see <>. +license key, see <>. If {es} can't connect to the endpoint for 30 days all updated databases will become -invalid. {es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_geoip_expired_database"]` field instead. [[using-ingest-geoip]] @@ -40,11 +40,11 @@ field instead. |====== | Name | Required | Default | Description | `field` | yes | - | The field to get the IP address from for the geographical lookup. 
-| `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. -| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). -| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. +| `target_field` | no | geoip | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -| `first_only` | no | `true` | If `true` only first found geoip data will be returned, even if `field` contains array +| `first_only` | no | `true` | If `true` only first found ip geolocation data, will be returned, even if `field` contains array | `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. 
Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. |====== @@ -79,15 +79,13 @@ depend on what has been found and which properties were configured in `propertie `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. -preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] - Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field: [source,console] -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -138,7 +136,7 @@ this database is downloaded automatically. 
So this: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -190,7 +188,7 @@ cannot be found: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -256,7 +254,7 @@ PUT my_ip_locations -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -429,7 +427,7 @@ The `geoip` processor supports the following setting: The maximum number of results that should be cached. Defaults to `1000`. -Note that these settings are node settings and apply to all `geoip` processors, i.e. there is one cache for all defined `geoip` processors. +Note that these settings are node settings and apply to all `geoip` and `ip_location` processors, i.e. there is a single cache for all such processors. [[geoip-cluster-settings]] ===== Cluster settings @@ -458,7 +456,7 @@ each node's <> at `$ES_TMPDIR/geoip-databases/IP Location +++++ + +The `ip_location` processor adds information about the geographical location of an +IPv4 or IPv6 address. + +[[ip-location-automatic-updates]] +By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 +ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the +CC BY-SA 4.0 license. 
It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: + +* `ingest.geoip.downloader.eager.download` is set to true +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor + +{es} automatically downloads updates for these databases from the Elastic GeoIP +endpoint: +https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree[https://geoip.elastic.co/v1/database]. +To get download statistics for these updates, use the <>. + +If your cluster can't connect to the Elastic GeoIP endpoint or you want to +manage your own updates, see <>. + +If you would like to have {es} download database files directly from Maxmind using your own provided +license key, see <>. + +If {es} can't connect to the endpoint for 30 days all updated databases will become +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_ip_location_expired_database"]` +field instead. + +[[using-ingest-ip-location]] +==== Using the `ip_location` Processor in a Pipeline + +[[ingest-ip-location-options]] +.`ip-location` options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the IP address from for the geographical lookup. +| `target_field` | no | ip_location | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. 
+| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `first_only` | no | `true` | If `true` only first found ip geolocation data, will be returned, even if `field` contains array +| `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. +|====== + +*Depends on what is available in `database_file`: + +* If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found +and which properties were configured in `properties`. +* If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, +`asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured +in `properties`. 
+* If the GeoIP2 Anonymous IP database is used, then the following fields may be added under the `target_field`: `ip`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Connection Type database is used, then the following fields may be added under the `target_field`: `ip`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`. +The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, +`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`connection_type`. 
The fields actually added depend on what has been found and which properties were configured in `properties`. + +Here is an example that uses the default city database and adds the geographical information to the `ip_location` field based on the `ip` field: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 55, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "ip_location": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE", + "city_name" : "Linköping", + "region_iso_code" : "SE-E", + "region_name" : "Östergötland County", + "location": { "lat": 58.4167, "lon": 15.6167 } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term":1/"_primary_term" : $body._primary_term/] + +Here is an example that uses the default country database and adds the +geographical information to the `geo` field based on the `ip` field. Note that +this database is downloaded automatically. 
So this: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip", + "target_field" : "geo", + "database_file" : "GeoLite2-Country.mmdb" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +returns this: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 65, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "geo": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + + +Not all IP addresses find geo information from the database, When this +occurs, no `target_field` is inserted into the document. 
+ +Here is an example of what documents will be indexed as when information for "80.231.5.0" +cannot be found: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} + +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "80.231.5.0" +} + +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "_index" : "my-index-000001", + "_id" : "my_id", + "_version" : 1, + "_seq_no" : 71, + "_primary_term": 1, + "found" : true, + "_source" : { + "ip" : "80.231.5.0" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] From 8dc9b7b51b2341c9dfe74fa5a6b4d25f679ffe75 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:17:50 -0500 Subject: [PATCH 10/12] [ES|QL] To_DatePeriod and To_TimeDuration return better error messages on union_type fields (#114934) (#116631) * better error messages with union_type fields (cherry picked from commit eb6d47f0f927a70aeba11a10a10c6527a63a8be1) --- docs/changelog/114934.yaml | 6 ++++ .../xpack/esql/analysis/Analyzer.java | 11 ++++++++ .../convert/FoldablesConvertFunction.java | 3 +- .../xpack/esql/analysis/VerifierTests.java | 28 +++++++++++++++++++ 4 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/114934.yaml diff --git a/docs/changelog/114934.yaml b/docs/changelog/114934.yaml new file mode 100644 index 000000000000..68628993b1c8 --- /dev/null +++ b/docs/changelog/114934.yaml @@ -0,0 +1,6 @@ +pr: 114934 +summary: "[ES|QL] To_DatePeriod and To_TimeDuration return better error messages on\ + \ `union_type` fields" 
+area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9039177e0643..9c173795d0ab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -51,6 +51,7 @@ import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -1226,6 +1227,16 @@ private Expression resolveConvertFunction(AbstractConvertFunction convert, List< if (convert.field() instanceof FieldAttribute fa && fa.field() instanceof InvalidMappedField imf) { HashMap typeResolutions = new HashMap<>(); Set supportedTypes = convert.supportedTypes(); + if (convert instanceof FoldablesConvertFunction fcf) { + // FoldablesConvertFunction does not accept fields as inputs, they only accept constants + String unresolvedMessage = "argument of [" + + fcf.sourceText() + + "] must be a constant, received [" + + Expressions.name(fa) + + "]"; + Expression ua = new UnresolvedAttribute(fa.source(), fa.name(), unresolvedMessage); + return fcf.replaceChildren(Collections.singletonList(ua)); + } imf.types().forEach(type -> { if (supportedTypes.contains(type.widenSmallNumeric())) { TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java index 6e2b5bb63532..8f43a6481db0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -59,7 +59,8 @@ protected final TypeResolution resolveType() { @Override protected final Map factories() { - // TODO if a union type field is provided as an input, the correct error message is not shown, #112668 is a follow up + // This is used by ResolveUnionTypes, which is expected to be applied to ES fields only + // FoldablesConvertFunction takes only constants as inputs, so this is empty return Map.of(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index d6cda4a3a9ff..0a34d6cd848b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -244,6 +244,34 @@ public void testUnsupportedAndMultiTypedFields() { + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); + + for (String functionName : List.of("to_timeduration", "to_dateperiod")) { + String lineNumber = functionName.equalsIgnoreCase("to_timeduration") ? "47" : "45"; + String errorType = functionName.equalsIgnoreCase("to_timeduration") ? 
"time_duration" : "date_period"; + assertEquals( + "1:" + lineNumber + ": Cannot use field [unsupported] with unsupported type [flattened]", + error("from test* | eval x = now() + " + functionName + "(unsupported)", analyzer) + ); + assertEquals( + "1:" + lineNumber + ": argument of [" + functionName + "(multi_typed)] must be a constant, received [multi_typed]", + error("from test* | eval x = now() + " + functionName + "(multi_typed)", analyzer) + ); + assertThat( + error("from test* | eval x = unsupported, y = now() + " + functionName + "(x)", analyzer), + containsString("1:23: Cannot use field [unsupported] with unsupported type [flattened]") + ); + assertThat( + error("from test* | eval x = multi_typed, y = now() + " + functionName + "(x)", analyzer), + containsString( + "1:48: argument of [" + + functionName + + "(x)] must be [" + + errorType + + " or string], " + + "found value [x] type [unsupported]" + ) + ); + } } public void testRoundFunctionInvalidInputs() { From 41e07cad23cd8537a962e76a99151f388c754185 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 12 Nov 2024 08:49:45 +0100 Subject: [PATCH 11/12] Deduplicate non-empty InternalAggregation metadata when deserializing (#116589) (#116635) --- .../search/aggregations/InternalAggregation.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 38cab1761d40..b829afb0c23b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -35,7 +35,6 @@ */ public abstract class InternalAggregation implements Aggregation, NamedWriteable { protected final String name; - protected final Map metadata; /** @@ -53,12 +52,14 @@ protected InternalAggregation(String name, Map metadata) { */ protected 
InternalAggregation(StreamInput in) throws IOException { final String name = in.readString(); + final Map metadata = in.readGenericMap(); if (in instanceof DelayableWriteable.Deduplicator d) { this.name = d.deduplicate(name); + this.metadata = metadata == null || metadata.isEmpty() ? metadata : d.deduplicate(metadata); } else { this.name = name; + this.metadata = metadata; } - metadata = in.readGenericMap(); } @Override From 803de168b851eca381fc7ae09bcb2ae0d275f79b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:09:40 +0100 Subject: [PATCH 12/12] [DOCS] Remove tech preview from bulk create/update/delete roles (#116601) (#116639) Mark bulk create/update/delete roles GA in 9.0 and 8.17 (cherry picked from commit bfb30d2e72f9980a1f9d917ad6f1e3acf4bbff00) --- docs/reference/rest-api/security/bulk-create-roles.asciidoc | 1 - docs/reference/rest-api/security/bulk-delete-roles.asciidoc | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a198f4938390..560e8b74cdd2 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-put-role]] === Bulk create or update roles API -preview::[] ++++ Bulk create or update roles API ++++ diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index a782b5e37fcb..b9978c89bef3 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-delete-role]] === Bulk delete roles API -preview::[] ++++ Bulk delete roles API ++++