
Merge branch 'main' into enhancement/esql-match-disjunction-restrictions
carlosdelest committed Dec 17, 2024
2 parents 4968e9e + 312c21a commit 317f7d8
Showing 32 changed files with 683 additions and 151 deletions.
@@ -155,10 +155,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions()

@defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#nodeFeatures()
@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures()
@defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature)
org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.features.NodeFeature)

@defaultMessage Do not construct these records outside the source files they are declared in
org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#<init>(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult)
1 change: 1 addition & 0 deletions docs/Versions.asciidoc
@@ -9,6 +9,7 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[]

:docker-repo: docker.elastic.co/elasticsearch/elasticsearch
:docker-image: {docker-repo}:{version}
:docker-wolfi-image: {docker-repo}-wolfi:{version}
:kib-docker-repo: docker.elastic.co/kibana/kibana
:kib-docker-image: {kib-docker-repo}:{version}
:plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins
5 changes: 5 additions & 0 deletions docs/changelog/118143.yaml
@@ -0,0 +1,5 @@
pr: 118143
summary: Infrastructure for assuming cluster features in the next major version
area: "Infra/Core"
type: feature
issues: []
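The changelog entry above describes the infrastructure that the ClusterFeatures.java diff further down builds on: a NodeFeature can declare that it is assumed to be present after the next compatibility boundary. As a minimal sketch only (the diff below confirms the assumedAfterNextCompatibilityBoundary() accessor, but the constructor shape here is an assumption):

import org.elasticsearch.features.NodeFeature;

public final class ExampleFeatures {
    // Hypothetical declaration: the boolean argument marking this feature as
    // assumed after the next compatibility boundary is an assumed constructor
    // shape; the diff only shows the accessor assumedAfterNextCompatibilityBoundary().
    public static final NodeFeature EXAMPLE_FEATURE = new NodeFeature("example_feature", true);
}

Declaring a feature this way lets next-major-version nodes, which may no longer list it, still be treated as having it, which is exactly the check added to ClusterFeatures#clusterHasFeature below.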
6 changes: 6 additions & 0 deletions docs/reference/setup/install/docker.asciidoc
@@ -55,6 +55,12 @@ docker pull {docker-image}
// REVIEWED[DEC.10.24]
--

Alternatively, you can use the Wolfi-based image. Using Wolfi-based images requires Docker version 20.10.10 or higher.
[source,sh,subs="attributes"]
----
docker pull {docker-wolfi-image}
----

. Optional: Install
https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your
environment. Then use Cosign to verify the {es} image's signature.
@@ -43,7 +43,6 @@
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.IndexVersion;
@@ -80,28 +79,19 @@
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;

public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> {
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class);
static final String DOCUMENT_TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. "
+ "The [document_type] should no longer be specified.";
static final String TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. "
+ "The [type] of the indexed document should no longer be specified.";

public static final String NAME = "percolate";

static final ParseField DOCUMENT_FIELD = new ParseField("document");
static final ParseField DOCUMENTS_FIELD = new ParseField("documents");
private static final ParseField NAME_FIELD = new ParseField("name");
private static final ParseField QUERY_FIELD = new ParseField("field");
private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type");
private static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type");
private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index");
private static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id");
private static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing");
@@ -368,10 +358,6 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep
);
}

private static BiConsumer<PercolateQueryBuilder, String> deprecateAndIgnoreType(String key, String message) {
return (target, type) -> deprecationLogger.compatibleCritical(key, message);
}

private static BytesReference parseDocument(XContentParser parser) throws IOException {
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
builder.copyCurrentStructure(parser);
6 changes: 3 additions & 3 deletions muted-tests.yml
@@ -156,9 +156,6 @@ tests:
issue: https://github.com/elastic/elasticsearch/issues/117473
- class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT
issue: https://github.com/elastic/elasticsearch/issues/117525
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set}
issue: https://github.com/elastic/elasticsearch/issues/116777
- class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT"
method: "test {scoring.*}"
issue: https://github.com/elastic/elasticsearch/issues/117641
@@ -307,6 +304,9 @@ tests:
issue: https://github.com/elastic/elasticsearch/issues/118806
- class: org.elasticsearch.xpack.esql.session.IndexResolverFieldNamesTests
issue: https://github.com/elastic/elasticsearch/issues/118814
- class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests
method: testPruneSome
issue: https://github.com/elastic/elasticsearch/issues/118728

# Examples:
#
1 change: 1 addition & 0 deletions rest-api-spec/build.gradle
@@ -69,4 +69,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task ->
task.skipTest("search/520_fetch_fields/fetch _seq_no via fields", "error code is changed from 5xx to 400 in 9.0")
task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions")
task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions")
task.skipTest("synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set", "Can't work until auto-expand replicas is 0-1 for synonyms index")
})
@@ -1,8 +1,8 @@
---
"Reload analyzers for specific synonym set":
setup:
- requires:
cluster_features: ["gte_v8.10.0"]
reason: Reloading analyzers for specific synonym set is introduced in 8.10.0

# Create synonyms_set1
- do:
synonyms.put_synonym:
@@ -100,21 +100,25 @@
- '{"index": {"_index": "my_index2", "_id": "2"}}'
- '{"my_field": "goodbye"}'

# An update of synonyms_set1 must trigger auto-reloading of analyzers only for synonyms_set1
---
"Reload analyzers for specific synonym set":
# These specific tests can't succeed in BwC, as the synonyms index has auto-expand replicas set to 0-all. Replicas can't be
# allocated to upgraded nodes, and thus we are not able to guarantee that the shards will not fail.
# This test is skipped for BwC until the synonyms index has auto-expand replicas set to 0-1.

- do:
synonyms.put_synonym:
id: synonyms_set1
body:
synonyms_set:
- synonyms: "hello, salute"
- synonyms: "ciao => goodbye"

- match: { result: "updated" }
- gt: { reload_analyzers_details._shards.total: 0 }
- gt: { reload_analyzers_details._shards.successful: 0 }
- match: { reload_analyzers_details._shards.failed: 0 }
- length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index
- match: { reload_analyzers_details.reload_details.0.index: "my_index1" }
- match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" }


# Confirm that the index analyzers are reloaded for my_index1
- do:
@@ -127,6 +131,23 @@
query: salute
- match: { hits.total.value: 1 }

---
"Check analyzer reloaded and non failed shards for bwc tests":

- do:
synonyms.put_synonym:
id: synonyms_set1
body:
synonyms_set:
- synonyms: "hello, salute"
- synonyms: "ciao => goodbye"
- match: { result: "updated" }
- gt: { reload_analyzers_details._shards.total: 0 }
- gt: { reload_analyzers_details._shards.successful: 0 }
- length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index
- match: { reload_analyzers_details.reload_details.0.index: "my_index1" }
- match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" }

# Confirm that the index analyzers are still the same for my_index2
- do:
search:
56 changes: 45 additions & 11 deletions server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java
@@ -9,11 +9,12 @@

package org.elasticsearch.cluster;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.xcontent.ToXContent;

@@ -79,28 +80,61 @@ public Map<String, Set<String>> nodeFeatures() {
return nodeFeatures;
}

/**
* The features in all nodes in the cluster.
* <p>
* NOTE: This should not be used directly.
* Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead.
*/
public Set<String> allNodeFeatures() {
private Set<String> allNodeFeatures() {
if (allNodeFeatures == null) {
allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values()));
}
return allNodeFeatures;
}

/**
* Returns {@code true} if {@code node} can have assumed features.
* @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures
*/
public static boolean featuresCanBeAssumedForNode(DiscoveryNode node) {
return node.getBuildVersion().canRemoveAssumedFeatures();
}

/**
* Returns {@code true} if one or more nodes in {@code nodes} can have assumed features.
* @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures
*/
public static boolean featuresCanBeAssumedForNodes(DiscoveryNodes nodes) {
return nodes.getAllNodes().stream().anyMatch(n -> n.getBuildVersion().canRemoveAssumedFeatures());
}

/**
* {@code true} if {@code feature} is present on all nodes in the cluster.
* <p>
* NOTE: This should not be used directly.
* Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead.
*/
@SuppressForbidden(reason = "directly reading cluster features")
public boolean clusterHasFeature(NodeFeature feature) {
return allNodeFeatures().contains(feature.id());
public boolean clusterHasFeature(DiscoveryNodes nodes, NodeFeature feature) {
assert nodes.getNodes().keySet().equals(nodeFeatures.keySet())
: "Cluster features nodes " + nodeFeatures.keySet() + " is different to discovery nodes " + nodes.getNodes().keySet();

// basic case
boolean allNodesHaveFeature = allNodeFeatures().contains(feature.id());
if (allNodesHaveFeature) {
return true;
}

// if the feature is assumed, check the versions more closely
// it's actually ok if the feature is assumed, and all nodes missing the feature can assume it
// TODO: do we need some kind of transient cache of this calculation?
if (feature.assumedAfterNextCompatibilityBoundary()) {
for (var nf : nodeFeatures.entrySet()) {
if (nf.getValue().contains(feature.id()) == false
&& featuresCanBeAssumedForNode(nodes.getNodes().get(nf.getKey())) == false) {
return false;
}
}

// all nodes missing the feature can assume it - so that's alright then
return true;
}

return false;
}

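The forbidden-APIs entries at the top of this diff continue to steer callers to FeatureService#clusterHasFeature rather than the ClusterFeatures methods changed here. A minimal sketch of that supported call pattern, assuming FeatureService exposes a ClusterState-based clusterHasFeature (FeatureService itself is not part of this diff):

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.NodeFeature;

final class FeatureCheckExample {
    // Sketch only: the (ClusterState, NodeFeature) parameter list is an
    // assumption; the diff shows only the @defaultMessage pointing at
    // FeatureService#clusterHasFeature.
    static boolean isSupported(FeatureService featureService, ClusterState state, NodeFeature feature) {
        // With the change above, this check ultimately consults
        // ClusterFeatures#clusterHasFeature(DiscoveryNodes, NodeFeature), which
        // also returns true when the feature is assumed and every node missing
        // it can assume it (see featuresCanBeAssumedForNode).
        return featureService.clusterHasFeature(state, feature);
    }
}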
