
Commit

Merge branch 'main' into remove-scale-from-0-flag
davidkyle authored Nov 18, 2024
2 parents b7d6839 + d6cc86a commit 9dc2ac4
Showing 76 changed files with 118 additions and 849 deletions.
@@ -20,7 +20,7 @@ import org.elasticsearch.gradle.testclusters.TestDistribution
// Common config when running with a FIPS-140 runtime JVM
if (buildParams.inFipsJvm) {
allprojects {
String javaSecurityFilename = buildParams.runtimeJavaDetails.toLowerCase().contains('oracle') ? 'fips_java_oracle.security' : 'fips_java.security'
String javaSecurityFilename = buildParams.runtimeJavaDetails.get().toLowerCase().contains('oracle') ? 'fips_java_oracle.security' : 'fips_java.security'
File fipsResourcesDir = new File(project.buildDir, 'fips-resources')
File fipsSecurity = new File(fipsResourcesDir, javaSecurityFilename)
File fipsPolicy = new File(fipsResourcesDir, 'fips_java.policy')
5 changes: 5 additions & 0 deletions docs/changelog/116915.yaml
@@ -0,0 +1,5 @@
pr: 116915
summary: Improve message about insecure S3 settings
area: Snapshot/Restore
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/116931.yaml
@@ -0,0 +1,5 @@
pr: 116931
summary: Enable built-in Inference Endpoints and default for Semantic Text
area: "Machine Learning"
type: enhancement
issues: []
@@ -9,30 +9,22 @@

package org.elasticsearch.datastreams;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction;
import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Map;
import java.util.Set;

/**
* Provides the features for data streams that this version of the code supports
*/
public class DataStreamFeatures implements FeatureSpecification {

public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle");
public static final NodeFeature DATA_STREAM_FAILURE_STORE_TSDB_FIX = new NodeFeature("data_stream.failure_store.tsdb_fix");

@Override
public Map<NodeFeature, Version> getHistoricalFeatures() {
return Map.of(DATA_STREAM_LIFECYCLE, Version.V_8_11_0);
}

@Override
public Set<NodeFeature> getFeatures() {
return Set.of(
@@ -318,8 +318,7 @@ class S3Repository extends MeteredBlobStoreRepository {
deprecationLogger.critical(
DeprecationCategory.SECURITY,
"s3_repository_secret_settings",
"Using s3 access/secret key from repository settings. Instead "
+ "store these in named clients and the elasticsearch keystore for secure settings."
INSECURE_CREDENTIALS_DEPRECATION_WARNING
);
}

@@ -336,6 +335,11 @@ class S3Repository extends MeteredBlobStoreRepository {
);
}

static final String INSECURE_CREDENTIALS_DEPRECATION_WARNING = Strings.format("""
This repository's settings include a S3 access key and secret key, but repository settings are stored in plaintext and must not be \
used for security-sensitive information. Instead, store all secure settings in the keystore. See [%s] for more information.\
""", ReferenceDocs.SECURE_SETTINGS);

private static Map<String, String> buildLocation(RepositoryMetadata metadata) {
return Map.of("base_path", BASE_PATH_SETTING.get(metadata.settings()), "bucket", BUCKET_SETTING.get(metadata.settings()));
}
@@ -107,10 +107,9 @@ public void testRepositoryCredentialsOverrideSecureCredentials() {
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));

assertCriticalWarnings(
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release.",
"[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release.",
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings.",
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release."
S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING
);
}

@@ -194,10 +193,9 @@ public void testReinitSecureCredentials() {

if (hasInsecureSettings) {
assertCriticalWarnings(
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release.",
"[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release.",
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings.",
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release."
S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING
);
}
}
@@ -238,10 +236,7 @@ public void sendResponse(RestResponse response) {
throw error.get();
}

assertWarnings(
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings."
);
assertWarnings(S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING);
}

private void createRepository(final String name, final Settings repositorySettings) {

This file was deleted.

1 change: 0 additions & 1 deletion server/src/main/java/module-info.java
@@ -426,7 +426,6 @@
org.elasticsearch.cluster.service.TransportFeatures,
org.elasticsearch.cluster.metadata.MetadataFeatures,
org.elasticsearch.rest.RestFeatures,
org.elasticsearch.indices.IndicesFeatures,
org.elasticsearch.repositories.RepositoriesFeatures,
org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures,
org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures,
@@ -853,7 +853,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster, Predicate<
registerHandler.accept(new RestClusterStateAction(settingsFilter, threadPool));
registerHandler.accept(new RestClusterHealthAction());
registerHandler.accept(new RestClusterUpdateSettingsAction());
registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, clusterSupportsFeature));
registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter));
registerHandler.accept(new RestClusterRerouteAction(settingsFilter));
registerHandler.accept(new RestClusterSearchShardsAction());
registerHandler.accept(new RestPendingClusterTasksAction());
@@ -82,6 +82,7 @@ public enum ReferenceDocs {
CIRCUIT_BREAKER_ERRORS,
ALLOCATION_EXPLAIN_NO_COPIES,
ALLOCATION_EXPLAIN_MAX_RETRY,
SECURE_SETTINGS,
// this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner
;

@@ -185,9 +185,7 @@ private InsecureStringSetting(String name) {
@Override
public SecureString get(Settings settings) {
if (ALLOW_INSECURE_SETTINGS == false && exists(settings)) {
throw new IllegalArgumentException(
"Setting [" + name + "] is insecure, " + "but property [allow_insecure_settings] is not set"
);
throw new IllegalArgumentException("Setting [" + name + "] is insecure, use the elasticsearch keystore instead");
}
return super.get(settings);
}
17 changes: 0 additions & 17 deletions server/src/main/java/org/elasticsearch/health/HealthFeatures.java
@@ -9,34 +9,17 @@

package org.elasticsearch.health;

import org.elasticsearch.Version;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Map;
import java.util.Set;

public class HealthFeatures implements FeatureSpecification {

public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("health.supports_health");
public static final NodeFeature SUPPORTS_HEALTH_REPORT_API = new NodeFeature("health.supports_health_report_api");
public static final NodeFeature SUPPORTS_SHARDS_CAPACITY_INDICATOR = new NodeFeature("health.shards_capacity_indicator");
public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator");

@Override
public Set<NodeFeature> getFeatures() {
return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR);
}

@Override
public Map<NodeFeature, Version> getHistoricalFeatures() {
return Map.of(
SUPPORTS_HEALTH,
Version.V_8_5_0, // health accessible via /_internal/_health
SUPPORTS_HEALTH_REPORT_API,
Version.V_8_7_0, // health accessible via /_health_report
SUPPORTS_SHARDS_CAPACITY_INDICATOR,
Version.V_8_8_0
);
}
}
@@ -28,7 +28,6 @@
import org.elasticsearch.core.Tuple;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.health.HealthFeatures;

import java.util.List;
import java.util.stream.Stream;
@@ -137,7 +136,7 @@ private void updateOnHealthNodeEnabledChange(boolean enabled) {

private boolean canPostClusterStateUpdates(ClusterState state) {
// Wait until every node in the cluster supports health checks
return isMaster && state.clusterRecovered() && featureService.clusterHasFeature(state, HealthFeatures.SUPPORTS_HEALTH);
return isMaster && state.clusterRecovered();
}

private void updateOnClusterStateChange(ClusterChangedEvent event) {
@@ -20,7 +20,6 @@
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.Diagnosis;
import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorImpact;
import org.elasticsearch.health.HealthIndicatorResult;
@@ -91,15 +90,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
ClusterState clusterState = clusterService.state();
Map<String, DiskHealthInfo> diskHealthInfoMap = healthInfo.diskInfoByNode();
if (diskHealthInfoMap == null || diskHealthInfoMap.isEmpty()) {
if (featureService.clusterHasFeature(clusterState, HealthFeatures.SUPPORTS_HEALTH) == false) {
return createIndicator(
HealthStatus.GREEN,
"No disk usage data available. The cluster currently has mixed versions (an upgrade may be in progress).",
HealthIndicatorDetails.EMPTY,
List.of(),
List.of()
);
}
/*
* If there is no disk health info, that either means that a new health node was just elected, or something is seriously
* wrong with health data collection on the health node. Either way, we immediately return UNKNOWN. If there are at least
@@ -25,7 +25,6 @@
import org.elasticsearch.common.util.concurrent.RunOnce;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.metadata.HealthMetadata;
import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException;
import org.elasticsearch.health.node.selection.HealthNode;
@@ -200,7 +199,6 @@ public void clusterChanged(ClusterChangedEvent event) {
}
}
prerequisitesFulfilled = event.state().clusterRecovered()
&& featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH)
&& HealthMetadata.getFromClusterState(event.state()) != null
&& currentHealthNode != null
&& currentMasterNode != null;
@@ -16,7 +16,6 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.Diagnosis;
import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorImpact;
import org.elasticsearch.health.HealthIndicatorResult;
@@ -111,15 +110,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
var state = clusterService.state();
var healthMetadata = HealthMetadata.getFromClusterState(state);
if (healthMetadata == null || healthMetadata.getShardLimitsMetadata() == null) {
if (featureService.clusterHasFeature(state, HealthFeatures.SUPPORTS_SHARDS_CAPACITY_INDICATOR) == false) {
return createIndicator(
HealthStatus.GREEN,
"No shard limits configured yet. The cluster currently has mixed versions (an upgrade may be in progress).",
HealthIndicatorDetails.EMPTY,
List.of(),
List.of()
);
}
return unknownIndicator();
}

@@ -23,7 +23,6 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.persistent.AllocatedPersistentTask;
import org.elasticsearch.persistent.PersistentTaskParams;
@@ -157,11 +156,8 @@ public PersistentTasksCustomMetadata.Assignment getAssignment(

// visible for testing
void startTask(ClusterChangedEvent event) {
// Wait until every node in the cluster supports health checks
if (event.localNodeMaster()
&& event.state().clusterRecovered()
&& HealthNode.findTask(event.state()) == null
&& featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH)) {
// Wait until master is stable before starting health task
if (event.localNodeMaster() && event.state().clusterRecovered() && HealthNode.findTask(event.state()) == null) {
persistentTasksService.sendStartRequest(
TASK_NAME,
TASK_NAME,

This file was deleted.

@@ -23,7 +23,6 @@
import org.elasticsearch.action.ResolvedIndices;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
@@ -79,7 +78,6 @@
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.index.CloseUtils;
@@ -211,8 +209,6 @@ public class IndicesService extends AbstractLifecycleComponent
Setting.Property.NodeScope
);

static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported");

/**
* The node's settings.
*/
@@ -910,9 +906,7 @@ public void createShard(
.setConcreteIndex(shardRouting.index())
.source(mapping.source().string(), XContentType.JSON);
client.execute(
featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT)
? TransportAutoPutMappingAction.TYPE
: TransportPutMappingAction.TYPE,
TransportAutoPutMappingAction.TYPE,
putMappingRequestAcknowledgedRequest.ackTimeout(TimeValue.MAX_VALUE).masterNodeTimeout(TimeValue.MAX_VALUE),
new RefCountAwareThreadedActionListener<>(threadPool.generic(), listener.map(ignored -> null))
);
