Merge remote-tracking branch 'upstream/main' into docs_v9

jakelandis committed Oct 23, 2024
2 parents a1b8f5d + d8bcbb6, commit ae4d210

Showing 112 changed files with 1,794 additions and 1,682 deletions.
@@ -15,6 +15,7 @@
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
import org.apache.commons.io.IOUtils;
+import org.elasticsearch.gradle.OS;
import org.elasticsearch.gradle.util.GradleUtils;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
@@ -61,7 +62,7 @@ public void apply(Project target) {
? System.getenv("BUILD_NUMBER")
: System.getenv("BUILDKITE_BUILD_NUMBER");
String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST");
-if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) {
+if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false && OS.current() != OS.WINDOWS) {
File targetFile = calculateTargetFile(target, buildNumber);
File projectDir = target.getProjectDir();
File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/");
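For readers who don't know the build-tools helper, here is a rough sketch of what an enum-style `OS.current()` platform guard boils down to (illustrative only, not the actual `org.elasticsearch.gradle.OS` implementation):

import java.util.Locale;

// Illustrative stand-in for org.elasticsearch.gradle.OS: map the os.name system
// property onto an enum so call sites can write guards like OS.current() != OS.WINDOWS.
enum OS {
    WINDOWS, MAC, LINUX;

    static OS current() {
        String name = System.getProperty("os.name").toLowerCase(Locale.ROOT);
        if (name.contains("windows")) {
            return WINDOWS;
        }
        return name.contains("mac") ? MAC : LINUX;
    }
}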
6 changes: 6 additions & 0 deletions docs/changelog/115041.yaml
@@ -0,0 +1,6 @@
pr: 115041
summary: Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity`
to 100_000
area: Machine Learning
type: enhancement
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/115241.yaml
@@ -0,0 +1,6 @@
pr: 115241
summary: "[Security Solution] Add `create_index` to `kibana_system` role for index/DS\
\ `.logs-endpoint.action.responses-*`"
area: Authorization
type: enhancement
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/115308.yaml
@@ -0,0 +1,6 @@
pr: 115308
summary: "ESQL: Disable pushdown of WHERE past STATS"
area: ES|QL
type: bug
issues:
- 115281
6 changes: 6 additions & 0 deletions docs/changelog/115312.yaml
@@ -0,0 +1,6 @@
pr: 115312
summary: "ESQL: Fix filtered grouping on ords"
area: ES|QL
type: bug
issues:
- 114897
9 changes: 9 additions & 0 deletions docs/changelog/115414.yaml
@@ -0,0 +1,9 @@
pr: 115414
summary: Mitigate IOSession timeouts
area: Machine Learning
type: bug
issues:
- 114385
- 114327
- 114105
- 114232
62 changes: 40 additions & 22 deletions docs/reference/index.asciidoc
@@ -6,41 +6,45 @@ include::links.asciidoc[]

include::landing-page.asciidoc[]

include::release-notes/highlights.asciidoc[]
// overview / install

include::intro.asciidoc[]

include::quickstart/index.asciidoc[]

include::setup.asciidoc[]

include::upgrade.asciidoc[]
// search solution

include::index-modules.asciidoc[]
include::search/search-your-data/search-your-data.asciidoc[]

include::mapping.asciidoc[]
include::reranking/index.asciidoc[]

include::analysis.asciidoc[]
// data management

include::index-modules.asciidoc[]

include::indices/index-templates.asciidoc[]

include::data-streams/data-streams.asciidoc[]
include::alias.asciidoc[]

include::ingest.asciidoc[]
include::mapping.asciidoc[]

include::alias.asciidoc[]
include::analysis.asciidoc[]

include::search/search-your-data/search-your-data.asciidoc[]
include::ingest.asciidoc[]

include::reranking/index.asciidoc[]
include::connector/docs/index.asciidoc[]

include::query-dsl.asciidoc[]
include::data-streams/data-streams.asciidoc[]

include::aggregations.asciidoc[]
include::data-management.asciidoc[]

include::geospatial-analysis.asciidoc[]
include::data-rollup-transform.asciidoc[]

include::connector/docs/index.asciidoc[]
// analysis tools

include::query-dsl.asciidoc[]

include::eql/eql.asciidoc[]

@@ -50,34 +54,48 @@ include::sql/index.asciidoc[]

include::scripting.asciidoc[]

include::data-management.asciidoc[]
include::aggregations.asciidoc[]

include::autoscaling/index.asciidoc[]
include::geospatial-analysis.asciidoc[]

include::watcher/index.asciidoc[]

// cluster management

include::monitoring/index.asciidoc[]

include::data-rollup-transform.asciidoc[]
include::security/index.asciidoc[]

// production tasks

include::high-availability.asciidoc[]

include::how-to.asciidoc[]

include::autoscaling/index.asciidoc[]

include::snapshot-restore/index.asciidoc[]

include::security/index.asciidoc[]
// reference

include::watcher/index.asciidoc[]
include::rest-api/index.asciidoc[]

include::commands/index.asciidoc[]

include::how-to.asciidoc[]

include::troubleshooting.asciidoc[]

include::rest-api/index.asciidoc[]
// upgrades

include::upgrade.asciidoc[]

include::migration/index.asciidoc[]

include::release-notes/highlights.asciidoc[]

include::release-notes.asciidoc[]

include::dependencies-versions.asciidoc[]

// etc

include::redirects.asciidoc[]
3 changes: 2 additions & 1 deletion docs/reference/release-notes/highlights.asciidoc
@@ -1,5 +1,6 @@
+[chapter]
[[release-highlights]]
-== What's new in {minor-version}
+= What's new in {minor-version}

coming::[{minor-version}]

@@ -309,14 +309,14 @@ private static boolean hasAtLeastOneGeoipProcessor(Map<String, Object> processor
{
final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(GEOIP_TYPE);
if (processorConfig != null) {
-return downloadDatabaseOnPipelineCreation(GEOIP_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation;
+return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation;
}
}

{
final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(IP_LOCATION_TYPE);
if (processorConfig != null) {
-return downloadDatabaseOnPipelineCreation(IP_LOCATION_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation;
+return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation;
}
}

@@ -238,9 +238,8 @@ public Processor create(
boolean ignoreMissing = readBooleanProperty(type, processorTag, config, "ignore_missing", false);
boolean firstOnly = readBooleanProperty(type, processorTag, config, "first_only", true);

-// Validating the download_database_on_pipeline_creation even if the result
-// is not used directly by the factory.
-downloadDatabaseOnPipelineCreation(type, config, processorTag);
+// validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory
+readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true);

// noop, should be removed in 9.0
Object value = config.remove("fallback_to_default_databases");
@@ -319,8 +318,15 @@ public Processor create(
);
}

-public static boolean downloadDatabaseOnPipelineCreation(String type, Map<String, Object> config, String processorTag) {
-    return readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true);
+/**
+ * Get the value of the "download_database_on_pipeline_creation" property from a processor's config map.
+ * <p>
+ * As with the actual property definition, the default value of the property is 'true'. Unlike the actual
+ * property definition, this method doesn't consume (that is, <code>config.remove</code>) the property from
+ * the config map.
+ */
+public static boolean downloadDatabaseOnPipelineCreation(Map<String, Object> config) {
+    return (boolean) config.getOrDefault("download_database_on_pipeline_creation", true);
}
}

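To make the javadoc above concrete, a hypothetical caller (not part of this commit; assumes a static import of the factory method) would see the non-consuming behaviour like this:

import java.util.HashMap;
import java.util.Map;

// Hypothetical usage sketch of the accessor above.
Map<String, Object> config = new HashMap<>();
config.put("download_database_on_pipeline_creation", false);

boolean download = downloadDatabaseOnPipelineCreation(config); // false
// Unlike readBooleanProperty(type, tag, config, key, default), which removes the
// entry as it reads it, the key is still present afterwards:
assert config.containsKey("download_database_on_pipeline_creation");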
@@ -364,8 +364,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) {
SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name()));
// MatchOnlyText never has norms, so we have to use the field names field
BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name());
-var sourceMode = blContext.indexSettings().getIndexMappingSourceMode();
-return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup, sourceMode);
+return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup);
}

@Override
@@ -319,8 +319,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) {
BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed()
? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name())
: BlockSourceReader.lookupMatchingAll();
-var sourceMode = blContext.indexSettings().getIndexMappingSourceMode();
-return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup, sourceMode);
+return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup);
}

@Override
@@ -0,0 +1,139 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.node.ShutdownPrepareService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.reindex.ReindexPlugin;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.transport.TransportService;

import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.elasticsearch.node.ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

/**
 * Tests that the wait added during shutdown is necessary for a large reindexing task to complete.
 * The test works as follows:
 * 1. Start a large (reasonably long-running) reindexing request on the coordinator-only node.
 * 2. Check that the reindexing task appears on the coordinating node.
 * 3. With a 10s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING,
 *    wait for the reindexing task to complete before closing the node.
 * 4. Confirm that the reindexing task succeeds with the wait (it would fail without it).
 */
@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
public class ReindexNodeShutdownIT extends ESIntegTestCase {

protected static final String INDEX = "reindex-shutdown-index";
protected static final String DEST_INDEX = "dest-index";

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(ReindexPlugin.class);
}

protected ReindexRequestBuilder reindex(String nodeName) {
return new ReindexRequestBuilder(internalCluster().client(nodeName));
}

public void testReindexWithShutdown() throws Exception {
final String masterNodeName = internalCluster().startMasterOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode();

// Start the coordinating node with the 10s reindexing shutdown timeout described above.
final Settings coordSettings = Settings.builder()
    .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(10))
    .build();
final String coordNodeName = internalCluster().startCoordinatingOnlyNode(coordSettings);

ensureStableCluster(3);

int numDocs = 20000;
createIndex(numDocs);
createReindexTaskAndShutdown(coordNodeName);
checkDestinationIndex(dataNodeName, numDocs);
}

private void createIndex(int numDocs) {
// INDEX will be created on the dataNode
createIndex(INDEX);

logger.debug("setting up [{}] docs", numDocs);
indexRandom(
true,
false,
true,
IntStream.range(0, numDocs)
.mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i))
.collect(Collectors.toList())
);

// Check that all documents have been indexed and correctly counted
assertHitCount(prepareSearch(INDEX).setSize(0).setTrackTotalHits(true), numDocs);
}

private void createReindexTaskAndShutdown(final String coordNodeName) throws Exception {
AbstractBulkByScrollRequestBuilder<?, ?> builder = reindex(coordNodeName).source(INDEX).destination(DEST_INDEX);
AbstractBulkByScrollRequest<?> reindexRequest = builder.request();
ShutdownPrepareService shutdownPrepareService = internalCluster().getInstance(ShutdownPrepareService.class, coordNodeName);

TaskManager taskManager = internalCluster().getInstance(TransportService.class, coordNodeName).getTaskManager();

// Now execute the reindex action...
ActionListener<BulkByScrollResponse> reindexListener = new ActionListener<BulkByScrollResponse>() {
@Override
public void onResponse(BulkByScrollResponse bulkByScrollResponse) {
assertNull(bulkByScrollResponse.getReasonCancelled());
logger.debug(bulkByScrollResponse.toString());
}

@Override
public void onFailure(Exception e) {
logger.debug("Encounterd " + e.toString());
fail(e, "Encounterd " + e.toString());
}
};
internalCluster().client(coordNodeName).execute(ReindexAction.INSTANCE, reindexRequest, reindexListener);

// Wait for the reindex task to appear in the task list, then immediately stop the coordinating node
waitForTask(ReindexAction.INSTANCE.name(), coordNodeName);
shutdownPrepareService.prepareForShutdown(taskManager);
internalCluster().stopNode(coordNodeName);
}

// Make sure all documents from the source index have been reindexed into the destination index
private void checkDestinationIndex(String dataNodeName, int numDocs) throws Exception {
assertTrue(indexExists(DEST_INDEX));
flushAndRefresh(DEST_INDEX);
assertBusy(() -> { assertHitCount(prepareSearch(DEST_INDEX).setSize(0).setTrackTotalHits(true), numDocs); });
}

private static void waitForTask(String actionName, String nodeName) throws Exception {
assertBusy(() -> {
ListTasksResponse tasks = clusterAdmin().prepareListTasks(nodeName).setActions(actionName).setDetailed(true).get();
tasks.rethrowFailures("Find my task");
for (TaskInfo taskInfo : tasks.getTasks()) {
// Tasks with a parent are children of the task we want; return once the top-level task is found
if (taskInfo.parentTaskId().isSet() == false) return;
}
fail("Couldn't find task after waiting, tasks=" + tasks.getTasks());
}, 10, TimeUnit.SECONDS);
}
}
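The test above exercises a drain-on-shutdown pattern: before a node stops, it waits, up to the configured MAXIMUM_REINDEXING_TIMEOUT_SETTING, for in-flight tasks to finish. A minimal generic sketch of that pattern (the class and method names below are illustrative, not the ShutdownPrepareService API):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Illustrative drain-on-shutdown helper: block until in-flight work completes or a timeout elapses.
final class ShutdownDrain {
    private final CountDownLatch inFlight;

    ShutdownDrain(int runningTasks) {
        this.inFlight = new CountDownLatch(runningTasks);
    }

    void taskFinished() {
        inFlight.countDown(); // called as each tracked task completes
    }

    /** Returns true if all tracked tasks finished before the timeout elapsed. */
    boolean awaitDrain(long timeout, TimeUnit unit) throws InterruptedException {
        return inFlight.await(timeout, unit);
    }
}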