diff --git a/.buildkite/packer_cache.sh b/.buildkite/packer_cache.sh new file mode 100755 index 0000000000000..752914ba55c23 --- /dev/null +++ b/.buildkite/packer_cache.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +ROOT_DIR=$(cd "$(dirname "$0")/.." && pwd) + +branches=($(cat "$ROOT_DIR/branches.json" | jq -r '.branches[].branch')) +for branch in "${branches[@]}"; do + echo "Resolving dependencies for ${branch} branch" + rm -rf "checkout/$branch" + git clone /opt/git-mirrors/elastic-elasticsearch --branch "$branch" --single-branch "checkout/$branch" + + CHECKOUT_DIR=$(cd "./checkout/${branch}" && pwd) + CI_DIR="$CHECKOUT_DIR/.ci" + + if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then + ## On ARM we use a different properties file for setting java home + ## Also, we don't bother attempting to resolve dependencies for the 6.8 branch + source "$CI_DIR/java-versions-aarch64.properties" + export JAVA16_HOME="$HOME/.java/jdk16" + else + source "$CI_DIR/java-versions.properties" + ## We are caching BWC versions too, need these so we can build those + export JAVA8_HOME="$HOME/.java/java8" + export JAVA11_HOME="$HOME/.java/java11" + export JAVA12_HOME="$HOME/.java/openjdk12" + export JAVA13_HOME="$HOME/.java/openjdk13" + export JAVA14_HOME="$HOME/.java/openjdk14" + export JAVA15_HOME="$HOME/.java/openjdk15" + export JAVA16_HOME="$HOME/.java/openjdk16" + fi + + export JAVA_HOME="$HOME/.java/$ES_BUILD_JAVA" + "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI + rm -rf "checkout/${branch}" +done diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index a200e871ec8e6..3271007a00077 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.16", "8.11.3", "8.12.0", "8.13.0"] + BWC_VERSION: ["7.17.17", "8.11.4", "8.12.0", "8.13.0"] agents: provider: gcp 
image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index d397039128457..66eb1fc79e3ca 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1089,6 +1089,22 @@ steps: env: BWC_VERSION: 7.17.16 + - label: "{{matrix.image}} / 7.17.17 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.17 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.17 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 @@ -1761,6 +1777,22 @@ steps: env: BWC_VERSION: 8.11.3 + - label: "{{matrix.image}} / 8.11.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.4 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.4 + - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index b52f8506885c9..faf904f2f8b04 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -80,3 +80,19 @@ steps: diskName: /dev/sda1 env: GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: 
platform-support-unix-aws + steps: + - label: "{{matrix.image}} / platform-support-aws" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true functionalTests + timeout_in_minutes: 420 + matrix: + setup: + image: + - amazonlinux-2023 + agents: + provider: aws + imagePrefix: elasticsearch-{{matrix.image}} + instanceType: m6a.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 248bfd52742d7..3ce048533d131 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -662,6 +662,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.16 + - label: 7.17.17 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.17#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.17 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 @@ -1082,6 +1092,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.3 + - label: 8.11.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.4 - label: 8.12.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 diff --git a/.buildkite/scripts/periodic.trigger.sh b/.buildkite/scripts/periodic.trigger.sh index 3571d112c5b6d..cc10a5ae41861 100755 --- a/.buildkite/scripts/periodic.trigger.sh +++ b/.buildkite/scripts/periodic.trigger.sh @@ -6,11 +6,26 @@ echo "steps:" source .buildkite/scripts/branches.sh +IS_FIRST=true +SKIP_DELAY="${SKIP_DELAY:-false}" + for BRANCH in "${BRANCHES[@]}"; do 
INTAKE_PIPELINE_SLUG="elasticsearch-intake" BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + # Put a delay between each branch's set of pipelines by prepending each non-first branch with a sleep + # This is to smooth out the spike in agent requests + if [[ "$IS_FIRST" != "true" && "$SKIP_DELAY" != "true" ]]; then + cat < createRunBwcGradleTask( loggedExec.args("-Dorg.elasticsearch.build.cache.url=" + buildCacheUrl); } + if (System.getProperty("isCI") != null) { + loggedExec.args("-DisCI"); + } + loggedExec.args("-Dbuild.snapshot=true", "-Dscan.tag.NESTED"); final LogLevel logLevel = project.getGradle().getStartParameter().getLogLevel(); List nonDefaultLogLevels = Arrays.asList(LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 4f9a7284c83e1..4a695e93ebdfe 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -132,6 +132,7 @@ public static void configureCompile(Project project) { compileTask.getConventionMapping().map("sourceCompatibility", () -> java.getSourceCompatibility().toString()); compileTask.getConventionMapping().map("targetCompatibility", () -> java.getTargetCompatibility().toString()); compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); + compileOptions.setIncremental(BuildParams.isCi() == false); }); // also apply release flag to groovy, which is used 
in build-tools project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 115c4b0694141..5e62790a9d78a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -124,7 +124,9 @@ public void apply(Project project) { params.setGitOrigin(gitInfo.getOrigin()); params.setBuildDate(ZonedDateTime.now(ZoneOffset.UTC)); params.setTestSeed(getTestSeed()); - params.setIsCi(System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null); + params.setIsCi( + System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null + ); params.setDefaultParallel(ParallelDetector.findDefaultParallel(project)); params.setInFipsJvm(Util.getBooleanProperty("tests.fips.enabled", false)); params.setIsSnapshotBuild(Util.getBooleanProperty("build.snapshot", true)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java index a01b1c28a851f..bfc52adcdecfd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java @@ -25,7 +25,9 @@ import java.io.File; import java.io.IOException; +import java.lang.reflect.Constructor; import java.lang.reflect.Method; +import java.lang.reflect.Modifier; import java.net.MalformedURLException; import java.net.URL; import 
java.net.URLClassLoader; @@ -74,9 +76,14 @@ public void execute() { Set> classes = (Set>) reflections.getSubTypesOf(ifClass); for (Class cacheableTestFixtureClazz : classes) { - Object o = cacheableTestFixtureClazz.getDeclaredConstructor().newInstance(); - Method cacheMethod = cacheableTestFixtureClazz.getMethod("cache"); - cacheMethod.invoke(o); + if (Modifier.isAbstract(cacheableTestFixtureClazz.getModifiers()) == false) { + Constructor declaredConstructor = cacheableTestFixtureClazz.getDeclaredConstructor(); + declaredConstructor.setAccessible(true); + Object o = declaredConstructor.newInstance(); + Method cacheMethod = cacheableTestFixtureClazz.getMethod("cache"); + System.out.println("Caching resources from " + cacheableTestFixtureClazz.getName()); + cacheMethod.invoke(o); + } } } catch (Exception e) { throw new RuntimeException(e); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixtureTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixtureTask.java new file mode 100644 index 0000000000000..da7bcfa289808 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixtureTask.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.gradle.internal.testfixtures; + +import org.gradle.api.DefaultTask; +import org.gradle.api.file.DirectoryProperty; +import org.gradle.api.tasks.Internal; + +public abstract class TestFixtureTask extends DefaultTask { + + @Internal + abstract DirectoryProperty getFixturesDir(); +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index 89e8747ee814d..c50ff97498c31 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -81,37 +81,36 @@ public void apply(Project project) { ); ExtraPropertiesExtension ext = project.getExtensions().getByType(ExtraPropertiesExtension.class); - File testfixturesDir = project.file("testfixtures_shared"); - ext.set("testFixturesDir", testfixturesDir); + File testFixturesDir = project.file("testfixtures_shared"); + ext.set("testFixturesDir", testFixturesDir); if (project.file(DOCKER_COMPOSE_YML).exists()) { project.getPluginManager().apply(BasePlugin.class); project.getPluginManager().apply(DockerComposePlugin.class); - - TaskProvider preProcessFixture = project.getTasks().register("preProcessFixture", t -> { - t.doFirst(new Action() { - @Override - public void execute(Task task) { - try { - Files.createDirectories(testfixturesDir.toPath()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + TaskProvider preProcessFixture = project.getTasks().register("preProcessFixture", TestFixtureTask.class, t -> { + t.getFixturesDir().set(testFixturesDir); + t.doFirst(task -> { + try { + Files.createDirectories(testFixturesDir.toPath()); + } catch (IOException e) { + throw new UncheckedIOException(e); } }); }); TaskProvider buildFixture = 
project.getTasks() .register("buildFixture", t -> t.dependsOn(preProcessFixture, tasks.named("composeUp"))); - TaskProvider postProcessFixture = project.getTasks().register("postProcessFixture", task -> { - task.dependsOn(buildFixture); - configureServiceInfoForTask( - task, - project, - false, - (name, port) -> task.getExtensions().getByType(ExtraPropertiesExtension.class).set(name, port) - ); - }); + TaskProvider postProcessFixture = project.getTasks() + .register("postProcessFixture", TestFixtureTask.class, task -> { + task.getFixturesDir().set(testFixturesDir); + task.dependsOn(buildFixture); + configureServiceInfoForTask( + task, + project, + false, + (name, port) -> task.getExtensions().getByType(ExtraPropertiesExtension.class).set(name, port) + ); + }); maybeSkipTask(dockerSupport, preProcessFixture); maybeSkipTask(dockerSupport, postProcessFixture); @@ -138,7 +137,7 @@ public void execute(Task task) { t.mustRunAfter(preProcessFixture); }); tasks.named("composePull").configure(t -> t.mustRunAfter(preProcessFixture)); - tasks.named("composeDown").configure(t -> t.doLast(t2 -> getFileSystemOperations().delete(d -> d.delete(testfixturesDir)))); + tasks.named("composeDown").configure(t -> t.doLast(t2 -> getFileSystemOperations().delete(d -> d.delete(testFixturesDir)))); } else { project.afterEvaluate(spec -> { if (extension.fixtures.isEmpty()) { @@ -179,7 +178,7 @@ private void maybeSkipTasks(TaskContainer tasks, Provider tasks.withType(taskClass).configureEach(t -> maybeSkipTask(dockerSupport, t)); } - private void maybeSkipTask(Provider dockerSupport, TaskProvider task) { + private void maybeSkipTask(Provider dockerSupport, TaskProvider task) { task.configure(t -> maybeSkipTask(dockerSupport, t)); } diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 48c888acd35e2..7475e77bc0805 100644 --- 
a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -163,6 +163,10 @@ org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) +@defaultMessage Do not construct these records outside the source files they are declared in +org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#<init>(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult) +org.elasticsearch.cluster.SnapshotDeletionsInProgress$Entry#<init>(java.lang.String, java.util.List, long, long, org.elasticsearch.cluster.SnapshotDeletionsInProgress$State, java.lang.String) + @defaultMessage Use a Thread constructor with a name, anonymous threads are more difficult to debug java.lang.Thread#<init>(java.lang.Runnable) java.lang.Thread#<init>(java.lang.ThreadGroup, java.lang.Runnable) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index adf33dd070a22..c34bdc95046b3 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.13.0 -lucene = 9.9.0 +lucene = 9.9.0-snapshot-bb4fec631e6 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d diff --git a/build-tools/build.gradle b/build-tools/build.gradle index 3fe2639bfe2a4..eb5573ac03e0e 100644 --- a/build-tools/build.gradle +++ b/build-tools/build.gradle @@ -158,6 +158,10 @@ dependencies { } +tasks.withType(JavaCompile).configureEach { + options.incremental = System.getenv("JENKINS_URL") == null &&
System.getenv("BUILDKITE_BUILD_URL") == null && System.getProperty("isCI") == null +} + tasks.named('test').configure { useJUnitPlatform() } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 86df3544ddfc6..ca2cbc09f7c2f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -201,14 +201,20 @@ public void beforeStart() { try { mockServer.start(); node.setting("telemetry.metrics.enabled", "true"); + node.setting("tracing.apm.agent.enabled", "true"); + node.setting("tracing.apm.agent.transaction_sample_rate", "0.10"); + node.setting("tracing.apm.agent.metrics_interval", "10s"); node.setting("tracing.apm.agent.server_url", "http://127.0.0.1:" + mockServer.getPort()); } catch (IOException e) { logger.warn("Unable to start APM server", e); } - } else if (node.getSettingKeys().contains("telemetry.metrics.enabled") == false) { - // in serverless metrics are enabled by default - // if metrics were not enabled explicitly for gradlew run we should disable them + } + // in serverless metrics are enabled by default + // if metrics were not enabled explicitly for gradlew run we should disable them + else if (node.getSettingKeys().contains("telemetry.metrics.enabled") == false) { // metrics + node.setting("telemetry.metrics.enabled", "false"); + } else if (node.getSettingKeys().contains("tracing.apm.agent.enabled") == false) { // tracing + node.setting("tracing.apm.agent.enabled", "false"); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java index 29650e4b74114..d312fae4456f1 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java +++
b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java @@ -10,6 +10,7 @@ import org.elasticsearch.bootstrap.ServerArgs; import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.ProcessInfo; import org.elasticsearch.cli.UserException; import java.io.BufferedReader; @@ -39,7 +40,7 @@ /** * Parses JVM options from a file and prints a single line with all JVM options to standard output. */ -final class JvmOptionsParser { +public final class JvmOptionsParser { static class JvmOptionsFileParserException extends Exception { @@ -59,7 +60,6 @@ SortedMap invalidLines() { this.jvmOptionsFile = jvmOptionsFile; this.invalidLines = invalidLines; } - } /** @@ -70,25 +70,27 @@ SortedMap invalidLines() { * variable. * * @param args the start-up arguments - * @param configDir the ES config dir + * @param processInfo information about the CLI process. * @param tmpDir the directory that should be passed to {@code -Djava.io.tmpdir} - * @param envOptions the options passed through the ES_JAVA_OPTS env var * @return the list of options to put on the Java command line * @throws InterruptedException if the java subprocess is interrupted * @throws IOException if there is a problem reading any of the files * @throws UserException if there is a problem parsing the `jvm.options` file or `jvm.options.d` files */ - static List determineJvmOptions(ServerArgs args, Path configDir, Path tmpDir, String envOptions) throws InterruptedException, + public static List determineJvmOptions(ServerArgs args, ProcessInfo processInfo, Path tmpDir) throws InterruptedException, IOException, UserException { - final JvmOptionsParser parser = new JvmOptionsParser(); final Map substitutions = new HashMap<>(); substitutions.put("ES_TMPDIR", tmpDir.toString()); - substitutions.put("ES_PATH_CONF", configDir.toString()); + substitutions.put("ES_PATH_CONF", args.configDir().toString()); + + final String envOptions = processInfo.envVars().get("ES_JAVA_OPTS"); try { - 
return parser.jvmOptions(args, configDir, tmpDir, envOptions, substitutions); + return Collections.unmodifiableList( + parser.jvmOptions(args, args.configDir(), tmpDir, envOptions, substitutions, processInfo.sysprops()) + ); } catch (final JvmOptionsFileParserException e) { final String errorMessage = String.format( Locale.ROOT, @@ -122,7 +124,8 @@ private List jvmOptions( final Path config, Path tmpDir, final String esJavaOpts, - final Map substitutions + final Map substitutions, + final Map cliSysprops ) throws InterruptedException, IOException, JvmOptionsFileParserException, UserException { final List jvmOptions = readJvmOptionsFiles(config); @@ -137,7 +140,7 @@ private List jvmOptions( ); substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(config, substitutedJvmOptions)); final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions); - final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings()); + final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings(), cliSysprops); final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index ea2df72fb2c0b..aac5f718081b4 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -243,8 +243,15 @@ protected Command loadTool(String toolname, String libs) { } // protected to allow tests to override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { - return ServerProcess.start(terminal, processInfo, args); + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, 
ServerArgs args) throws Exception { + var tempDir = ServerProcessUtils.setupTempDir(processInfo); + var jvmOptions = JvmOptionsParser.determineJvmOptions(args, processInfo, tempDir); + var serverProcessBuilder = new ServerProcessBuilder().withTerminal(terminal) + .withProcessInfo(processInfo) + .withServerArgs(args) + .withTempDir(tempDir) + .withJvmOptions(jvmOptions); + return serverProcessBuilder.start(); } // protected to allow tests to override diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java index d4b4d57977f5d..3972095a3a5c0 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java @@ -9,34 +9,17 @@ package org.elasticsearch.server.cli; import org.elasticsearch.bootstrap.BootstrapInfo; -import org.elasticsearch.bootstrap.ServerArgs; -import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.ProcessInfo; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.SuppressForbidden; import java.io.IOException; import java.io.OutputStream; -import java.io.UncheckedIOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.FileAttribute; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import static org.elasticsearch.server.cli.ProcessUtil.nonInterruptible; /** * A helper to control a {@link Process} running the main Elasticsearch server. * - *

The process can be started by calling {@link #start(Terminal, ProcessInfo, ServerArgs)}. + *

The process can be started by calling {@link ServerProcessBuilder#start()}. * The process is controlled by internally sending arguments and control signals on stdin, * and receiving control signals on stderr. The start method does not return until the * server is ready to process requests and has exited the bootstrap thread. @@ -64,68 +47,6 @@ public class ServerProcess { this.errorPump = errorPump; } - // this allows mocking the process building by tests - interface OptionsBuilder { - List getJvmOptions(ServerArgs args, Path configDir, Path tmpDir, String envOptions) throws InterruptedException, - IOException, UserException; - } - - // this allows mocking the process building by tests - interface ProcessStarter { - Process start(ProcessBuilder pb) throws IOException; - } - - /** - * Start a server in a new process. - * - * @param terminal A terminal to connect the standard inputs and outputs to for the new process. - * @param processInfo Info about the current process, for passing through to the subprocess. - * @param args Arguments to the server process. 
- * @return A running server process that is ready for requests - * @throws UserException If the process failed during bootstrap - */ - public static ServerProcess start(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { - return start(terminal, processInfo, args, JvmOptionsParser::determineJvmOptions, ProcessBuilder::start); - } - - // package private so tests can mock options building and process starting - static ServerProcess start( - Terminal terminal, - ProcessInfo processInfo, - ServerArgs args, - OptionsBuilder optionsBuilder, - ProcessStarter processStarter - ) throws UserException { - Process jvmProcess = null; - ErrorPumpThread errorPump; - - boolean success = false; - try { - jvmProcess = createProcess(args, processInfo, args.configDir(), optionsBuilder, processStarter); - errorPump = new ErrorPumpThread(terminal.getErrorWriter(), jvmProcess.getErrorStream()); - errorPump.start(); - sendArgs(args, jvmProcess.getOutputStream()); - - String errorMsg = errorPump.waitUntilReady(); - if (errorMsg != null) { - // something bad happened, wait for the process to exit then rethrow - int exitCode = jvmProcess.waitFor(); - throw new UserException(exitCode, errorMsg); - } - success = true; - } catch (InterruptedException e) { - throw new RuntimeException(e); - } catch (IOException e) { - throw new UncheckedIOException(e); - } finally { - if (success == false && jvmProcess != null && jvmProcess.isAlive()) { - jvmProcess.destroyForcibly(); - } - } - - return new ServerProcess(jvmProcess, errorPump); - } - /** * Return the process id of the server. 
*/ @@ -169,19 +90,6 @@ public synchronized void stop() { waitFor(); // ignore exit code, we are already shutting down } - private static void sendArgs(ServerArgs args, OutputStream processStdin) { - // DO NOT close the underlying process stdin, since we need to be able to write to it to signal exit - var out = new OutputStreamStreamOutput(processStdin); - try { - args.writeTo(out); - out.flush(); - } catch (IOException ignore) { - // A failure to write here means the process has problems, and it will die anyway. We let this fall through - // so the pump thread can complete, writing out the actual error. All we get here is the failure to write to - // the process pipe, which isn't helpful to print. - } - } - private void sendShutdownMarker() { try { OutputStream os = jvmProcess.getOutputStream(); @@ -191,80 +99,4 @@ private void sendShutdownMarker() { // process is already effectively dead, fall through to wait for it, or should we SIGKILL? } } - - private static Process createProcess( - ServerArgs args, - ProcessInfo processInfo, - Path configDir, - OptionsBuilder optionsBuilder, - ProcessStarter processStarter - ) throws InterruptedException, IOException, UserException { - Map envVars = new HashMap<>(processInfo.envVars()); - Path tempDir = setupTempDir(processInfo, envVars.remove("ES_TMPDIR")); - if (envVars.containsKey("LIBFFI_TMPDIR") == false) { - envVars.put("LIBFFI_TMPDIR", tempDir.toString()); - } - - List jvmOptions = optionsBuilder.getJvmOptions(args, configDir, tempDir, envVars.remove("ES_JAVA_OPTS")); - // also pass through distribution type - jvmOptions.add("-Des.distribution.type=" + processInfo.sysprops().get("es.distribution.type")); - - Path esHome = processInfo.workingDir(); - Path javaHome = PathUtils.get(processInfo.sysprops().get("java.home")); - List command = new ArrayList<>(); - boolean isWindows = processInfo.sysprops().get("os.name").startsWith("Windows"); - command.add(javaHome.resolve("bin").resolve("java" + (isWindows ? 
".exe" : "")).toString()); - command.addAll(jvmOptions); - command.add("--module-path"); - command.add(esHome.resolve("lib").toString()); - // Special circumstances require some modules (not depended on by the main server module) to be explicitly added: - command.add("--add-modules=jdk.net"); // needed to reflectively set extended socket options - // we control the module path, which may have additional modules not required by server - command.add("--add-modules=ALL-MODULE-PATH"); - command.add("-m"); - command.add("org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch"); - - var builder = new ProcessBuilder(command); - builder.environment().putAll(envVars); - builder.redirectOutput(ProcessBuilder.Redirect.INHERIT); - - return processStarter.start(builder); - } - - /** - * Returns the java.io.tmpdir Elasticsearch should use, creating it if necessary. - * - *

On non-Windows OS, this will be created as a subdirectory of the default temporary directory. - * Note that this causes the created temporary directory to be a private temporary directory. - */ - private static Path setupTempDir(ProcessInfo processInfo, String tmpDirOverride) throws UserException, IOException { - final Path path; - if (tmpDirOverride != null) { - path = Paths.get(tmpDirOverride); - if (Files.exists(path) == false) { - throw new UserException(ExitCodes.CONFIG, "Temporary directory [" + path + "] does not exist or is not accessible"); - } - if (Files.isDirectory(path) == false) { - throw new UserException(ExitCodes.CONFIG, "Temporary directory [" + path + "] is not a directory"); - } - } else { - if (processInfo.sysprops().get("os.name").startsWith("Windows")) { - /* - * On Windows, we avoid creating a unique temporary directory per invocation lest - * we pollute the temporary directory. On other operating systems, temporary directories - * will be cleaned automatically via various mechanisms (e.g., systemd, or restarts). - */ - path = Paths.get(processInfo.sysprops().get("java.io.tmpdir"), "elasticsearch"); - Files.createDirectories(path); - } else { - path = createTempDirectory("elasticsearch-"); - } - } - return path; - } - - @SuppressForbidden(reason = "Files#createTempDirectory(String, FileAttribute...)") - private static Path createTempDirectory(final String prefix, final FileAttribute... attrs) throws IOException { - return Files.createTempDirectory(prefix, attrs); - } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java new file mode 100644 index 0000000000000..4ef1e2bfd4737 --- /dev/null +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessBuilder.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.bootstrap.ServerArgs; +import org.elasticsearch.cli.ProcessInfo; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.core.PathUtils; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +/** + * This class is used to create a {@link ServerProcess}. + * Each ServerProcessBuilder instance manages a collection of process attributes. The {@link ServerProcessBuilder#start()} method creates + * a new {@link ServerProcess} instance with those attributes. 
+ * + * Each process builder manages these process attributes: + * - a temporary directory + * - process info to pass through to the new Java subprocess + * - the command line arguments to run Elasticsearch + * - a list of JVM options to be passed to the Elasticsearch Java process + * - a {@link Terminal} to read input and write output from/to the cli console + */ +public class ServerProcessBuilder { + private Path tempDir; + private ServerArgs serverArgs; + private ProcessInfo processInfo; + private List jvmOptions; + private Terminal terminal; + + // this allows mocking the process building by tests + interface ProcessStarter { + Process start(ProcessBuilder pb) throws IOException; + } + + /** + * Specifies the temporary directory to be used by the server process + */ + public ServerProcessBuilder withTempDir(Path tempDir) { + this.tempDir = tempDir; + return this; + } + + /** + * Specifies the process info to pass through to the new Java subprocess + */ + public ServerProcessBuilder withProcessInfo(ProcessInfo processInfo) { + this.processInfo = processInfo; + return this; + } + + /** + * Specifies the command line arguments to run Elasticsearch + */ + public ServerProcessBuilder withServerArgs(ServerArgs serverArgs) { + this.serverArgs = serverArgs; + return this; + } + + /** + * Specifies the JVM options to be passed to the Elasticsearch Java process + */ + public ServerProcessBuilder withJvmOptions(List jvmOptions) { + this.jvmOptions = jvmOptions; + return this; + } + + /** + * Specifies the {@link Terminal} to use for reading input and writing output from/to the cli console + */ + public ServerProcessBuilder withTerminal(Terminal terminal) { + this.terminal = terminal; + return this; + } + + private Map getEnvironment() { + Map envVars = new HashMap<>(processInfo.envVars()); + + envVars.remove("ES_TMPDIR"); + if (envVars.containsKey("LIBFFI_TMPDIR") == false) { + envVars.put("LIBFFI_TMPDIR", tempDir.toString()); + } + envVars.remove("ES_JAVA_OPTS"); + + 
return envVars; + } + + private List getJvmArgs() { + Path esHome = processInfo.workingDir(); + return List.of( + "--module-path", + esHome.resolve("lib").toString(), + // Special circumstances require some modules (not depended on by the main server module) to be explicitly added: + "--add-modules=jdk.net", // needed to reflectively set extended socket options + // we control the module path, which may have additional modules not required by server + "--add-modules=ALL-MODULE-PATH", + "-m", + "org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch" + ); + } + + private String getCommand() { + Path javaHome = PathUtils.get(processInfo.sysprops().get("java.home")); + + boolean isWindows = processInfo.sysprops().get("os.name").startsWith("Windows"); + return javaHome.resolve("bin").resolve("java" + (isWindows ? ".exe" : "")).toString(); + } + + /** + * Start a server in a new process. + * + * @return A running server process that is ready for requests + * @throws UserException If the process failed during bootstrap + */ + public ServerProcess start() throws UserException { + return start(ProcessBuilder::start); + } + + private static void checkRequiredArgument(Object argument, String argumentName) { + if (argument == null) { + throw new IllegalStateException( + Strings.format("'%s' is a required argument and needs to be specified before calling start()", argumentName) + ); + } + } + + // package private for testing + ServerProcess start(ProcessStarter processStarter) throws UserException { + checkRequiredArgument(tempDir, "tempDir"); + checkRequiredArgument(serverArgs, "serverArgs"); + checkRequiredArgument(processInfo, "processInfo"); + checkRequiredArgument(jvmOptions, "jvmOptions"); + checkRequiredArgument(terminal, "terminal"); + + Process jvmProcess = null; + ErrorPumpThread errorPump; + + boolean success = false; + try { + jvmProcess = createProcess(getCommand(), getJvmArgs(), jvmOptions, getEnvironment(), processStarter); + errorPump = new 
ErrorPumpThread(terminal.getErrorWriter(), jvmProcess.getErrorStream()); + errorPump.start(); + sendArgs(serverArgs, jvmProcess.getOutputStream()); + + String errorMsg = errorPump.waitUntilReady(); + if (errorMsg != null) { + // something bad happened, wait for the process to exit then rethrow + int exitCode = jvmProcess.waitFor(); + throw new UserException(exitCode, errorMsg); + } + success = true; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + if (success == false && jvmProcess != null && jvmProcess.isAlive()) { + jvmProcess.destroyForcibly(); + } + } + + return new ServerProcess(jvmProcess, errorPump); + } + + private static Process createProcess( + String command, + List jvmArgs, + List jvmOptions, + Map environment, + ProcessStarter processStarter + ) throws InterruptedException, IOException { + + var builder = new ProcessBuilder(Stream.concat(Stream.of(command), Stream.concat(jvmOptions.stream(), jvmArgs.stream())).toList()); + builder.environment().putAll(environment); + builder.redirectOutput(ProcessBuilder.Redirect.INHERIT); + + return processStarter.start(builder); + } + + private static void sendArgs(ServerArgs args, OutputStream processStdin) { + // DO NOT close the underlying process stdin, since we need to be able to write to it to signal exit + var out = new OutputStreamStreamOutput(processStdin); + try { + args.writeTo(out); + out.flush(); + } catch (IOException ignore) { + // A failure to write here means the process has problems, and it will die anyway. We let this fall through + // so the pump thread can complete, writing out the actual error. All we get here is the failure to write to + // the process pipe, which isn't helpful to print. 
+ } + } +} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessUtils.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessUtils.java new file mode 100644 index 0000000000000..ebbc68b1be90b --- /dev/null +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcessUtils.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.ProcessInfo; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.core.SuppressForbidden; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.FileAttribute; + +public class ServerProcessUtils { + + /** + * Returns the java.io.tmpdir Elasticsearch should use, creating it if necessary. + * + *

On non-Windows OS, this will be created as a subdirectory of the default temporary directory. + * Note that this causes the created temporary directory to be a private temporary directory. + */ + public static Path setupTempDir(ProcessInfo processInfo) throws UserException { + final Path path; + String tmpDirOverride = processInfo.envVars().get("ES_TMPDIR"); + if (tmpDirOverride != null) { + path = Paths.get(tmpDirOverride); + if (Files.exists(path) == false) { + throw new UserException(ExitCodes.CONFIG, "Temporary directory [" + path + "] does not exist or is not accessible"); + } + if (Files.isDirectory(path) == false) { + throw new UserException(ExitCodes.CONFIG, "Temporary directory [" + path + "] is not a directory"); + } + } else { + try { + if (processInfo.sysprops().get("os.name").startsWith("Windows")) { + /* + * On Windows, we avoid creating a unique temporary directory per invocation lest + * we pollute the temporary directory. On other operating systems, temporary directories + * will be cleaned automatically via various mechanisms (e.g., systemd, or restarts). + */ + path = Paths.get(processInfo.sysprops().get("java.io.tmpdir"), "elasticsearch"); + Files.createDirectories(path); + } else { + path = createTempDirectory("elasticsearch-"); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return path; + } + + @SuppressForbidden(reason = "Files#createTempDirectory(String, FileAttribute...)") + private static Path createTempDirectory(final String prefix, final FileAttribute... 
attrs) throws IOException { + return Files.createTempDirectory(prefix, attrs); + } +} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 6e250075f7747..4a8b3da4777a0 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -12,12 +12,13 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; final class SystemJvmOptions { - static List systemJvmOptions(Settings nodeSettings) { + static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { return Stream.of( /* * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl; @@ -65,7 +66,9 @@ static List systemJvmOptions(Settings nodeSettings) { */ "--add-opens=java.base/java.io=org.elasticsearch.preallocate", maybeOverrideDockerCgroup(), - maybeSetActiveProcessorCount(nodeSettings) + maybeSetActiveProcessorCount(nodeSettings), + // Pass through distribution type + "-Des.distribution.type=" + sysprops.get("es.distribution.type") ).filter(e -> e.isEmpty() == false).collect(Collectors.toList()); } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java index 03856b1024992..101be4301b522 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java @@ -53,7 +53,6 @@ public void testUnversionedOptions() 
throws IOException { try (StringReader sr = new StringReader("-Xms1g\n-Xmx1g"); BufferedReader br = new BufferedReader(sr)) { assertExpectedJvmOptions(randomIntBetween(8, Integer.MAX_VALUE), br, Arrays.asList("-Xms1g", "-Xmx1g")); } - } public void testSingleVersionOption() throws IOException { @@ -351,25 +350,30 @@ public void accept(final int lineNumber, final String line) { public void testNodeProcessorsActiveCount() { { - final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, Map.of()); assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount=")))); } { Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build(); - final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of()); assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); } { // check rounding Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build(); - final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of()); assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); } { // check validation Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build(); - var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings)); + var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings, Map.of())); assertThat(e.getMessage(), containsString("setting [node.processors] must be <=")); } } + + public void testCommandLineDistributionType() { + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, Map.of("es.distribution.type", "testdistro")); + 
assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro")); + } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java index da2c0104dd08e..e469764590bd6 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java @@ -314,6 +314,21 @@ public void testIgnoreNullExceptionOutput() throws Exception { assertThat(terminal.getErrorOutput(), not(containsString("null"))); } + public void testOptionsBuildingInterrupted() throws IOException { + Command command = new TestServerCli() { + @Override + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws Exception { + throw new InterruptedException("interrupted while get jvm options"); + } + }; + var e = expectThrows( + InterruptedException.class, + () -> command.main(new String[0], terminal, new ProcessInfo(sysprops, envVars, esHomeDir)) + ); + assertThat(e.getMessage(), equalTo("interrupted while get jvm options")); + command.close(); + } + public void testServerExitsNonZero() throws Exception { mockServerExitCode = 140; int exitCode = executeMain(); @@ -480,63 +495,65 @@ void reset() { } } - @Override - protected Command newCommand() { - return new ServerCli() { - @Override - protected Command loadTool(String toolname, String libs) { - if (toolname.equals("auto-configure-node")) { - assertThat(libs, equalTo("modules/x-pack-core,modules/x-pack-security,lib/tools/security-cli")); - return AUTO_CONFIG_CLI; - } else if (toolname.equals("sync-plugins")) { - assertThat(libs, equalTo("lib/tools/plugin-cli")); - return SYNC_PLUGINS_CLI; - } - throw new AssertionError("Unknown tool: " + toolname); + private class TestServerCli extends ServerCli { + @Override + protected Command 
loadTool(String toolname, String libs) { + if (toolname.equals("auto-configure-node")) { + assertThat(libs, equalTo("modules/x-pack-core,modules/x-pack-security,lib/tools/security-cli")); + return AUTO_CONFIG_CLI; + } else if (toolname.equals("sync-plugins")) { + assertThat(libs, equalTo("lib/tools/plugin-cli")); + return SYNC_PLUGINS_CLI; } + throw new AssertionError("Unknown tool: " + toolname); + } - @Override - Environment autoConfigureSecurity( - Terminal terminal, - OptionSet options, - ProcessInfo processInfo, - Environment env, - SecureString keystorePassword - ) throws Exception { - if (mockSecureSettingsLoader != null && mockSecureSettingsLoader.supportsSecurityAutoConfiguration() == false) { - fail("We shouldn't be calling auto configure on loaders that don't support it"); - } - return super.autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); + @Override + Environment autoConfigureSecurity( + Terminal terminal, + OptionSet options, + ProcessInfo processInfo, + Environment env, + SecureString keystorePassword + ) throws Exception { + if (mockSecureSettingsLoader != null && mockSecureSettingsLoader.supportsSecurityAutoConfiguration() == false) { + fail("We shouldn't be calling auto configure on loaders that don't support it"); } + return super.autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); + } - @Override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) { - if (argsValidator != null) { - argsValidator.accept(args); - } - mockServer.reset(); - return mockServer; + @Override + void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { + if (mockSecureSettingsLoader != null && mockSecureSettingsLoader instanceof MockSecureSettingsLoader mock) { + mock.verifiedEnv = true; + // equals as a pointer, environment shouldn't be changed if autoconfigure is not supported + 
assertFalse(mockSecureSettingsLoader.supportsSecurityAutoConfiguration()); + assertTrue(mock.environment == env); } - @Override - void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { - if (mockSecureSettingsLoader != null && mockSecureSettingsLoader instanceof MockSecureSettingsLoader mock) { - mock.verifiedEnv = true; - // equals as a pointer, environment shouldn't be changed if autoconfigure is not supported - assertFalse(mockSecureSettingsLoader.supportsSecurityAutoConfiguration()); - assertTrue(mock.environment == env); - } + super.syncPlugins(terminal, env, processInfo); + } - super.syncPlugins(terminal, env, processInfo); + @Override + protected SecureSettingsLoader secureSettingsLoader(Environment env) { + if (mockSecureSettingsLoader != null) { + return mockSecureSettingsLoader; } + return new KeystoreSecureSettingsLoader(); + } + } + + @Override + protected Command newCommand() { + return new TestServerCli() { @Override - protected SecureSettingsLoader secureSettingsLoader(Environment env) { - if (mockSecureSettingsLoader != null) { - return mockSecureSettingsLoader; + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) { + if (argsValidator != null) { + argsValidator.accept(args); } - - return new KeystoreSecureSettingsLoader(); + mockServer.reset(); + return mockServer; } }; } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java index 57993d40391ac..fa36007b40af7 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.ProcessInfo; 
-import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; @@ -34,7 +33,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -47,7 +45,6 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.server.cli.ProcessUtil.nonInterruptibleVoid; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -56,7 +53,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; public class ServerProcessTests extends ESTestCase { @@ -66,7 +62,6 @@ public class ServerProcessTests extends ESTestCase { protected final Map envVars = new HashMap<>(); Path esHomeDir; Settings.Builder nodeSettings; - ServerProcess.OptionsBuilder optionsBuilder; ProcessValidator processValidator; MainMethod mainCallback; MockElasticsearchProcess process; @@ -81,7 +76,7 @@ interface ProcessValidator { } int runForeground() throws Exception { - var server = startProcess(false, false, ""); + var server = startProcess(false, false); return server.waitFor(); } @@ -94,7 +89,6 @@ public void resetEnv() { envVars.clear(); esHomeDir = createTempDir(); nodeSettings = Settings.builder(); - optionsBuilder = (args, configDir, tmpDir, envOptions) -> new ArrayList<>(); processValidator = null; mainCallback = null; secrets = KeyStoreWrapper.create(); @@ -193,9 +187,12 @@ public Process destroyForcibly() { } } - ServerProcess startProcess(boolean daemonize, boolean quiet, String keystorePassword) throws Exception { - var pinfo = new 
ProcessInfo(Map.copyOf(sysprops), Map.copyOf(envVars), esHomeDir); - var args = new ServerArgs( + ProcessInfo createProcessInfo() { + return new ProcessInfo(Map.copyOf(sysprops), Map.copyOf(envVars), esHomeDir); + } + + ServerArgs createServerArgs(boolean daemonize, boolean quiet) { + return new ServerArgs( daemonize, quiet, null, @@ -204,14 +201,23 @@ ServerProcess startProcess(boolean daemonize, boolean quiet, String keystorePass esHomeDir.resolve("config"), esHomeDir.resolve("logs") ); - ServerProcess.ProcessStarter starter = pb -> { + } + + ServerProcess startProcess(boolean daemonize, boolean quiet) throws Exception { + var pinfo = createProcessInfo(); + ServerProcessBuilder.ProcessStarter starter = pb -> { if (processValidator != null) { processValidator.validate(pb); } process = new MockElasticsearchProcess(); return process; }; - return ServerProcess.start(terminal, pinfo, args, optionsBuilder, starter); + var serverProcessBuilder = new ServerProcessBuilder().withTerminal(terminal) + .withProcessInfo(pinfo) + .withServerArgs(createServerArgs(daemonize, quiet)) + .withJvmOptions(List.of()) + .withTempDir(ServerProcessUtils.setupTempDir(pinfo)); + return serverProcessBuilder.start(starter); } public void testProcessBuilder() throws Exception { @@ -231,7 +237,7 @@ public void testProcessBuilder() throws Exception { } public void testPid() throws Exception { - var server = startProcess(true, false, ""); + var server = startProcess(true, false); assertThat(server.pid(), equalTo(12345L)); server.stop(); } @@ -246,18 +252,12 @@ public void testBootstrapError() throws Exception { assertThat(terminal.getErrorOutput(), containsString("a bootstrap exception")); } - public void testStartError() throws Exception { + public void testStartError() { processValidator = pb -> { throw new IOException("something went wrong"); }; - var e = expectThrows(UncheckedIOException.class, () -> runForeground()); + var e = expectThrows(UncheckedIOException.class, this::runForeground); 
assertThat(e.getCause().getMessage(), equalTo("something went wrong")); } - public void testOptionsBuildingInterrupted() throws Exception { - optionsBuilder = (args, configDir, tmpDir, envOptions) -> { throw new InterruptedException("interrupted while get jvm options"); }; - var e = expectThrows(RuntimeException.class, () -> runForeground()); - assertThat(e.getCause().getMessage(), equalTo("interrupted while get jvm options")); - } - public void testEnvPassthrough() throws Exception { envVars.put("MY_ENV", "foo"); processValidator = pb -> { assertThat(pb.environment(), hasEntry(equalTo("MY_ENV"), equalTo("foo"))); }; @@ -276,83 +276,48 @@ public void testLibffiEnv() throws Exception { runForeground(); } - public void testTempDir() throws Exception { - optionsBuilder = (args, configDir, tmpDir, envOptions) -> { - assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); - assertThat(tmpDir.getFileName().toString(), startsWith("elasticsearch-")); - return new ArrayList<>(); - }; - runForeground(); - } - - public void testTempDirWindows() throws Exception { - Path baseTmpDir = createTempDir(); - sysprops.put("os.name", "Windows 10"); - sysprops.put("java.io.tmpdir", baseTmpDir.toString()); - optionsBuilder = (args, configDir, tmpDir, envOptions) -> { - assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); - assertThat(tmpDir.getFileName().toString(), equalTo("elasticsearch")); - assertThat(tmpDir.getParent().toString(), equalTo(baseTmpDir.toString())); - return new ArrayList<>(); - }; - runForeground(); - } - - public void testTempDirOverride() throws Exception { + public void testEnvCleared() throws Exception { Path customTmpDir = createTempDir(); envVars.put("ES_TMPDIR", customTmpDir.toString()); - optionsBuilder = (args, configDir, tmpDir, envOptions) -> { - assertThat(tmpDir.toString(), equalTo(customTmpDir.toString())); - return new ArrayList<>(); - }; - processValidator = pb -> assertThat(pb.environment(), not(hasKey("ES_TMPDIR"))); - 
runForeground(); - } - - public void testTempDirOverrideMissing() throws Exception { - Path baseDir = createTempDir(); - envVars.put("ES_TMPDIR", baseDir.resolve("dne").toString()); - var e = expectThrows(UserException.class, () -> runForeground()); - assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); - assertThat(e.getMessage(), containsString("dne] does not exist")); - } - - public void testTempDirOverrideNotADirectory() throws Exception { - Path tmpFile = createTempFile(); - envVars.put("ES_TMPDIR", tmpFile.toString()); - var e = expectThrows(UserException.class, () -> runForeground()); - assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); - assertThat(e.getMessage(), containsString("is not a directory")); - } - - public void testCustomJvmOptions() throws Exception { envVars.put("ES_JAVA_OPTS", "-Dmyoption=foo"); - optionsBuilder = (args, configDir, tmpDir, envOptions) -> { - assertThat(envOptions, equalTo("-Dmyoption=foo")); - return new ArrayList<>(); + + processValidator = pb -> { + assertThat(pb.environment(), not(hasKey("ES_TMPDIR"))); + assertThat(pb.environment(), not(hasKey("ES_JAVA_OPTS"))); }; - processValidator = pb -> assertThat(pb.environment(), not(hasKey("ES_JAVA_OPTS"))); runForeground(); } public void testCommandLineSysprops() throws Exception { - optionsBuilder = (args, configDir, tmpDir, envOptions) -> List.of("-Dfoo1=bar", "-Dfoo2=baz"); - processValidator = pb -> { - assertThat(pb.command(), contains("-Dfoo1=bar")); - assertThat(pb.command(), contains("-Dfoo2=bar")); + ServerProcessBuilder.ProcessStarter starter = pb -> { + assertThat(pb.command(), hasItems("-Dfoo1=bar", "-Dfoo2=baz")); + process = new MockElasticsearchProcess(); + return process; }; + var serverProcessBuilder = new ServerProcessBuilder().withTerminal(terminal) + .withProcessInfo(createProcessInfo()) + .withServerArgs(createServerArgs(false, false)) + .withJvmOptions(List.of("-Dfoo1=bar", "-Dfoo2=baz")) + .withTempDir(Path.of(".")); + 
serverProcessBuilder.start(starter).waitFor(); + } + + public void testServerProcessBuilderMissingArgumentError() throws Exception { + ServerProcessBuilder.ProcessStarter starter = pb -> new MockElasticsearchProcess(); + var serverProcessBuilder = new ServerProcessBuilder().withTerminal(terminal) + .withProcessInfo(createProcessInfo()) + .withServerArgs(createServerArgs(false, false)) + .withTempDir(Path.of(".")); + var ex = expectThrows(IllegalStateException.class, () -> serverProcessBuilder.start(starter).waitFor()); + assertThat(ex.getMessage(), equalTo("'jvmOptions' is a required argument and needs to be specified before calling start()")); } public void testCommandLine() throws Exception { String mainClass = "org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch"; - String distroSysprop = "-Des.distribution.type=testdistro"; String modulePath = esHomeDir.resolve("lib").toString(); Path javaBin = Paths.get("javahome").resolve("bin"); - sysprops.put("es.distribution.type", "testdistro"); AtomicReference expectedJava = new AtomicReference<>(javaBin.resolve("java").toString()); - processValidator = pb -> { - assertThat(pb.command(), hasItems(expectedJava.get(), distroSysprop, "--module-path", modulePath, "-m", mainClass)); - }; + processValidator = pb -> { assertThat(pb.command(), hasItems(expectedJava.get(), "--module-path", modulePath, "-m", mainClass)); }; runForeground(); sysprops.put("os.name", "Windows 10"); @@ -370,7 +335,7 @@ public void testDetach() throws Exception { // will block until stdin closed manually after test assertThat(stdin.read(), equalTo(-1)); }; - var server = startProcess(true, false, ""); + var server = startProcess(true, false); server.detach(); assertThat(terminal.getErrorOutput(), containsString("final message")); server.stop(); // this should be a noop, and will fail the stdin read assert above if shutdown sent @@ -384,7 +349,7 @@ public void testStop() throws Exception { nonInterruptibleVoid(mainReady::await); 
stderr.println("final message"); }; - var server = startProcess(false, false, ""); + var server = startProcess(false, false); mainReady.countDown(); server.stop(); assertThat(process.main.isDone(), is(true)); // stop should have waited @@ -399,7 +364,7 @@ public void testWaitFor() throws Exception { assertThat(stdin.read(), equalTo((int) BootstrapInfo.SERVER_SHUTDOWN_MARKER)); stderr.println("final message"); }; - var server = startProcess(false, false, ""); + var server = startProcess(false, false); new Thread(() -> { // simulate stop run as shutdown hook in another thread, eg from Ctrl-C nonInterruptibleVoid(mainReady::await); @@ -420,7 +385,7 @@ public void testProcessDies() throws Exception { nonInterruptibleVoid(mainExit::await); exitCode.set(-9); }; - var server = startProcess(false, false, ""); + var server = startProcess(false, false); mainExit.countDown(); int exitCode = server.waitFor(); assertThat(exitCode, equalTo(-9)); diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessUtilsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessUtilsTests.java new file mode 100644 index 0000000000000..8cd1b63e41b03 --- /dev/null +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessUtilsTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.ProcessInfo; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; + +public class ServerProcessUtilsTests extends ESTestCase { + + protected final Map sysprops = new HashMap<>(); + protected final Map envVars = new HashMap<>(); + + @Before + public void resetEnv() { + sysprops.clear(); + sysprops.put("os.name", "Linux"); + sysprops.put("java.home", "javahome"); + envVars.clear(); + } + + private ProcessInfo createProcessInfo() { + return new ProcessInfo(Map.copyOf(sysprops), Map.copyOf(envVars), Path.of(".")); + } + + public void testTempDir() throws Exception { + var tmpDir = ServerProcessUtils.setupTempDir(createProcessInfo()); + assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); + assertThat(tmpDir.getFileName().toString(), startsWith("elasticsearch-")); + } + + public void testTempDirWindows() throws Exception { + Path baseTmpDir = createTempDir(); + sysprops.put("os.name", "Windows 10"); + sysprops.put("java.io.tmpdir", baseTmpDir.toString()); + var tmpDir = ServerProcessUtils.setupTempDir(createProcessInfo()); + assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); + assertThat(tmpDir.getFileName().toString(), equalTo("elasticsearch")); + assertThat(tmpDir.getParent().toString(), equalTo(baseTmpDir.toString())); + } + + public void testTempDirOverride() throws Exception { + Path customTmpDir = createTempDir(); + envVars.put("ES_TMPDIR", customTmpDir.toString()); + var tmpDir = ServerProcessUtils.setupTempDir(createProcessInfo()); + assertThat(tmpDir.toString(), 
equalTo(customTmpDir.toString())); + } + + public void testTempDirOverrideMissing() { + Path baseDir = createTempDir(); + envVars.put("ES_TMPDIR", baseDir.resolve("dne").toString()); + var e = expectThrows(UserException.class, () -> ServerProcessUtils.setupTempDir(createProcessInfo())); + assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); + assertThat(e.getMessage(), containsString("dne] does not exist")); + } + + public void testTempDirOverrideNotADirectory() throws Exception { + Path tmpFile = createTempFile(); + envVars.put("ES_TMPDIR", tmpFile.toString()); + var e = expectThrows(UserException.class, () -> ServerProcessUtils.setupTempDir(createProcessInfo())); + assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); + assertThat(e.getMessage(), containsString("is not a directory")); + } +} diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java index 858787b361654..2c42dcf5cb2f5 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java @@ -17,7 +17,10 @@ import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; +import org.elasticsearch.server.cli.JvmOptionsParser; import org.elasticsearch.server.cli.ServerProcess; +import org.elasticsearch.server.cli.ServerProcessBuilder; +import org.elasticsearch.server.cli.ServerProcessUtils; /** * Starts an Elasticsearch process, but does not wait for it to exit. 
@@ -38,7 +41,14 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce // the Windows service daemon doesn't support secure settings implementations other than the keystore try (var loadedSecrets = KeyStoreWrapper.bootstrap(env.configFile(), () -> new SecureString(new char[0]))) { var args = new ServerArgs(false, true, null, loadedSecrets, env.settings(), env.configFile(), env.logsFile()); - this.server = ServerProcess.start(terminal, processInfo, args); + var tempDir = ServerProcessUtils.setupTempDir(processInfo); + var jvmOptions = JvmOptionsParser.determineJvmOptions(args, processInfo, tempDir); + var serverProcessBuilder = new ServerProcessBuilder().withTerminal(terminal) + .withProcessInfo(processInfo) + .withServerArgs(args) + .withTempDir(tempDir) + .withJvmOptions(jvmOptions); + this.server = serverProcessBuilder.start(); // start does not return until the server is ready, and we do not wait for the process } } diff --git a/docs/build.gradle b/docs/build.gradle index da3d83378e894..b6f696f0aae6a 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -111,9 +111,11 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { systemProperty 'es.transport.cname_in_publish_address', 'true' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.12.0") - extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("oidc/op-jwks.json") - extraConfigFile 'idp-docs-metadata.xml', project(':x-pack:test:idp-fixture').file("idp/shibboleth-idp/metadata/idp-docs-metadata.xml") + // TODO Rene: clean up this kind of cross project file references + extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("src/main/resources/oidc/op-jwks.json") + extraConfigFile 'idp-docs-metadata.xml', project(':x-pack:test:idp-fixture').file("src/main/resources/idp/shibboleth-idp/metadata/idp-docs-metadata.xml") 
extraConfigFile 'testClient.crt', project(':x-pack:plugin:security').file("src/test/resources/org/elasticsearch/xpack/security/action/pki_delegation/testClient.crt") setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' diff --git a/docs/changelog/100740.yaml b/docs/changelog/100740.yaml new file mode 100644 index 0000000000000..c93fbf676ef81 --- /dev/null +++ b/docs/changelog/100740.yaml @@ -0,0 +1,6 @@ +pr: 100740 +summary: "ESQL: Referencing expressions that contain backticks requires <>." +area: ES|QL +type: enhancement +issues: + - 100312 diff --git a/docs/changelog/102078.yaml b/docs/changelog/102078.yaml new file mode 100644 index 0000000000000..d031aa0dbf6f7 --- /dev/null +++ b/docs/changelog/102078.yaml @@ -0,0 +1,5 @@ +pr: 102078 +summary: Derive expected replica size from primary +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102734.yaml b/docs/changelog/102734.yaml deleted file mode 100644 index c27846d7d8478..0000000000000 --- a/docs/changelog/102734.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102734 -summary: Allow match field in enrich fields -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/102741.yaml b/docs/changelog/102741.yaml deleted file mode 100644 index 84a4b8092632f..0000000000000 --- a/docs/changelog/102741.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102741 -summary: "[ILM] More resilient when a policy is added to searchable snapshot" -area: ILM+SLM -type: bug -issues: - - 101958 diff --git a/docs/changelog/102759.yaml b/docs/changelog/102759.yaml new file mode 100644 index 0000000000000..1c002ef2b678e --- /dev/null +++ b/docs/changelog/102759.yaml @@ -0,0 +1,6 @@ +pr: 102759 +summary: Close rather than stop `HttpServerTransport` on shutdown +area: Infra/Node Lifecycle +type: bug +issues: + - 102501 diff --git a/docs/changelog/102765.yaml b/docs/changelog/102765.yaml new file mode 100644 index 0000000000000..eb73da2650542 --- /dev/null +++ 
b/docs/changelog/102765.yaml @@ -0,0 +1,5 @@ +pr: 102765 +summary: "Add APM metrics to `HealthPeriodicLogger`" +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml new file mode 100644 index 0000000000000..986ad99f96a19 --- /dev/null +++ b/docs/changelog/102798.yaml @@ -0,0 +1,5 @@ +pr: 102798 +summary: Hot-reloadable remote cluster credentials +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102862.yaml b/docs/changelog/102862.yaml new file mode 100644 index 0000000000000..bb453163009d5 --- /dev/null +++ b/docs/changelog/102862.yaml @@ -0,0 +1,5 @@ +pr: 102862 +summary: Add optional pruning configuration (weighted terms scoring) to text expansion query +area: "Machine Learning" +type: enhancement +issues: [] diff --git a/docs/changelog/102879.yaml b/docs/changelog/102879.yaml new file mode 100644 index 0000000000000..b35d36dd0a3a9 --- /dev/null +++ b/docs/changelog/102879.yaml @@ -0,0 +1,5 @@ +pr: 102879 +summary: Fix disk computation when initializing new shards +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/102958.yaml b/docs/changelog/102958.yaml deleted file mode 100644 index bb357c1eb09b5..0000000000000 --- a/docs/changelog/102958.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 102958 -summary: Ensure transform `_schedule_now` API only triggers the expected transform - task -area: Transform -type: bug -issues: - - 102956 diff --git a/docs/changelog/102967.yaml b/docs/changelog/102967.yaml new file mode 100644 index 0000000000000..cdde735f6c077 --- /dev/null +++ b/docs/changelog/102967.yaml @@ -0,0 +1,6 @@ +pr: 102967 +summary: "ES|QL: Improve resolution error management in `mv_expand`" +area: ES|QL +type: bug +issues: + - 102964 diff --git a/docs/changelog/103003.yaml b/docs/changelog/103003.yaml new file mode 100644 index 0000000000000..accacc2b62416 --- /dev/null +++ b/docs/changelog/103003.yaml @@ -0,0 +1,6 @@ +pr: 103003 +summary: "Fix: Watcher REST API 
`GET /_watcher/settings` now includes product header" +area: "Watcher" +type: bug +issues: + - 102928 diff --git a/docs/changelog/103031.yaml b/docs/changelog/103031.yaml deleted file mode 100644 index f63094139f5ca..0000000000000 --- a/docs/changelog/103031.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 103031 -summary: Collect warnings in compute service -area: ES|QL -type: bug -issues: - - 100163 - - 103028 - - 102871 - - 102982 diff --git a/docs/changelog/103032.yaml b/docs/changelog/103032.yaml new file mode 100644 index 0000000000000..81d84fca0bdb0 --- /dev/null +++ b/docs/changelog/103032.yaml @@ -0,0 +1,5 @@ +pr: 103032 +summary: "x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script" +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/103035.yaml b/docs/changelog/103035.yaml new file mode 100644 index 0000000000000..5b1c9d6629767 --- /dev/null +++ b/docs/changelog/103035.yaml @@ -0,0 +1,5 @@ +pr: 103035 +summary: "x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates" +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/103047.yaml b/docs/changelog/103047.yaml deleted file mode 100644 index 59f86d679b55f..0000000000000 --- a/docs/changelog/103047.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103047 -summary: Ensure `dynamicMapping` updates are handled in insertion order -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/103084.yaml b/docs/changelog/103084.yaml new file mode 100644 index 0000000000000..fb5a718a086de --- /dev/null +++ b/docs/changelog/103084.yaml @@ -0,0 +1,6 @@ +pr: 103084 +summary: Return `matched_queries` in Percolator +area: Percolator +type: enhancement +issues: + - 10163 diff --git a/docs/changelog/103087.yaml b/docs/changelog/103087.yaml deleted file mode 100644 index 5824bc53edb8d..0000000000000 --- a/docs/changelog/103087.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103087 -summary: Use latest version of entsearch ingestion 
pipeline -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103099.yaml b/docs/changelog/103099.yaml new file mode 100644 index 0000000000000..c3fd3f9d7b8e4 --- /dev/null +++ b/docs/changelog/103099.yaml @@ -0,0 +1,6 @@ +pr: 103099 +summary: "ESQL: Simpify IS NULL/IS NOT NULL evaluation" +area: ES|QL +type: enhancement +issues: + - 103097 diff --git a/docs/changelog/103151.yaml b/docs/changelog/103151.yaml new file mode 100644 index 0000000000000..bd9eea97cac6d --- /dev/null +++ b/docs/changelog/103151.yaml @@ -0,0 +1,6 @@ +pr: 103151 +summary: Wrap painless explain error +area: Infra/Scripting +type: bug +issues: + - 103018 diff --git a/docs/changelog/103176.yaml b/docs/changelog/103176.yaml new file mode 100644 index 0000000000000..a0f46c1462f62 --- /dev/null +++ b/docs/changelog/103176.yaml @@ -0,0 +1,5 @@ +pr: 103176 +summary: Validate settings in `ReloadSecureSettings` API +area: Client +type: bug +issues: [] diff --git a/docs/changelog/103183.yaml b/docs/changelog/103183.yaml new file mode 100644 index 0000000000000..cb28033cff6a7 --- /dev/null +++ b/docs/changelog/103183.yaml @@ -0,0 +1,6 @@ +pr: 103183 +summary: "[Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob`\ + \ parser" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103185.yaml b/docs/changelog/103185.yaml new file mode 100644 index 0000000000000..3a1a4960ba98c --- /dev/null +++ b/docs/changelog/103185.yaml @@ -0,0 +1,5 @@ +pr: 103185 +summary: Fix format string in `OldLuceneVersions` +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103190.yaml b/docs/changelog/103190.yaml new file mode 100644 index 0000000000000..5e6927d3eadd7 --- /dev/null +++ b/docs/changelog/103190.yaml @@ -0,0 +1,5 @@ +pr: 103190 +summary: ILM/SLM history policies forcemerge in hot and dsl configuration +area: ILM+SLM +type: enhancement +issues: [] diff --git a/docs/changelog/103203.yaml b/docs/changelog/103203.yaml new file mode 100644 index 
0000000000000..d2aa3e9961c6a --- /dev/null +++ b/docs/changelog/103203.yaml @@ -0,0 +1,5 @@ +pr: 103203 +summary: Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103209.yaml b/docs/changelog/103209.yaml new file mode 100644 index 0000000000000..05ae8c13bcb5c --- /dev/null +++ b/docs/changelog/103209.yaml @@ -0,0 +1,6 @@ +pr: 103209 +summary: "ESQL: Fix `to_degrees()` returning infinity" +area: ES|QL +type: bug +issues: + - 102987 diff --git a/docs/changelog/103212.yaml b/docs/changelog/103212.yaml new file mode 100644 index 0000000000000..3cbbddc8f2229 --- /dev/null +++ b/docs/changelog/103212.yaml @@ -0,0 +1,5 @@ +pr: 103212 +summary: Use the eql query filter for the open-pit request +area: EQL +type: enhancement +issues: [] diff --git a/docs/changelog/103223.yaml b/docs/changelog/103223.yaml new file mode 100644 index 0000000000000..c2f4c1b6a2cf4 --- /dev/null +++ b/docs/changelog/103223.yaml @@ -0,0 +1,10 @@ +pr: 103223 +summary: "[Synonyms] Mark Synonyms as GA" +area: "Search" +type: feature +issues: [] +highlight: + title: "GA Release of Synonyms API" + body: |- + Removes the beta label for the Synonyms API to make it GA. 
+ notable: true diff --git a/docs/changelog/103251.yaml b/docs/changelog/103251.yaml new file mode 100644 index 0000000000000..0c5c6d6e4d776 --- /dev/null +++ b/docs/changelog/103251.yaml @@ -0,0 +1,5 @@ +pr: 103251 +summary: Wait for reroute before acking put-shutdown +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/docs/changelog/103310.yaml b/docs/changelog/103310.yaml new file mode 100644 index 0000000000000..a7a0746b6b8c4 --- /dev/null +++ b/docs/changelog/103310.yaml @@ -0,0 +1,5 @@ +pr: 103310 +summary: Revert "Validate settings in `ReloadSecureSettings` API" +area: Security +type: bug +issues: [] diff --git a/docs/changelog/103316.yaml b/docs/changelog/103316.yaml new file mode 100644 index 0000000000000..47eddcc34d924 --- /dev/null +++ b/docs/changelog/103316.yaml @@ -0,0 +1,5 @@ +pr: 103316 +summary: Review KEEP logic to prevent duplicate column names +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/103325.yaml b/docs/changelog/103325.yaml new file mode 100644 index 0000000000000..7de6c41986490 --- /dev/null +++ b/docs/changelog/103325.yaml @@ -0,0 +1,6 @@ +pr: 103325 +summary: Added Duplicate Word Check Feature to Analysis Nori +area: Search +type: feature +issues: + - 103321 diff --git a/docs/changelog/103339.yaml b/docs/changelog/103339.yaml new file mode 100644 index 0000000000000..6ea1ab0cf799a --- /dev/null +++ b/docs/changelog/103339.yaml @@ -0,0 +1,6 @@ +pr: 103339 +summary: "ESQL: Fix resolution of MV_EXPAND after KEEP *" +area: ES|QL +type: bug +issues: + - 103331 diff --git a/docs/changelog/103342.yaml b/docs/changelog/103342.yaml new file mode 100644 index 0000000000000..32711d7a6b390 --- /dev/null +++ b/docs/changelog/103342.yaml @@ -0,0 +1,5 @@ +pr: 103342 +summary: Use dataset size instead of on-disk size for data stream stats +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/103361.yaml b/docs/changelog/103361.yaml new file mode 100644 index 0000000000000..441acc09895ef --- /dev/null +++ 
b/docs/changelog/103361.yaml @@ -0,0 +1,5 @@ +pr: 103361 +summary: Prevent attempts to access non-existent node information during rebalancing +area: Machine Learning +type: bug +issues: [ ] diff --git a/docs/changelog/103408.yaml b/docs/changelog/103408.yaml new file mode 100644 index 0000000000000..bf5081b854f08 --- /dev/null +++ b/docs/changelog/103408.yaml @@ -0,0 +1,6 @@ +pr: 103408 +summary: Cache component versions +area: Infra/Core +type: bug +issues: + - 102103 diff --git a/docs/changelog/103427.yaml b/docs/changelog/103427.yaml new file mode 100644 index 0000000000000..57a27aa687ab7 --- /dev/null +++ b/docs/changelog/103427.yaml @@ -0,0 +1,5 @@ +pr: 103427 +summary: "[Connector API] Fix bug with nullable tooltip field in parser" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103430.yaml b/docs/changelog/103430.yaml new file mode 100644 index 0000000000000..cd2444270849d --- /dev/null +++ b/docs/changelog/103430.yaml @@ -0,0 +1,5 @@ +pr: 103430 +summary: "[Connectors API] Fix bug with missing TEXT `DisplayType` enum" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103435.yaml b/docs/changelog/103435.yaml new file mode 100644 index 0000000000000..95e3c7169ada9 --- /dev/null +++ b/docs/changelog/103435.yaml @@ -0,0 +1,5 @@ +pr: 103435 +summary: Dispatch `ClusterStateAction#buildResponse` to executor +area: Distributed +type: bug +issues: [] diff --git a/docs/changelog/103474.yaml b/docs/changelog/103474.yaml new file mode 100644 index 0000000000000..a1da15a6bfbe5 --- /dev/null +++ b/docs/changelog/103474.yaml @@ -0,0 +1,6 @@ +pr: 103474 +summary: Fix now in millis for ESQL search contexts +area: ES|QL +type: bug +issues: + - 103455 diff --git a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc new file mode 100644 index 0000000000000..6123b7eb5511d --- /dev/null +++ 
b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc @@ -0,0 +1,50 @@ +[[cancel-connector-sync-job-api]] +=== Cancel connector sync job API +++++ +Cancel connector sync job +++++ + +Cancels a connector sync job. + +[[cancel-connector-sync-job-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job//_cancel` + +[[cancel-connector-sync-job-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[cancel-connector-sync-job-api-desc]] +==== {api-description-title} + +Cancels a connector sync job, which sets the `status` to `cancelling` and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the `status` of connector sync jobs to `cancelled`. + +[[cancel-connector-sync-job-api-path-params]] +==== {api-path-parms-title} + +`connector_sync_job_id`:: +(Required, string) + +[[cancel-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector sync job cancellation was successfully requested. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[cancel-connector-sync-job-api-example]] +==== {api-examples-title} + +The following example cancels the connector sync job with ID `my-connector-sync-job-id`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job-id/_cancel +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] 
+ diff --git a/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc new file mode 100644 index 0000000000000..04c8057e2c115 --- /dev/null +++ b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc @@ -0,0 +1,48 @@ +[[check-in-connector-sync-job-api]] +=== Check in connector sync job API +++++ +Check in connector sync job +++++ + +Checks in a connector sync job (updates `last_seen` to the current time). + +[[check-in-connector-sync-job-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job//_check_in/` + +[[check-in-connector-sync-job-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[check-in-connector-sync-job-api-desc]] +==== {api-description-title} + +Checks in a connector sync job and sets `last_seen` to the time right before updating it in the internal index. + +[[check-in-connector-sync-job-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[[check-in-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector sync job was successfully checked in. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[check-in-connector-sync-job-api-example]] +==== {api-examples-title} + +The following example checks in the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_check_in +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] 
diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc new file mode 100644 index 0000000000000..1e7a8c51dde47 --- /dev/null +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -0,0 +1,61 @@ +[[connector-apis]] +== Connector APIs + +preview::[] + +++++ +Connector APIs +++++ + +--- + +The connector and sync jobs API provides a convenient way to create and manage Elastic connectors and sync jobs in an internal index. + +This API provides an alternative to relying solely on {kib} UI for connector and sync job management. The API comes with a set of +validations and assertions to ensure that the state representation in the internal index remains valid. + +[discrete] +[[elastic-connector-apis]] +=== Connector APIs + +You can use these APIs to create, get, delete and update connectors. + +Use the following APIs to manage connectors: + +* <> +* <> +* <> +* <> + + +[discrete] +[[sync-job-apis]] +=== Sync Job APIs + +You can use these APIs to create, cancel, delete and update sync jobs. 
+ +Use the following APIs to manage sync jobs: + + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + + +include::cancel-connector-sync-job-api.asciidoc[] +include::check-in-connector-sync-job-api.asciidoc[] +include::create-connector-api.asciidoc[] +include::create-connector-sync-job-api.asciidoc[] +include::delete-connector-api.asciidoc[] +include::delete-connector-sync-job-api.asciidoc[] +include::get-connector-api.asciidoc[] +include::get-connector-sync-job-api.asciidoc[] +include::list-connectors-api.asciidoc[] +include::list-connector-sync-jobs-api.asciidoc[] +include::set-connector-sync-job-error-api.asciidoc[] +include::set-connector-sync-job-stats-api.asciidoc[] diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc new file mode 100644 index 0000000000000..b62ca4ad070a4 --- /dev/null +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -0,0 +1,128 @@ +[[create-connector-api]] +=== Create connector API +++++ +Create connector +++++ + +Creates a connector. + + +[source,console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +//// +[source,console] +---- +DELETE _connector/my-connector +---- +// TEST[continued] +//// + +[[create-connector-api-request]] +==== {api-request-title} +`POST _connector` + +`PUT _connector/` + + +[[create-connector-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `service_type` parameter should reference an existing connector service type. + + +[[create-connector-api-desc]] +==== {api-description-title} + +Creates a connector document in the internal index and initializes its configuration, filtering, and scheduling with default values. 
These values can be updated later as needed. + +[[create-connector-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Unique identifier of a connector. + + +[role="child_attributes"] +[[create-connector-api-request-body]] +==== {api-request-body-title} + +`description`:: +(Optional, string) The description of the connector. + +`index_name`:: +(Required, string) The target index for syncing data by the connector. + +`name`:: +(Optional, string) The name of the connector. + +`is_native`:: +(Optional, boolean) Indicates if it's a native connector. Defaults to `false`. + +`language`:: +(Optional, string) Language analyzer for the data. Limited to supported languages. + +`service_type`:: +(Optional, string) Connector service type. Can reference Elastic-supported connector types or a custom connector type. + + +[role="child_attributes"] +[[create-connector-api-response-body]] +==== {api-response-body-title} + +`id`:: + (string) The ID associated with the connector document. Returned when using a POST request. + +`result`:: + (string) The result of the indexing operation, `created` or `updated`. Returned when using a PUT request. + +[[create-connector-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that an existing connector was updated successfully. + +`201`:: +Indicates that the connector was created successfully. + +`400`:: +Indicates that the request was malformed. 
+ +[[create-connector-api-example]] +==== {api-examples-title} + +[source,console] +---- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "description": "My Connector to sync data to Elastic index from Google Drive", + "service_type": "google_drive", + "language": "english" +} +---- + + +The API returns the following result: + +[source,console-result] +---- +{ + "result": "created" +} +---- +//// +[source,console] +---- +DELETE _connector/my-connector +---- +// TEST[continued] +//// diff --git a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc new file mode 100644 index 0000000000000..e8c2c364797c4 --- /dev/null +++ b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc @@ -0,0 +1,69 @@ +[[create-connector-sync-job-api]] +=== Create connector sync job API +++++ +Create connector sync job +++++ + +Creates a connector sync job. + +[source, console] +-------------------------------------------------- +POST _connector/_sync_job +{ + "id": "connector-id", + "job_type": "full", + "trigger_method": "on_demand" +} +-------------------------------------------------- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the id ahead of time] + + +[[create-connector-sync-job-api-request]] +==== {api-request-title} +`POST _connector/_sync_job` + + +[[create-connector-sync-job-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `id` parameter should reference an existing connector. + +[[create-connector-sync-job-api-desc]] +==== {api-description-title} + +Creates a connector sync job document in the internal index and initializes its counters and timestamps with default values. +Certain values can be updated via the API. 
+ +[role="child_attributes"] +[[create-connector-sync-job-api-request-body]] +==== {api-request-body-title} + +`id`:: +(Required, string) The id of the connector to create the sync job for. + +`job_type`:: +(Optional, string) The job type of the created sync job. Defaults to `full`. + +`trigger_method`:: +(Optional, string) The trigger method of the created sync job. Defaults to `on_demand`. + + +[role="child_attributes"] +[[create-connector-sync-job-api-response-body]] +==== {api-response-body-title} + +`id`:: +(string) The ID associated with the connector sync job document. + +[[create-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`201`:: +Indicates that the connector sync job was created successfully. + +`400`:: +Indicates that the request was malformed. + +`404`:: +Indicates that either the index or the referenced connector is missing. diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc new file mode 100644 index 0000000000000..2bda7da72cb72 --- /dev/null +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -0,0 +1,66 @@ +[[delete-connector-api]] +=== Delete connector API + +preview::[] + +++++ +Delete connector +++++ + +Removes a connector and its associated data. +This is a destructive action that is not recoverable. + +[[delete-connector-api-request]] +==== {api-request-title} + +`DELETE _connector/` + +[[delete-connector-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[delete-connector-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[[delete-connector-api-response-codes]] +==== {api-response-codes-title} + +`400`:: +The `connector_id` was not provided. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. 
+ +[[delete-connector-api-example]] +==== {api-examples-title} + +The following example deletes the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP +//// + +[source,console] +---- +DELETE _connector/my-connector +---- + +[source,console-result] +---- +{ + "acknowledged": true +} +---- diff --git a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc new file mode 100644 index 0000000000000..8641794576bf1 --- /dev/null +++ b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc @@ -0,0 +1,54 @@ +[[delete-connector-sync-job-api]] +=== Delete connector sync job API + +preview::[] + +++++ +Delete connector sync job +++++ + +Removes a connector sync job and its associated data. +This is a destructive action that is not recoverable. + +[[delete-connector-sync-job-api-request]] +==== {api-request-title} + +`DELETE _connector/_sync_job/` + +[[delete-connector-sync-job-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[delete-connector-sync-job-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[[delete-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`400`:: +The `connector_sync_job_id` was not provided. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. 
+ +[[delete-connector-sync-job-api-example]] +==== {api-examples-title} + +The following example deletes the connector sync job with ID `my-connector-sync-job-id`: + +[source,console] +---- +DELETE _connector/_sync_job/my-connector-sync-job-id +---- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the ids of sync jobs ahead of time] + +[source,console-result] +---- +{ + "acknowledged": true +} +---- diff --git a/docs/reference/connector/apis/get-connector-api.asciidoc b/docs/reference/connector/apis/get-connector-api.asciidoc new file mode 100644 index 0000000000000..ab4a2758ce4f1 --- /dev/null +++ b/docs/reference/connector/apis/get-connector-api.asciidoc @@ -0,0 +1,63 @@ +[[get-connector-api]] +=== Get connector API +preview::[] +++++ +Get connector +++++ + +Retrieves the details about a connector. + +[[get-connector-api-request]] +==== {api-request-title} + +`GET _connector/` + +[[get-connector-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[get-connector-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[[get-connector-api-response-codes]] +==== {api-response-codes-title} + +`400`:: +The `connector_id` was not provided. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. 
+ +[[get-connector-api-example]] +==== {api-examples-title} + +The following example gets the connector `my-connector`: + +//// +[source,console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "Google Drive Connector", + "service_type": "google_drive" +} + +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +GET _connector/my-connector +---- diff --git a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc new file mode 100644 index 0000000000000..b33aec8c55e60 --- /dev/null +++ b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc @@ -0,0 +1,44 @@ +[[get-connector-sync-job-api]] +=== Get connector sync job API +preview::[] +++++ +Get connector sync job +++++ + +Retrieves the details about a connector sync job. + +[[get-connector-sync-job-api-request]] +==== {api-request-title} + +`GET _connector/_sync_job/` + +[[get-connector-sync-job-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[get-connector-sync-job-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[[get-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`400`:: +The `connector_sync_job_id` was not provided. + +`404` (Missing resources):: +No connector sync job matching `connector_sync_job_id` could be found. 
+ +[[get-connector-sync-job-api-example]] +==== {api-examples-title} + +The following example gets the connector sync job `my-connector-sync-job`: + +[source,console] +---- +GET _connector/_sync_job/my-connector-sync-job +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc new file mode 100644 index 0000000000000..8b88f318f5304 --- /dev/null +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -0,0 +1,80 @@ +[role="xpack"] +[[list-connector-sync-jobs-api]] +=== List connector sync jobs API + +preview::[] + +++++ +List connector sync jobs +++++ + +Returns information about all stored connector sync jobs ordered by their creation date in ascending order. + + +[[list-connector-sync-jobs-api-request]] +==== {api-request-title} + +`GET _connector/_sync_job` + +[[list-connector-sync-jobs-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[list-connector-sync-jobs-api-path-params]] +==== {api-path-parms-title} + +`size`:: +(Optional, integer) Maximum number of results to retrieve. Defaults to `100`. + +`from`:: +(Optional, integer) The offset from the first result to fetch. Defaults to `0`. + +`status`:: +(Optional, job status) The job status the fetched sync jobs need to have. + +`connector_id`:: +(Optional, string) The connector id the fetched sync jobs need to have. 
+ +[[list-connector-sync-jobs-api-example]] +==== {api-examples-title} + +The following example lists all connector sync jobs: + + +[source,console] +---- +GET _connector/_sync_job +---- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the ids of sync jobs ahead of time] + +The following example lists the first two connector sync jobs: + +[source,console] +---- +GET _connector/_sync_job?from=0&size=2 +---- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the ids of sync jobs ahead of time] + +The following example lists pending connector sync jobs (the first 100 by default): +[source,console] +---- +GET _connector/_sync_job?status=pending +---- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the ids of sync jobs ahead of time] + +The following example lists connector sync jobs (the first 100 by default) for the connector with id `connector-1`: +[source,console] +---- +GET _connector/_sync_job?connector_id=connector-1 +---- +// TEST[skip:there's no way to clean up after this code snippet, as we don't know the ids of sync jobs ahead of time] + +[[list-connector-sync-jobs-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that results were successfully returned (results can also be empty). + +`400`:: +Indicates that the request was malformed. diff --git a/docs/reference/connector/apis/list-connectors-api.asciidoc b/docs/reference/connector/apis/list-connectors-api.asciidoc new file mode 100644 index 0000000000000..57d3cc47aeb7a --- /dev/null +++ b/docs/reference/connector/apis/list-connectors-api.asciidoc @@ -0,0 +1,77 @@ +[role="xpack"] +[[list-connector-api]] +=== List connectors API + +preview::[] + +++++ +List connectors +++++ + +Returns information about all stored connectors. 
+ + +[[list-connector-api-request]] +==== {api-request-title} + +`GET _connector` + +[[list-connector-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. + +[[list-connector-api-path-params]] +==== {api-path-parms-title} + +`size`:: +(Optional, integer) Maximum number of results to retrieve. + +`from`:: +(Optional, integer) The offset from the first result to fetch. + +[[list-connector-api-example]] +==== {api-examples-title} + +The following example lists all connectors: + +//// +[source,console] +-------------------------------------------------- +PUT _connector/connector-1 +{ + "index_name": "search-google-drive", + "name": "Google Drive Connector", + "service_type": "google_drive" +} + +PUT _connector/connector-2 +{ + "index_name": "search-sharepoint-online", + "name": "Sharepoint Online Connector", + "service_type": "sharepoint_online" +} + +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/connector-1 + +DELETE _connector/connector-2 +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +GET _connector +---- + +The following example lists the first two connectors: + +[source,console] +---- +GET _connector/?from=0&size=2 +---- diff --git a/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc new file mode 100644 index 0000000000000..935fcccc77fcf --- /dev/null +++ b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc @@ -0,0 +1,58 @@ +[[set-connector-sync-job-error-api]] +=== Set connector sync job error API +++++ +Set connector sync job error +++++ + +Sets a connector sync job error. 
+ +[[set-connector-sync-job-error-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job/<connector_sync_job_id>/_error` + +[[set-connector-sync-job-error-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[set-connector-sync-job-error-api-desc]] +==== {api-description-title} + +Sets the `error` field for the specified connector sync job and sets its `status` to `error`. + +[[set-connector-sync-job-error-api-path-params]] +==== {api-path-parms-title} + +`<connector_sync_job_id>`:: +(Required, string) + +[role="child_attributes"] +[[set-connector-sync-job-error-api-request-body]] +==== {api-request-body-title} + +`error`:: +(Required, string) The error to set the connector sync job `error` field to. + +[[set-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that the connector sync job error was set successfully. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[set-connector-sync-job-error-api-example]] +==== {api-examples-title} + +The following example sets the error `some-error` in the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_error +{ + "error": "some-error" +} +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] 
diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc new file mode 100644 index 0000000000000..0513155312bb4 --- /dev/null +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -0,0 +1,77 @@ +[[set-connector-sync-job-stats-api]] +=== Set connector sync job stats API +++++ +Set connector sync job stats +++++ + +Sets connector sync job stats. + +[[set-connector-sync-job-stats-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job/<connector_sync_job_id>/_stats` + +[[set-connector-sync-job-stats-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[set-connector-sync-job-stats-api-desc]] +==== {api-description-title} + +Sets the stats for a connector sync job. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume` and `total_document_count`. +`last_seen` can also be updated using this API. +This API is mainly used by the connector service for updating sync job information. + +[[set-connector-sync-job-stats-api-path-params]] +==== {api-path-parms-title} + +`<connector_sync_job_id>`:: +(Required, string) + +[role="child_attributes"] +[[set-connector-sync-job-stats-api-request-body]] +==== {api-request-body-title} + +`deleted_document_count`:: +(Required, int) The number of documents the sync job deleted. + +`indexed_document_count`:: +(Required, int) The number of documents the sync job indexed. + +`indexed_document_volume`:: +(Required, int) The total size of the data (in MiB) the sync job indexed. + +`total_document_count`:: +(Optional, int) The total number of documents in the target index after the sync job finished. + +`last_seen`:: +(Optional, instant) The timestamp to set the connector sync job's `last_seen` property. 
+ +[[set-connector-sync-job-stats-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that the connector sync job stats were successfully updated. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[set-connector-sync-job-stats-api-example]] +==== {api-examples-title} + +The following example sets all mandatory and optional stats for the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_stats +{ + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "total_document_count": 2000, + "last_seen": "2023-01-02T10:00:00Z" +} +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] diff --git a/docs/reference/data-streams/set-up-tsds.asciidoc b/docs/reference/data-streams/set-up-tsds.asciidoc index c175da2e991e9..ed6b79653e61f 100644 --- a/docs/reference/data-streams/set-up-tsds.asciidoc +++ b/docs/reference/data-streams/set-up-tsds.asciidoc @@ -176,9 +176,7 @@ PUT _component_template/my-weather-sensor-mappings Optionally, the index settings component template for a TSDS can include: * Your lifecycle policy in the `index.lifecycle.name` index setting. -* The <> index setting. -* The <> index setting. -* Other index settings, such as <>, for your TSDS's +* Other index settings, such as <>, for your TSDS's backing indices. 
IMPORTANT: Don't specify the `index.routing_path` index setting in a component @@ -191,8 +189,7 @@ PUT _component_template/my-weather-sensor-settings { "template": { "settings": { - "index.lifecycle.name": "my-lifecycle-policy", - "index.look_ahead_time": "3h" + "index.lifecycle.name": "my-lifecycle-policy" } }, "_meta": { diff --git a/docs/reference/esql/esql-functions.asciidoc b/docs/reference/esql/esql-functions.asciidoc index b921719fc097b..c2e943f7555d6 100644 --- a/docs/reference/esql/esql-functions.asciidoc +++ b/docs/reference/esql/esql-functions.asciidoc @@ -127,9 +127,11 @@ include::functions/tan.asciidoc[] include::functions/tanh.asciidoc[] include::functions/tau.asciidoc[] include::functions/to_boolean.asciidoc[] +include::functions/to_cartesianpoint.asciidoc[] include::functions/to_datetime.asciidoc[] include::functions/to_degrees.asciidoc[] include::functions/to_double.asciidoc[] +include::functions/to_geopoint.asciidoc[] include::functions/to_integer.asciidoc[] include::functions/to_ip.asciidoc[] include::functions/to_long.asciidoc[] diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index f1971fd409754..00f5b056c7ebe 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -34,6 +34,9 @@ include::processing-commands/limit.asciidoc[tag=limitation] * `text` * `unsigned_long` * `version` +* Spatial types +** `geo_point` +** `point` [discrete] ==== Unsupported types @@ -44,10 +47,8 @@ include::processing-commands/limit.asciidoc[tag=limitation] ** `counter` ** `position` ** `aggregate_metric_double` -* Geo/spatial -** `geo_point` +* Spatial types ** `geo_shape` -** `point` ** `shape` * Date/time ** `date_nanos` @@ -119,7 +120,7 @@ consequences. An {esql} query on a `text` field is case-sensitive. Furthermore, a subfield may have been mapped with a <>, which can transform the original string. 
Or it may have been mapped with <>, which can truncate the string. None of these mapping operations are applied to -an {esql} query, which may lead to false positives or negatives. +an {esql} query, which may lead to false positives or negatives. To avoid these issues, a best practice is to be explicit about the field that you query, and query `keyword` sub-fields instead of `text` fields. @@ -197,4 +198,4 @@ the <>. [[esql-limitations-kibana]] === Kibana limitations -include::esql-kibana.asciidoc[tag=limitations] \ No newline at end of file +include::esql-kibana.asciidoc[tag=limitations] diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index a13633a9f8d92..87748fee4f202 100644 --- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -62,9 +62,8 @@ clientip:keyword | @timestamp:keyword | status:keyword include::../ingest/processors/dissect.asciidoc[tag=intro-example-explanation] -A <> can be used to match values, but -exclude the value from the output. -// TODO: Change back to original text when https://github.com/elastic/elasticsearch/pull/102580 is merged +An empty key (`%{}`) or <> can be used to +match values, but exclude the value from the output. All matched values are output as keyword string data types. Use the <> to convert to another data type. 
@@ -137,8 +136,6 @@ include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier] include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier-result] |=== -//// -// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged include::../ingest/processors/dissect.asciidoc[tag=dissect-modifier-empty-right-padding] For example: @@ -150,7 +147,6 @@ include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier] |=== include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier-result] |=== -//// [[esql-append-modifier]] ====== Append modifier (`+`) @@ -180,11 +176,9 @@ include::{esql-specs}/docs.csv-spec[tag=dissectAppendWithOrderModifier-result] [[esql-named-skip-key]] ====== Named skip key (`?`) -// include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] -// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged - -Dissect supports ignoring matches in the final result. This can be done with a -named skip key using the `{?name}` syntax: +include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] +This can be done with a named skip key using the `{?name}` syntax. In the +following query, `ident` and `auth` are not added to the output table: [source.merge.styled,esql] ---- @@ -199,7 +193,7 @@ include::{esql-specs}/docs.csv-spec[tag=dissectNamedSkipKey-result] ===== Limitations // tag::dissect-limitations[] -The `DISSECT` command does not support reference keys and empty keys. +The `DISSECT` command does not support reference keys. 
// end::dissect-limitations[] [[esql-process-data-with-grok]] diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index 32e97b7316d84..2d4daa6ad2eca 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -2,19 +2,91 @@ [[esql-binary-operators]] === Binary operators -These binary comparison operators are supported: +[[esql-binary-operators-equality]] +==== Equality +[.text-center] +image::esql/functions/signature/equals.svg[Embedded,opts=inline] + +Supported types: + +include::types/equals.asciidoc[] + +==== Inequality `!=` +[.text-center] +image::esql/functions/signature/not_equals.svg[Embedded,opts=inline] -* equality: `==` -* inequality: `!=` -* less than: `<` -* less than or equal: `<=` -* larger than: `>` -* larger than or equal: `>=` +Supported types: -And these mathematical operators are supported: +include::types/not_equals.asciidoc[] +==== Less than `<` +[.text-center] +image::esql/functions/signature/less_than.svg[Embedded,opts=inline] + +Supported types: + +include::types/less_than.asciidoc[] + +==== Less than or equal to `<=` +[.text-center] +image::esql/functions/signature/less_than_or_equal.svg[Embedded,opts=inline] + +Supported types: + +include::types/less_than_or_equal.asciidoc[] + +==== Greater than `>` +[.text-center] +image::esql/functions/signature/greater_than.svg[Embedded,opts=inline] + +Supported types: + +include::types/greater_than.asciidoc[] + +==== Greater than or equal to `>=` +[.text-center] +image::esql/functions/signature/greater_than_or_equal.svg[Embedded,opts=inline] + +Supported types: + +include::types/greater_than_or_equal.asciidoc[] + +==== Add `+` [.text-center] image::esql/functions/signature/add.svg[Embedded,opts=inline] +Supported types: + +include::types/add.asciidoc[] + +==== Subtract `-` [.text-center] image::esql/functions/signature/sub.svg[Embedded,opts=inline] + +Supported types: + 
+include::types/sub.asciidoc[] + +==== Multiply `*` +[.text-center] +image::esql/functions/signature/mul.svg[Embedded,opts=inline] + +Supported types: + +include::types/mul.asciidoc[] + +==== Divide `/` +[.text-center] +image::esql/functions/signature/div.svg[Embedded,opts=inline] + +Supported types: + +include::types/div.asciidoc[] + +==== Modulus `%` +[.text-center] +image::esql/functions/signature/mod.svg[Embedded,opts=inline] + +Supported types: + +include::types/mod.asciidoc[] diff --git a/docs/reference/esql/functions/signature/greater_than_or_equal.svg b/docs/reference/esql/functions/signature/greater_than_or_equal.svg new file mode 100644 index 0000000000000..6afb36d4b4eff --- /dev/null +++ b/docs/reference/esql/functions/signature/greater_than_or_equal.svg @@ -0,0 +1 @@ +lhs>=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/less_than_or_equal.svg b/docs/reference/esql/functions/signature/less_than_or_equal.svg new file mode 100644 index 0000000000000..da93c172b7136 --- /dev/null +++ b/docs/reference/esql/functions/signature/less_than_or_equal.svg @@ -0,0 +1 @@ +lhs<=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_degrees.svg b/docs/reference/esql/functions/signature/to_degrees.svg deleted file mode 100644 index 01fe0a4770156..0000000000000 --- a/docs/reference/esql/functions/signature/to_degrees.svg +++ /dev/null @@ -1 +0,0 @@ -TO_DEGREES(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/to_cartesianpoint.asciidoc b/docs/reference/esql/functions/to_cartesianpoint.asciidoc new file mode 100644 index 0000000000000..1fb64542681e2 --- /dev/null +++ b/docs/reference/esql/functions/to_cartesianpoint.asciidoc @@ -0,0 +1,19 @@ +[discrete] +[[esql-to_cartesianpoint]] +=== `TO_CARTESIANPOINT` +Converts an input value to a `point` value. + +The input can be a single- or multi-valued field or an expression. +The input type must be a string or a cartesian `point`. 
+ +A string will only be successfully converted if it respects the +https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry[WKT Point] format: + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=to_cartesianpoint-str] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=to_cartesianpoint-str-result] +|=== diff --git a/docs/reference/esql/functions/to_geopoint.asciidoc b/docs/reference/esql/functions/to_geopoint.asciidoc new file mode 100644 index 0000000000000..83936af0c71b3 --- /dev/null +++ b/docs/reference/esql/functions/to_geopoint.asciidoc @@ -0,0 +1,19 @@ +[discrete] +[[esql-to_geopoint]] +=== `TO_GEOPOINT` +Converts an input value to a `geo_point` value. + +The input can be a single- or multi-valued field or an expression. +The input type must be a string or a `geo_point`. + +A string will only be successfully converted if it respects the +https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry[WKT Point] format: + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=to_geopoint-str] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=to_geopoint-str-result] +|=== diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc index 640006c936526..48a9b175d3d65 100644 --- a/docs/reference/esql/functions/type-conversion-functions.asciidoc +++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc @@ -9,9 +9,11 @@ // tag::type_list[] * <> +* <> * <> * <> * <> +* <> * <> * <> * <> @@ -22,9 +24,11 @@ // end::type_list[] include::to_boolean.asciidoc[] +include::to_cartesianpoint.asciidoc[] include::to_datetime.asciidoc[] include::to_degrees.asciidoc[] include::to_double.asciidoc[] +include::to_geopoint.asciidoc[] include::to_integer.asciidoc[] include::to_ip.asciidoc[] 
include::to_long.asciidoc[] diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc new file mode 100644 index 0000000000000..7783d08bc3aaa --- /dev/null +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -0,0 +1,12 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +date_period | date_period | date_period +date_period | datetime | datetime +datetime | date_period | datetime +datetime | time_duration | datetime +double | double | double +integer | integer | integer +long | long | long +time_duration | time_duration | time_duration +|=== diff --git a/docs/reference/esql/functions/types/div.asciidoc b/docs/reference/esql/functions/types/div.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/div.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc new file mode 100644 index 
0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/mod.asciidoc b/docs/reference/esql/functions/types/mod.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/mod.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/mul.asciidoc b/docs/reference/esql/functions/types/mul.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/mul.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/to_degrees.asciidoc b/docs/reference/esql/functions/types/neg.asciidoc similarity 
index 50% rename from docs/reference/esql/functions/types/to_degrees.asciidoc rename to docs/reference/esql/functions/types/neg.asciidoc index 7cb7ca46022c2..1b841483fb22e 100644 --- a/docs/reference/esql/functions/types/to_degrees.asciidoc +++ b/docs/reference/esql/functions/types/neg.asciidoc @@ -1,8 +1,9 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== v | result +date_period | date_period double | double -integer | double -long | double -unsigned_long | double +integer | integer +long | long +time_duration | time_duration |=== diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc new file mode 100644 index 0000000000000..ed26adf06ecde --- /dev/null +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -0,0 +1,11 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +date_period | date_period | date_period +datetime | date_period | datetime +datetime | time_duration | datetime +double | double | double +integer | integer | integer +long | long | long +time_duration | time_duration | time_duration +|=== diff --git a/docs/reference/esql/functions/unary.asciidoc b/docs/reference/esql/functions/unary.asciidoc index 2ee35b6c6256f..69ce754c1b4a0 100644 --- a/docs/reference/esql/functions/unary.asciidoc +++ b/docs/reference/esql/functions/unary.asciidoc @@ -2,7 +2,11 @@ [[esql-unary-operators]] === Unary operators -These unary mathematical operators are supported: +The only unary operator is negation (`-`): [.text-center] image::esql/functions/signature/neg.svg[Embedded,opts=inline] + 
+Supported types: + +include::types/neg.asciidoc[] diff --git a/docs/reference/mapping/types/flattened.asciidoc b/docs/reference/mapping/types/flattened.asciidoc index 87f5cebe21993..0a72ebc98ecef 100644 --- a/docs/reference/mapping/types/flattened.asciidoc +++ b/docs/reference/mapping/types/flattened.asciidoc @@ -294,8 +294,8 @@ The following mapping parameters are accepted: <>:: A string value which is substituted for any explicit `null` values within - the flattened object field. Defaults to `null`, which means null sields are - treated as if it were missing. + the flattened object field. Defaults to `null`, which means null fields are + treated as if they were missing. <>:: diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index c5a3ebb782edd..25b995eefc219 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -326,6 +326,7 @@ case the search request would fail with a version conflict error. The search response returned is identical as in the previous example. + ==== Percolate query and highlighting The `percolate` query is handled in a special way when it comes to highlighting. The queries hits are used @@ -549,6 +550,136 @@ The slightly different response: <1> The highlight fields have been prefixed with the document slot they belong to, in order to know which highlight field belongs to what document. +==== Named queries within percolator queries + +If a stored percolator query is a complex query, and you want to track which +its sub-queries matched a percolated document, then you can use the `\_name` +parameter for its sub-queries. In this case, in a response, each hit together with +a `_percolator_document_slot` field contains +`_percolator_document_slot__matched_queries` fields that show +which sub-queries matched each percolated document. 
+ +For example: + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_doc/5?refresh +{ + "query": { + "bool": { + "should": [ + { + "match": { + "message": { + "query": "Japanese art", + "_name": "query1" + } + } + }, + { + "match": { + "message": { + "query": "Holand culture", + "_name": "query2" + } + } + } + ] + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console] +-------------------------------------------------- +GET /my-index-000001/_search +{ + "query": { + "percolate": { + "field": "query", + "documents": [ + { + "message": "Japanse art" + }, + { + "message": "Holand culture" + }, + { + "message": "Japanese art and Holand culture" + }, + { + "message": "no-match" + } + ] + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +-------------------------------------------------- +{ + "took": 55, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total" : { + "value": 1, + "relation": "eq" + }, + "max_score": 1.1181908, + "hits": [ + { + "_index": "my-index-000001", + "_id": "5", + "_score": 1.1181908, + "_source": { + "query": { + "bool": { + "should": [ + { + "match": { + "message": { + "query": "Japanese art", + "_name": "query1" + } + } + }, + { + "match": { + "message": { + "query": "Holand culture", + "_name": "query2" + } + } + } + ] + } + } + }, + "fields" : { + "_percolator_document_slot" : [0, 1, 2], + "_percolator_document_slot_0_matched_queries" : ["query1"], <1> + "_percolator_document_slot_1_matched_queries" : ["query2"], <2> + "_percolator_document_slot_2_matched_queries" : ["query1", "query2"] <3> + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 55,/"took": "$body.took",/] +<1> The first document matched only the first sub-query. 
+<2> The second document matched only the second sub-query. +<3> The third document matched both sub-queries. + ==== Specifying multiple percolate queries It is possible to specify multiple `percolate` queries in a single search request: diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index e924cc05376d9..46a9aafdd1af8 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -52,10 +52,42 @@ text. (Required, string) The query text you want to use for search. +`pruning_config` :::: +(Optional, object) +preview:[] +Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. +Default: Disabled. ++ +-- +Parameters for `<pruning_config>` are: + +`tokens_freq_ratio_threshold`:: +(Optional, float) +preview:[] +Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. +This value must be between 1 and 100. +Default: `5`. + +`tokens_weight_threshold`:: +(Optional, float) +preview:[] +Tokens whose weight is less than `tokens_weight_threshold` are considered nonsignificant and pruned. +This value must be between 0 and 1. +Default: `0.4`. + +`only_score_pruned_tokens`:: +(Optional, boolean) +preview:[] +If `true` we only input pruned tokens into scoring, and discard non-pruned tokens. +It is strongly recommended to set this to `false` for the main query, but this can be set to `true` for a rescore query to get more relevant results. +Default: `false`. + +NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_threshold` were chosen based on tests using ELSER that provided the most optimal results. 
+-- [discrete] [[text-expansion-query-example]] -=== Example +=== Example ELSER query The following is an example of the `text_expansion` query that references the ELSER model to perform semantic search. For a more detailed description of how @@ -69,7 +101,7 @@ GET my-index/_search "query":{ "text_expansion":{ "ml.tokens":{ - "model_id":".elser_model_1", + "model_id":".elser_model_2", "model_text":"How is the weather in Jamaica?" } } @@ -78,7 +110,108 @@ GET my-index/_search ---- // TEST[skip: TBD] +[discrete] +[[text-expansion-query-with-pruning-config-example]] +=== Example ELSER query with pruning configuration + +The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. +The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. +[source,console] +---- +GET my-index/_search +{ + "query":{ + "text_expansion":{ + "ml.tokens":{ + "model_id":".elser_model_2", + "model_text":"How is the weather in Jamaica?" + }, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } +} +---- +// TEST[skip: TBD] + +[discrete] +[[text-expansion-query-with-pruning-config-and-rescore-example]] +=== Example ELSER query with pruning configuration and rescore + +The following is an extension to the above example that adds a <> function on top of the preview:[] pruning configuration to the `text_expansion` query. +The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. +Rescoring the query with the tokens that were originally pruned from the query may improve overall search relevance when using this pruning strategy. + +[source,console] +---- +GET my-index/_search +{ + "query":{ + "text_expansion":{ + "ml.tokens":{ + "model_id":".elser_model_2", + "model_text":"How is the weather in Jamaica?" 
+ }, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + }, + "rescore": { + "window_size": 100, + "query": { + "rescore_query": { + "text_expansion": { + "ml.tokens": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + }, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + } + } +} +---- +//TEST[skip: TBD] + [NOTE] ==== Depending on your data, the text expansion query may be faster with `track_total_hits: false`. ==== + +[discrete] +[[weighted-tokens-query-example]] +=== Example Weighted token query + +In order to quickly iterate during tests, we exposed a new preview:[] `weighted_tokens` query for evaluation of tokenized datasets. +While this is not a query that is intended for production use, it can be used to quickly evaluate relevance using various pruning configurations. + +[source,console] +---- +POST /docs/_search +{ + "query": { + "weighted_tokens": { + "query_expansion": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + } +} +---- +//TEST[skip: TBD] diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 340ef3a5c57c4..068cb3d2f127b 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes 
in each release. * <> * <> +* <> * <> * <> * <> @@ -59,6 +60,7 @@ This section summarizes the changes in each release. include::release-notes/8.13.0.asciidoc[] include::release-notes/8.12.0.asciidoc[] +include::release-notes/8.11.3.asciidoc[] include::release-notes/8.11.2.asciidoc[] include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] diff --git a/docs/reference/release-notes/8.11.2.asciidoc b/docs/reference/release-notes/8.11.2.asciidoc index ebf5db2e2505e..75987ce6139a6 100644 --- a/docs/reference/release-notes/8.11.2.asciidoc +++ b/docs/reference/release-notes/8.11.2.asciidoc @@ -8,6 +8,12 @@ Also see <>. === Known issues include::8.10.3.asciidoc[tag=no-preventive-gc-issue] +[float] +[[security-updates-8.11.2]] +=== Security updates + +* The 8.11.2 patch release contains a fix for a potential security vulnerability. https://discuss.elastic.co/c/announcements/security-announcements/31[Please see our security advisory for more details]. + [[bug-8.11.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.11.3.asciidoc b/docs/reference/release-notes/8.11.3.asciidoc new file mode 100644 index 0000000000000..ddeb50dad1f75 --- /dev/null +++ b/docs/reference/release-notes/8.11.3.asciidoc @@ -0,0 +1,28 @@ +[[release-notes-8.11.3]] +== {es} version 8.11.3 + +coming[8.11.3] + +Also see <>. 
+ +[[bug-8.11.3]] +[float] +=== Bug fixes + +Application:: +* Use latest version of entsearch ingestion pipeline {es-pull}103087[#103087] + +ES|QL:: +* Allow match field in enrich fields {es-pull}102734[#102734] +* Collect warnings in compute service {es-pull}103031[#103031] (issues: {es-issue}100163[#100163], {es-issue}103028[#103028], {es-issue}102871[#102871], {es-issue}102982[#102982]) + +ILM+SLM:: +* [ILM] More resilient when a policy is added to searchable snapshot {es-pull}102741[#102741] (issue: {es-issue}101958[#101958]) + +Mapping:: +* Ensure `dynamicMapping` updates are handled in insertion order {es-pull}103047[#103047] + +Transform:: +* Ensure transform `_schedule_now` API only triggers the expected transform task {es-pull}102958[#102958] (issue: {es-issue}102956[#102956]) + + diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 55f277218d210..ca8a191ad4b2c 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -611,9 +611,10 @@ The `similarity` parameter is the direct vector similarity calculation. * `l2_norm`: also known as Euclidean, will include documents where the vector is within the `dims` dimensional hypersphere with radius `similarity` with origin at `query_vector`. -* `cosine` & `dot_product`: Only return vectors where the cosine similarity or dot-product are at least the provided +* `cosine`, `dot_product`, and `max_inner_product`: Only return vectors where the cosine similarity or dot-product are at least the provided `similarity`. -- +Read more here: <> end::knn-similarity[] tag::lenient[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 15f7961298bf2..59d96d1a26904 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -18,6 +18,7 @@ not be included yet. 
* <> * <> * <> +* <> * <> * <> * <> @@ -66,6 +67,7 @@ include::{es-repo-dir}/behavioral-analytics/apis/index.asciidoc[] include::{es-repo-dir}/cat.asciidoc[] include::{es-repo-dir}/cluster.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] +include::{es-repo-dir}/connector/apis/connector-apis.asciidoc[] include::{es-repo-dir}/data-streams/data-stream-apis.asciidoc[] include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/ingest/apis/enrich/index.asciidoc[] diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index ef772ed4c0402..b162083ebb926 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -51,7 +51,7 @@ equal to `1`. Defaults to `60`. `window_size`:: (Optional, integer) This value determines the size of the individual result sets per query. A higher value will improve result relevance at the cost of performance. The final -ranked result set is pruned down to the search request's <. +ranked result set is pruned down to the search request's <>. `window_size` must be greater than or equal to `size` and greater than or equal to `1`. Defaults to `100`. diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index ff64535c705d9..496e0cf1b9d4f 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -547,6 +547,7 @@ score = 0.9 * match_score + 0.1 * knn_score_image-vector + 0.5 * knn_score_title ``` [discrete] +[[knn-similarity-search]] ==== Search kNN with expected similarity While kNN is a powerful tool, it always tries to return `k` nearest neighbors. Consequently, when using `knn` with @@ -563,6 +564,18 @@ minimum similarity for a vector to be considered a match. 
The `knn` search flow * Do not return any vectors that are further away than the configured `similarity` -- +NOTE: `similarity` is the true <> before it has been transformed into `_score` and boost applied. + +For each configured <>, here is the corresponding inverted `_score` function. This way, if you want to filter from a `_score` perspective, you can apply this minor transformation to correctly reject irrelevant results. +-- + - `l2_norm`: `sqrt((1 / _score) - 1)` + - `cosine`: `(2 * _score) - 1` + - `dot_product`: `(2 * _score) - 1` + - `max_inner_product`: + - `_score < 1`: `1 - (1 / _score)` + - `_score >= 1`: `_score - 1` +-- + +Here is an example. In this example we search for the given `query_vector` for `k` nearest neighbors. However, with `filter` applied and requiring that the found vectors have at least the provided `similarity` between them. [source,console] diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc new file mode 100644 index 0000000000000..7fbdecc0aebce --- /dev/null +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -0,0 +1,276 @@ +[[semantic-search-inference]] +=== Tutorial: semantic search with the {infer} API +++++ +Semantic search with the {infer} API +++++ + +The instructions in this tutorial show you how to use the {infer} API with the +Open AI service to perform semantic search on your data. The following example +uses OpenAI's `text-embedding-ada-002` second generation embedding model. You +can use any OpenAI model; they are all supported by the {infer} API. + + +[discrete] +[[infer-openai-requirements]] +==== Requirements + +An https://openai.com/[OpenAI account] is required to use the {infer} API with +the OpenAI service.
+ + +[discrete] +[[infer-text-embedding-task]] +==== Create the inference task + +Create the {infer} task by using the <>: + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings <1> +{ + "service": "openai", + "service_settings": { + "api_key": "" <2> + }, + "task_settings": { + "model": "text-embedding-ada-002" <3> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path. +<2> The API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The name of the embedding model to use. You can find the list of OpenAI +embedding models +https://platform.openai.com/docs/guides/embeddings/embedding-models[here]. + + +[discrete] +[[infer-openai-mappings]] +==== Create the index mapping + +The mapping of the destination index - the index that contains the embeddings +that the model will create based on your input text - must be created. The +destination index must have a field with the <> +field type to index the output of the OpenAI model. + +[source,console] +-------------------------------------------------- +PUT openai-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1536, <3> + "element_type": "byte", + "similarity": "dot_product" <4> + }, + "content": { <5> + "type": "text" <6> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model.
Find this value in the +https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI documentation] +of the model you use. +<4> The faster `dot_product` function can be used to calculate similarity +because OpenAI embeddings are normalised to unit length. You can check the +https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use[OpenAI docs] +about which similarity function to use. +<5> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<6> The field type, which is `text` in this example. + + +[discrete] +[[infer-openai-inference-ingest-pipeline]] +==== Create an ingest pipeline with an inference processor + +Create an <> with an +<> and use the OpenAI model you created +above to infer against the data that is being ingested in the +pipeline. + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/openai_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "openai_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference model you created by using the +<>. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +//// +[source,console] +---- +DELETE _ingest/pipeline/openai_embeddings +---- +// TEST[continued] +//// + + +[discrete] +[[infer-load-data]] +==== Load data + +In this step, you load the data that you later use in the {infer} ingest +pipeline to create embeddings from it. + +Use the `msmarco-passagetest2019-top1000` data set, which is a subset of the MS +MARCO Passage Ranking data set. It consists of 200 queries, each accompanied by +a list of relevant text passages.
All unique passages, along with their IDs, +have been extracted from that data set and compiled into a +https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarco-passagetest2019-unique.tsv[tsv file]. + +Download the file and upload it to your cluster using the +{kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer] +in the {ml-app} UI. Assign the name `id` to the first column and `content` to +the second column. The index name is `test-data`. Once the upload is complete, +you can see an index named `test-data` with 182469 documents. + + +[discrete] +[[reindexing-data-infer]] +==== Ingest the data through the {infer} ingest pipeline + +Create the embeddings from the text by reindexing the data through the {infer} +pipeline that uses the OpenAI model as the inference model. + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "openai-embeddings", + "pipeline": "openai_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker, which enables you to +follow the progress closely and detect errors early. + +NOTE: The +https://platform.openai.com/account/limits[rate limit of your OpenAI account] +may affect the throughput of the reindexing process. If this happens, change +`size` to `3` or a similar value in magnitude.
+ +The call returns a task ID to monitor the progress: + +[source,console] +---- +GET _tasks/ +---- +// TEST[skip:TBD] + +You can also cancel the reindexing process if you don't want to wait until the +reindexing process is fully complete which might take hours: + +[source,console] +---- +POST _tasks//_cancel +---- +// TEST[skip:TBD] + + +[discrete] +[[infer-semantic-search]] +==== Semantic search + +After the dataset has been enriched with the embeddings, you can query the data +using {ref}/knn-search.html#knn-semantic-search[semantic search]. Pass a +`query_vector_builder` to the k-nearest neighbor (kNN) vector search API, and +provide the query text and the model you have used to create the embeddings. + +NOTE: If you cancelled the reindexing process, you run the query only a part of +the data which affects the quality of your results. + +[source,console] +-------------------------------------------------- +GET openai-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "openai_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `openai-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "openai-embeddings", + "_id": "DDd5OowBHxQKHyc3TDSC", + "_score": 0.83704096, + "_source": { + "id": 862114, + "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. 
Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." + } + }, + { + "_index": "openai-embeddings", + "_id": "ajd5OowBHxQKHyc3TDSC", + "_score": 0.8345704, + "_source": { + "id": 820622, + "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." + } + }, + { + "_index": "openai-embeddings", + "_id": "Djd5OowBHxQKHyc3TDSC", + "_score": 0.8327426, + "_source": { + "id": 8202683, + "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." 
+ } + }, + (...) + ] +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 96281d12102bb..f4768e5c3a23d 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -135,3 +135,4 @@ include::{es-repo-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc ** The https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] repo contains a number of interactive semantic search examples in the form of executable Python notebooks, using the {es} Python client include::semantic-search-elser.asciidoc[] +include::semantic-search-inference.asciidoc[] diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 032d4f47bf678..e204061c28458 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -123,7 +123,9 @@ settings belong in the `elasticsearch.yml` file. `https`. Defaults to `https`. When using HTTPS, this repository type validates the repository's certificate chain using the JVM-wide truststore. Ensure that the root certificate authority is in this truststore using the JVM's - `keytool` tool. + `keytool` tool. If you have a custom certificate authority for your S3 repository + and you use the {es} <>, then you will need to reinstall your + CA certificate every time you upgrade {es}. 
`proxy.host`:: diff --git a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc index eaff47f5d7909..74cbab8c0b4a2 100644 --- a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[delete-synonym-rule]] === Delete synonym rule -beta::[] - ++++ Delete synonym rule ++++ diff --git a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc index 6ba4dcdc8f7be..9ba33ff3a5c75 100644 --- a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[delete-synonyms-set]] === Delete synonyms set -beta::[] - ++++ Delete synonyms set ++++ diff --git a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc index 6ce978ae68ac6..c6c35e0efecca 100644 --- a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[get-synonym-rule]] === Get synonym rule -beta::[] - ++++ Get synonym rule ++++ diff --git a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc index ddd7d2079dbf5..70bb5fb69526d 100644 --- a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[get-synonyms-set]] === Get synonyms set -beta::[] - ++++ Get synonyms set ++++ diff --git a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc index 2522542886d9e..705a24c809e99 100644 --- a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc +++ b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc @@ -1,8 +1,6 @@ [[list-synonyms-sets]] === List synonyms sets -beta::[] - ++++ List synonyms sets 
++++ diff --git a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc index 95492c95d36fe..de2865632d55e 100644 --- a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[put-synonym-rule]] === Create or update synonym rule -beta::[] - ++++ Create or update synonym rule ++++ diff --git a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc index a3c06c70db17b..5651c4c99adcd 100644 --- a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[put-synonyms-set]] === Create or update synonyms set -beta::[] - ++++ Create or update synonyms set ++++ diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index 6849477177dcf..9b92ba8e8579d 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -1,8 +1,6 @@ [[synonyms-apis]] == Synonyms APIs -beta::[] - ++++ Synonyms APIs ++++ diff --git a/docs/reference/troubleshooting/network-timeouts.asciidoc b/docs/reference/troubleshooting/network-timeouts.asciidoc index ab60eeff1b1a9..1920dafe62210 100644 --- a/docs/reference/troubleshooting/network-timeouts.asciidoc +++ b/docs/reference/troubleshooting/network-timeouts.asciidoc @@ -34,9 +34,9 @@ end::troubleshooting-network-timeouts-packet-capture-fault-detection[] tag::troubleshooting-network-timeouts-threads[] * Long waits for particular threads to be available can be identified by taking -stack dumps (for example, using `jstack`) or a profiling trace (for example, -using Java Flight Recorder) in the few seconds leading up to the relevant log -message. 
+stack dumps of the main {es} process (for example, using `jstack`) or a +profiling trace (for example, using Java Flight Recorder) in the few seconds +leading up to the relevant log message. + The <> API sometimes yields useful information, but bear in mind that this API also requires a number of `transport_worker` and diff --git a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 36e3a2cb5e2a9..6df51189e918e 100644 --- a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -48,6 +48,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -99,7 +100,7 @@ protected ClientYamlTestClient initClientYamlTestClient( final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion, + final Predicate clusterFeaturesPredicate, final String os ) { return new ClientYamlDocsTestClient( @@ -107,7 +108,7 @@ protected ClientYamlTestClient initClientYamlTestClient( restClient, hosts, esVersion, - masterVersion, + clusterFeaturesPredicate, os, this::getClientBuilderWithSniffedHosts ); diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 7f672ece21f66..263602c9841a8 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2664,121 +2664,241 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/modules/apm/METERING.md b/modules/apm/METERING.md new file mode 100644 index 
0000000000000..0f5fcc977295d --- /dev/null +++ b/modules/apm/METERING.md @@ -0,0 +1,142 @@ +# Metrics in Elasticsearch + +Elasticsearch has the metrics API available in server's package +`org.elasticsearch.telemetry.metric`. +This package contains base classes/interfaces for creating and working with metrics. +Please refer to the javadocs provided in these classes in that package for more details. +The entry point for working with metrics is `MeterRegistry`. + +## Implementation +We use elastic's apm-java-agent as an implementation of the API we expose. +The implementation can be found in `:modules:apm`. +The apm-java-agent is responsible for buffering metrics and, upon metrics_interval, +sending them over to the apm server. +Metrics_interval is configured via a `tracing.apm.agent.metrics_interval` setting. +The agent also collects a number of JVM metrics. +See https://www.elastic.co/guide/en/apm/agent/java/current/metrics.html#metrics-jvm + + +## How to choose an instrument + +The choice of the right instrument is not always easy as often differences are subtle. +The simplified algorithm could be as follows: + +1. You want to measure something (absolute value) + 1. values are non-additive + 1. use a gauge + 2. Example: a cpu temperature + 2. values are additive + 1. use asynchronous counter + 2. Example: total number of requests +2. You want to count something + 1. values are monotonically increasing + 1. use a counter + 2. Example: Recording a failed authentication count + 2. values can be decreased + 1. use UpDownCounter + 2. Example: Number of orders in a queue +3. You want to record statistics + 1. use a histogram + 1.
Example: A statistics about how long it took to access a value from cache + +refer to https://opentelemetry.io/docs/specs/otel/metrics/supplementary-guidelines/#instrument-selection +for more details + +## How to name an instrument +See the naming guidelines for metrics: +[NAMING GUIDE](NAMING.md) + +### Restarts and overflows +if the instrument is correctly chosen, the apm server will be able to determine if the metrics +were restarted (i.e. node was restarted) or there was a counter overflow +(the metric in ES might use an int internally, but apm backend might have a long ) + +## How to use an instrument +There are 2 types of usages of an instrument depending on a type. +- For synchronous instrument (counter/UpDownCounter) we need to register an instrument with + `MeterRegistry` and use the returned value to increment a value of that instrument +```java + MeterRegistry registry; + LongCounter longCounter = registry.registerLongCounter("es.test.requests.count", "a test counter", "count"); + longCounter.increment(); + longCounter.incrementBy(1, Map.of("name", "Alice")); + longCounter.incrementBy(1, Map.of("name", "Bob")); +``` + +- For asynchronous instrument (gauge/AsynchronousCounter) we register an instrument + and have to provide a callback that will report the absolute measured value. + This callback has to be provided upon registration and cannot be changed. 
+```java +MeterRegistry registry; +long someValue = 1; +registry.registerLongGauge("es.test.cpu.temperature", "the current CPU temperature as measured by psensor", "degrees Celsius", +() -> new LongWithAttributes(someValue, Map.of("cpuNumber", 1))); +``` + +If we don’t have access to ‘state’ that will be fetched on a metric event (when the callback is executed) +we can use a utility LongGaugeMetric or DoubleGaugeMetric +```java +MeterRegistry meterRegistry ; +LongGaugeMetric longGaugeMetric = LongGaugeMetric.create(meterRegistry, "es.test.gauge", "a test gauge", "total value"); +longGaugeMetric.set(123L); +``` +### The use of attributes aka dimensions +Each instrument can attach attributes to a reported value. This helps drilling down into the details +of the value that was reported during the metric event. + + +## Development + +### Mock http server + +The quickest way to verify that your metrics are working is to run `./gradlew run --with-apm-server`. +This will run ES node (or nodes in serverless) and also start a mock http server that will act +as an apm server. This fake http server will log all the http messages it receives from apm-agent. + +### With APM server in cloud +You can also run local ES node with an apm server in cloud. +Create a new deployment in cloud, then click the 'hamburger' on the left, scroll to Observability and click APM under it. +At the upper right corner there is `Add data` link, then scroll down to `ApmAgents` section and pick Java. +There you should be able to see `elastic.apm.secret_token` and `elastic.apm.server_url`. You will use them in the next step. + +Edit your `~/.gradle/init.d/apm.gradle` and replace the secret_token and the server_url.
+```groovy +rootProject { + if (project.name == 'elasticsearch' && Boolean.getBoolean('metrics.enabled')) { + afterEvaluate { + testClusters.matching { it.name == "runTask" }.configureEach { + setting 'xpack.security.audit.enabled', 'true' + keystore 'tracing.apm.secret_token', 'TODO-REPLACE' + setting 'telemetry.metrics.enabled', 'true' + setting 'tracing.apm.agent.server_url', 'https://TODO-REPLACE-URL.apm.eastus2.staging.azure.foundit.no:443' + } + } + } +} +``` + +The example use: +``` +./gradlew :run -Dmetrics.enabled=true +``` + +#### Logging +with any approach you took to run your ES with APM you will find apm-agent.json file +in ES's logs directory. If there are any problems with connecting to APM you will see WARN/ERROR messages. +We run apm-agent with logs at WARN level, so normally you should not see any logs there. + +When running ES in cloud, logs are being also indexed in a logging cluster, so you will be able to find them +in kibana. The `datastream.dataset` is `elasticsearch.apm_agent` + + +### Testing +We currently provide a base `TestTelemetryPlugin` which should help you write an integration test. +See an example `S3BlobStoreRepositoryTests` + + + + +# Links and further reading +https://opentelemetry.io/docs/specs/otel/metrics/supplementary-guidelines/ + +https://www.elastic.co/guide/en/apm/guide/current/data-model-metrics.html diff --git a/modules/apm/NAMING.md b/modules/apm/NAMING.md new file mode 100644 index 0000000000000..8e8d1bf2463e2 --- /dev/null +++ b/modules/apm/NAMING.md @@ -0,0 +1,73 @@ +# Metrics Naming Guidelines + +We propose a set of guidelines to build consistent and readable names for metrics. The guidelines cover how to build a good **hierarchical** name, the syntax of **elements** in a name, the usage of **dimensions** (attributes), **pluralization **and** suffixes**. + +This set of “rules” has been built by looking at naming conventions and best practices used by other software (e.g. 
Prometheus, Datadog) or standards (OpenTelemetry, OpenMetrics — see their specifications for details). \ +They follow OpenTelemetry guidelines most closely with some ES specifics. + +## Guidelines + +A metric name should be composed of **elements** limited by **separators** to organize them in a **hierarchy**. + +**Elements** should be lower-case, and use underscore (`_`) to combine words in the same element level (e.g. `blob_cache`). + +The **separator** character is dot (`.`). + +The **hierarchy** should be built by putting "more common" elements at the beginning, in order to facilitate the creation of new metrics under a common namespace. Each element in the metric name specializes or describes the prefix that precedes it. Rule of thumb: you could truncate the name at any segment, and what you're left with is something that makes sense by itself. + +Example: +* prefer `es.indices.docs.deleted.total` to `es.indices.total.deleted.docs` +* This way you can later add `es.indices.docs.count`, `es.indices.docs.ingested.total`, etc. + +Prefix metrics: +* Always use `es` as our root application name: this will give us a separate namespace and avoid any possibility of clashes with other metrics, and quick identification of Elasticsearch metrics on a dashboard. +* Follow the root prefix with a simple module name, team or area of code. E.g. `snapshot, repositories, indices, threadpool`. Notice the mix of singular and plural - here this is intentional, to reflect closely the existing names in the codebase (e.g. `reindex` and `indices`). +* In building a metric name, look for existing prefixes (e.g. module name and/or area of code, e.g. `blob_cache`) and for existing sub-elements as well (e.g. `error`) to build a good, consistent name. E.g. prefer the consistent use of `error.count` rather than introducing `failures`, `failed.count` or `errors`. +* Avoid having sub-metrics under a name that is also a metric (e.g. 
do not create names like `es.repositories.elements`,` es.repositories.elements.utilization`; use` es.repositories.element.count` and` es.repositories.element.utilization `instead). Such metrics are hard to handle well in Elasticsearch, or in some internal structures (e.g. nested maps). + +Keep the hierarchy compact: do not add elements if you don’t need to. There is a description field when registering a metric, prefer using that as an explanation. \ +For example, if emitting existing metrics from node stats, do not use the whole “object path”, but choose the most significant terms. + +The metric name can be generated but there should be no dynamic or variable content in the name: that content belongs to a **dimension** (attributes/labels). + +* Node name, node id, cluster id, etc. are all considered dynamic content that belongs to attributes, not to the metric name. +* When there are different "flavors" of a metric (i.e. `s3`, `azure`, etc) use an attribute rather than inserting it in the metric name. +* Rule of thumb: you should be able to do aggregations (e.g. sum, avg) across a dimension of a given metric (without the need to aggregate over different metric names); on the other hand, any aggregation across any dimension of a given metric should be meaningful. +* There might be exceptions of course. For example: + * When similar metrics have significantly different implementations/related metrics. \ + If we have only common metrics like `es.repositories.element.count, es.repositories.element.utilization, es.repositories.writes.total` for every blob storage implementation, then `s3,azure` should be an attribute. \ + If we have specific metrics, e.g. for s3 storage classes, prefer using prefixed metric names for the specific metrics: es.repositories.s3.deep_archive_access.total (but keep `es.repositories.elements`) + * When you have a finite and fixed set of names it might be OK to have them in the name (e.g. "`young`" and "`old`" for GC generations). 
+ +The metric name should NOT include its **unit**. Instead, the associated physical quantity should be added as a suffix, possibly following the general semantic names ([link](https://opentelemetry.io/docs/specs/semconv/general/metrics/#instrument-naming)). +Examples : +* es.process.jvm.collection.time instead of es.process.jvm.collection.seconds. +* es.process.mem.virtual.size, es.indices.storage.size (instead of es.process.mem.virtual.bytes, es.indices.storage.bytes) +* In case `size` has a known upper limit, consider using `usage` (e.g.: es.process.jvm.heap.usage when there is a es.process.jvm.heap.limit) +* es.indices.storage.write.io, instead of es.indices.storage.write.bytes_per_sec +* These can all be composed with the suffixes below, e.g. es.process.jvm.collection.time.total, es.indices.storage.write.total to represent the monotonic sum of time spent in GC and the total number of bytes written to indices respectively. + +**Pluralization** and **suffixes**: +* If the metric is unit-less, use plural: `es.threadpool.activethreads`, `es.indices.docs` +* Use `total` as a suffix for monotonic sums (e.g. es.indices.docs.deleted.total) +* Use `count` to represent the count of "things" in the metric name/namespace (e.g. if we have `es.process.jvm.classes.loaded`, we will express the number of classes currently loaded by the JVM as es.process.jvm.classes.loaded.count, and the total number of classes loaded since the JVM started as es.process.jvm.classes.loaded.total +* Use `ratio` to represent the ratio of two measures with identical unit (or unit-less) or measures that represent a fraction in the range [0, 1]. Examples: + * Exception: consider using utilization when the ratio is between a usage and its limit, e.g. the ratio between es.process.jvm.heap.usage and es.process.jvm.heap.limit should be es.process.jvm.heap.utilization +* If it has a unit of measure, then it should not be plural (and also not include the unit of measure, see above). 
Examples: es.process.jvm.collection.time, es.process.mem.virtual.usage, es.indices.storage.utilization + +### Attributes + +Attribute names should follow the same rules. In particular, these rules apply to attributes too: +* elements and separators +* hierarchy/namespaces +* units +* pluralization (when an attribute represents a measurement) + +For **pluralization**, when an attribute represents an entity, the attribute name should be singular (e.g. `es.security.realm_type`, not `es.security.realms_type` or `es.security.realm_types`), unless it represents a collection (e.g. `es.rest.request_headers`). + + +### List of previously registered metric names +You can inspect all previously registered metric names with +`./gradlew run -Dtests.es.logger.org.elasticsearch.telemetry.apm=debug` +This should help you find the already registered group that your metric +might fit into. diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index 979815f497583..2c33b4f2dc992 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -73,7 +73,7 @@ public Collection createComponents(PluginServices services) { final APMMeterService apmMeter = new APMMeterService(settings); apmAgentSettings.addClusterSettingsListeners(services.clusterService(), telemetryProvider.get(), apmMeter); logger.info("Sending apm metrics is {}", APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.get(settings) ? "enabled" : "disabled"); - logger.info("Sending apm traces is {}", APMAgentSettings.APM_ENABLED_SETTING.get(settings) ? "enabled" : "disabled"); + logger.info("Sending apm tracing is {}", APMAgentSettings.APM_ENABLED_SETTING.get(settings) ? 
"enabled" : "disabled"); return List.of(apmTracer, apmMeter); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index cd6d3d209b3ed..382fc9417eac0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -10,6 +10,8 @@ import io.opentelemetry.api.metrics.Meter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleAsyncCounterAdapter; @@ -47,6 +49,7 @@ * {@link #setProvider(Meter)} is used to change the provider for all existing meterRegistrar. */ public class APMMeterRegistry implements MeterRegistry { + private static final Logger logger = LogManager.getLogger(APMMeterRegistry.class); private final Registrar doubleCounters = new Registrar<>(); private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar doubleUpDownCounters = new Registrar<>(); @@ -207,6 +210,7 @@ public LongHistogram getLongHistogram(String name) { private > T register(Registrar registrar, T adapter) { assert registrars.contains(registrar) : "usage of unknown registrar"; + logger.debug("Registering an instrument with name: " + adapter.getName()); return registrar.register(adapter); } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java index c5ca8445b08eb..ae33c06b497db 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java +++ 
b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DisabledSecurityDataStreamTestCase.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.ClassRule; @@ -25,6 +26,7 @@ public abstract class DisabledSecurityDataStreamTestCase extends ESRestTestCase @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) + .feature(FeatureFlag.FAILURE_STORE_ENABLED) .setting("xpack.security.enabled", "false") .setting("xpack.watcher.enabled", "false") .build(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecyclePermissionsRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecyclePermissionsRestIT.java index d662427c99d13..1c6329dcf922f 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecyclePermissionsRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecyclePermissionsRestIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; @@ -38,6 +39,7 @@ public class DataStreamLifecyclePermissionsRestIT extends ESRestTestCase { @ClassRule public static 
ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) + .feature(FeatureFlag.FAILURE_STORE_ENABLED) .setting("xpack.watcher.enabled", "false") .setting("xpack.ml.enabled", "false") .setting("xpack.security.enabled", "true") diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 029fd88abd9c6..2aa5c07ad6be5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -229,11 +229,11 @@ public DataStreamsStatsAction.Response newResponse( assert dataStream != null; // Aggregate global stats - totalStoreSizeBytes += shardStat.getStoreStats().sizeInBytes(); + totalStoreSizeBytes += shardStat.getStoreStats().totalDataSetSizeInBytes(); // Aggregate data stream stats AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - stats.storageBytes += shardStat.getStoreStats().sizeInBytes(); + stats.storageBytes += shardStat.getStoreStats().totalDataSetSizeInBytes(); stats.maxTimestamp = Math.max(stats.maxTimestamp, shardStat.getMaxTimestamp()); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 4d2c2af2266b1..21b1316e5685b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -15,8 +15,8 @@ import org.elasticsearch.ResourceAlreadyExistsException; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResultDeduplicator; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -719,7 +719,7 @@ private void deleteIndexOnce(String indexName, String reason) { transportActionsDeduplicator.executeOnce( deleteIndexRequest, new ErrorRecordingActionListener( - DeleteIndexAction.NAME, + TransportDeleteIndexAction.TYPE.name(), indexName, errorStore, Strings.format("Data stream lifecycle encountered an error trying to delete index [%s]", indexName), diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index baa163c1ae75e..67bfae0740fb5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -26,9 +26,8 @@ */ public class DeleteDataStreamLifecycleAction { - public static final ActionType INSTANCE = new ActionType<>( - "indices:admin/data_stream/lifecycle/delete", - AcknowledgedResponse::readFrom + public static final ActionType INSTANCE = ActionType.acknowledgedResponse( + "indices:admin/data_stream/lifecycle/delete" ); private DeleteDataStreamLifecycleAction() {/* no instances */} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java index a4f4b88d17bca..f01d06fda8101 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java @@ -40,9 +40,8 @@ */ public class PutDataStreamLifecycleAction { - public static final ActionType INSTANCE = new ActionType<>( - "indices:admin/data_stream/lifecycle/put", - AcknowledgedResponse::readFrom + public static final ActionType INSTANCE = ActionType.acknowledgedResponse( + "indices:admin/data_stream/lifecycle/put" ); private PutDataStreamLifecycleAction() {/* no instances */} diff --git a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java index fa7b4ca1a80c0..37a83deeb3550 100644 --- a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java +++ b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java @@ -19,6 +19,8 @@ import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.ClassRule; +import static org.elasticsearch.test.cluster.FeatureFlag.FAILURE_STORE_ENABLED; + public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public DataStreamsClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) { @@ -43,6 +45,7 @@ protected Settings restClientSettings() { private static ElasticsearchCluster createCluster() { LocalClusterSpecBuilder clusterBuilder = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) + .feature(FAILURE_STORE_ENABLED) .setting("xpack.security.enabled", "true") 
.keystore("bootstrap.password", "x-pack-test-password") .user("x_pack_rest_user", "x-pack-test-password"); diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index b1e0cf8ed7d90..6496930764ab8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -210,10 +210,8 @@ setup: --- "Create data stream with failure store": - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102873" -# version: " - 8.10.99" -# reason: "data stream failure stores only creatable in 8.11+" + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" - do: allowed_warnings: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index a7d8476ee2dcf..303a584555f8f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -50,10 +50,8 @@ --- "Put index template with failure store": - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102873" -# version: " - 8.10.99" -# reason: "data stream failure stores only creatable in 8.11+" + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" features: allowed_warnings - do: diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index 
273b76955060b..4f28c9bb14f80 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -48,9 +48,10 @@ public ValueSource getValue() { } @Override - public IngestDocument execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.appendFieldValue(field, value, allowDuplicates); - return ingestDocument; + public IngestDocument execute(IngestDocument document) throws Exception { + String path = document.renderTemplate(field); + document.appendFieldValue(path, value, allowDuplicates); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java index c27bc4de85ec4..fa86bcda5047b 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java @@ -64,7 +64,7 @@ private void fieldsToRemoveProcessor(IngestDocument document) { } } else { for (TemplateScript.Factory field : fieldsToRemove) { - document.removeField(field); + document.removeField(document.renderTemplate(field)); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java index 229b796b89c75..2d7db39f3738e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java @@ -78,12 +78,13 @@ public boolean isIgnoreEmptyValue() { @Override public IngestDocument execute(IngestDocument document) { - if (overrideEnabled || document.hasField(field) == false || document.getFieldValue(field, Object.class) == null) { + 
String path = document.renderTemplate(field); + if (overrideEnabled || document.hasField(path) == false || document.getFieldValue(path, Object.class) == null) { if (copyFrom != null) { Object fieldValue = document.getFieldValue(copyFrom, Object.class, ignoreEmptyValue); - document.setFieldValue(field, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); + document.setFieldValue(path, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); } else { - document.setFieldValue(field, value, ignoreEmptyValue); + document.setFieldValue(path, value, ignoreEmptyValue); } } return document; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java index 1e40345208a1b..0b20fbc22e1cc 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java @@ -120,7 +120,7 @@ public void testMatchWithoutCaptures() throws Exception { public void testNullField() { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - doc.setFieldValue(fieldName, null); + doc.setFieldValue(fieldName, (Object) null); GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, @@ -138,7 +138,7 @@ public void testNullField() { public void testNullFieldWithIgnoreMissing() throws Exception { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - originalIngestDocument.setFieldValue(fieldName, null); + originalIngestDocument.setFieldValue(fieldName, (Object) null); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); GrokProcessor processor = new GrokProcessor( 
randomAlphaOfLength(10), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 1d10c30909906..f472e9d9bacd4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -123,7 +123,7 @@ public void testRenameNewFieldAlreadyExists() throws Exception { public void testRenameExistingFieldNullValue() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); - ingestDocument.setFieldValue(fieldName, null); + ingestDocument.setFieldValue(fieldName, (Object) null); String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); Processor processor = createRenameProcessor(fieldName, newFieldName, false); processor.execute(ingestDocument); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java index aeaf44bfd014c..308d6223c666e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java @@ -23,6 +23,7 @@ class ErrorCauseWrapper extends ElasticsearchException { private static final List> wrappedErrors = List.of( PainlessError.class, + PainlessExplainError.class, OutOfMemoryError.class, StackOverflowError.class, LinkageError.class diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 
b7c790fe1d0ad..7b84e3c9f1417 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -13,6 +13,7 @@ import org.elasticsearch.painless.Compiler.Loader; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; +import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.symbol.ScriptScope; import org.elasticsearch.script.ScriptContext; @@ -84,9 +85,11 @@ public PainlessScriptEngine(Settings settings, Map, List, Compiler> mutableContextsToCompilers = new HashMap<>(); Map, PainlessLookup> mutableContextsToLookups = new HashMap<>(); + final Map dedup = new HashMap<>(); + final Map filteredMethodCache = new HashMap<>(); for (Map.Entry, List> entry : contexts.entrySet()) { ScriptContext context = entry.getKey(); - PainlessLookup lookup = PainlessLookupBuilder.buildFromWhitelists(entry.getValue()); + PainlessLookup lookup = PainlessLookupBuilder.buildFromWhitelists(entry.getValue(), dedup, filteredMethodCache); mutableContextsToCompilers.put( context, new Compiler(context.instanceClazz, context.factoryClazz, context.statefulFactoryClazz, lookup) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index d32639bf3968f..e6f7c1a3bb617 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -49,7 +49,7 @@ public final class PainlessClass { this.getterMethodHandles = Map.copyOf(getterMethodHandles); this.setterMethodHandles = Map.copyOf(setterMethodHandles); - this.runtimeMethods = Map.copyOf(runtimeMethods); + 
this.runtimeMethods = runtimeMethods.equals(methods) ? this.methods : Map.copyOf(runtimeMethods); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index c775ba4b5b9b6..5bf8e5cde2afb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -163,18 +163,6 @@ public PainlessMethod lookupPainlessMethod(Class targetClass, boolean isStati return lookupPainlessObject(targetClass, objectLookup); } - public List lookupPainlessSubClassesMethod(String targetCanonicalClassName, String methodName, int methodArity) { - Objects.requireNonNull(targetCanonicalClassName); - - Class targetClass = canonicalTypeNameToType(targetCanonicalClassName); - - if (targetClass == null) { - return null; - } - - return lookupPainlessSubClassesMethod(targetClass, methodName, methodArity); - } - public List lookupPainlessSubClassesMethod(Class targetClass, String methodName, int methodArity) { Objects.requireNonNull(targetClass); Objects.requireNonNull(methodName); @@ -218,18 +206,6 @@ public List lookupPainlessSubClassesMethod(Class targetClass, return subMethods; } - public PainlessField lookupPainlessField(String targetCanonicalClassName, boolean isStatic, String fieldName) { - Objects.requireNonNull(targetCanonicalClassName); - - Class targetClass = canonicalTypeNameToType(targetCanonicalClassName); - - if (targetClass == null) { - return null; - } - - return lookupPainlessField(targetClass, isStatic, fieldName); - } - public PainlessField lookupPainlessField(Class targetClass, boolean isStatic, String fieldName) { Objects.requireNonNull(targetClass); Objects.requireNonNull(fieldName); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 0c1497b541954..d3f14b08c7dc2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -56,18 +56,14 @@ public final class PainlessLookupBuilder { - private static final Map painlessConstructorCache = new HashMap<>(); - private static final Map painlessMethodCache = new HashMap<>(); - private static final Map painlessFieldCache = new HashMap<>(); - private static final Map painlessClassBindingCache = new HashMap<>(); - private static final Map painlessInstanceBindingCache = new HashMap<>(); - private static final Map painlessFilteredCache = new HashMap<>(); - private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); - private static final Pattern FIELD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); + private static final Pattern METHOD_AND_FIELD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); - public static PainlessLookup buildFromWhitelists(List whitelists) { + public static PainlessLookup buildFromWhitelists( + List whitelists, + Map dedup, + Map filteredMethodCache + ) { PainlessLookupBuilder painlessLookupBuilder = new PainlessLookupBuilder(); String origin = "internal error"; @@ -92,7 +88,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { painlessLookupBuilder.addPainlessConstructor( targetCanonicalClassName, whitelistConstructor.canonicalTypeNameParameters, - whitelistConstructor.painlessAnnotations + whitelistConstructor.painlessAnnotations, + dedup ); } @@ -105,7 +102,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { whitelistMethod.methodName, whitelistMethod.returnCanonicalTypeName, 
whitelistMethod.canonicalTypeNameParameters, - whitelistMethod.painlessAnnotations + whitelistMethod.painlessAnnotations, + dedup ); } @@ -116,7 +114,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { targetCanonicalClassName, whitelistField.fieldName, whitelistField.canonicalTypeNameParameter, - whitelistField.painlessAnnotations + whitelistField.painlessAnnotations, + dedup ); } } @@ -129,7 +128,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { whitelistStatic.methodName, whitelistStatic.returnCanonicalTypeName, whitelistStatic.canonicalTypeNameParameters, - whitelistStatic.painlessAnnotations + whitelistStatic.painlessAnnotations, + dedup ); } @@ -141,7 +141,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { whitelistClassBinding.methodName, whitelistClassBinding.returnCanonicalTypeName, whitelistClassBinding.canonicalTypeNameParameters, - whitelistClassBinding.painlessAnnotations + whitelistClassBinding.painlessAnnotations, + dedup ); } @@ -152,7 +153,8 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { whitelistInstanceBinding.methodName, whitelistInstanceBinding.returnCanonicalTypeName, whitelistInstanceBinding.canonicalTypeNameParameters, - whitelistInstanceBinding.painlessAnnotations + whitelistInstanceBinding.painlessAnnotations, + dedup ); } } @@ -160,7 +162,7 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); } - return painlessLookupBuilder.build(); + return painlessLookupBuilder.build(dedup, filteredMethodCache); } // javaClassNamesToClasses is all the classes that need to be available to the custom classloader @@ -269,7 +271,7 @@ private static IllegalArgumentException lookupException(Throwable cause, String return new IllegalArgumentException(Strings.format(formatText, args), cause); } - public void addPainlessClass(Class clazz, Map, Object> annotations) { + 
private void addPainlessClass(Class clazz, Map, Object> annotations) { Objects.requireNonNull(clazz); Objects.requireNonNull(annotations); @@ -355,10 +357,11 @@ public void addPainlessClass(Class clazz, Map, Object> annotations) } } - public void addPainlessConstructor( + private void addPainlessConstructor( String targetCanonicalClassName, List canonicalTypeNameParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(targetCanonicalClassName); Objects.requireNonNull(canonicalTypeNameParameters); @@ -391,10 +394,15 @@ public void addPainlessConstructor( typeParameters.add(typeParameter); } - addPainlessConstructor(targetClass, typeParameters, annotations); + addPainlessConstructor(targetClass, typeParameters, annotations, dedup); } - public void addPainlessConstructor(Class targetClass, List> typeParameters, Map, Object> annotations) { + private void addPainlessConstructor( + Class targetClass, + List> typeParameters, + Map, Object> annotations, + Map dedup + ) { Objects.requireNonNull(targetClass); Objects.requireNonNull(typeParameters); @@ -473,7 +481,7 @@ public void addPainlessConstructor(Class targetClass, List> typePara ); if (existingPainlessConstructor == null) { - newPainlessConstructor = painlessConstructorCache.computeIfAbsent(newPainlessConstructor, Function.identity()); + newPainlessConstructor = (PainlessConstructor) dedup.computeIfAbsent(newPainlessConstructor, Function.identity()); painlessClassBuilder.constructors.put(painlessConstructorKey.intern(), newPainlessConstructor); } else if (newPainlessConstructor.equals(existingPainlessConstructor) == false) { throw lookupException( @@ -486,14 +494,15 @@ public void addPainlessConstructor(Class targetClass, List> typePara } } - public void addPainlessMethod( + private void addPainlessMethod( ClassLoader classLoader, String targetCanonicalClassName, String augmentedCanonicalClassName, String methodName, String returnCanonicalTypeName, List 
canonicalTypeNameParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(classLoader); @@ -561,7 +570,7 @@ public void addPainlessMethod( ); } - addPainlessMethod(targetClass, augmentedClass, methodName, returnType, typeParameters, annotations); + addPainlessMethod(targetClass, augmentedClass, methodName, returnType, typeParameters, annotations, dedup); } public void addPainlessMethod( @@ -570,7 +579,8 @@ public void addPainlessMethod( String methodName, Class returnType, List> typeParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(targetClass); @@ -585,7 +595,7 @@ public void addPainlessMethod( String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); - if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + if (METHOD_AND_FIELD_NAME_PATTERN.matcher(methodName).matches() == false) { throw new IllegalArgumentException( "invalid method name [" + methodName + "] for target class [" + targetCanonicalClassName + "]." 
); @@ -748,7 +758,7 @@ public void addPainlessMethod( ); if (existingPainlessMethod == null) { - newPainlessMethod = painlessMethodCache.computeIfAbsent(newPainlessMethod, key -> key); + newPainlessMethod = (PainlessMethod) dedup.computeIfAbsent(newPainlessMethod, Function.identity()); if (isStatic) { painlessClassBuilder.staticMethods.put(painlessMethodKey.intern(), newPainlessMethod); @@ -771,12 +781,13 @@ public void addPainlessMethod( } } - public void addPainlessField( + private void addPainlessField( ClassLoader classLoader, String targetCanonicalClassName, String fieldName, String canonicalTypeNameParameter, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(classLoader); @@ -827,15 +838,16 @@ public void addPainlessField( ); } - addPainlessField(targetClass, augmentedClass, fieldName, typeParameter, annotations); + addPainlessField(targetClass, augmentedClass, fieldName, typeParameter, annotations, dedup); } - public void addPainlessField( + private void addPainlessField( Class targetClass, Class augmentedClass, String fieldName, Class typeParameter, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(targetClass); @@ -849,7 +861,7 @@ public void addPainlessField( String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); - if (FIELD_NAME_PATTERN.matcher(fieldName).matches() == false) { + if (METHOD_AND_FIELD_NAME_PATTERN.matcher(fieldName).matches() == false) { throw new IllegalArgumentException( "invalid field name [" + fieldName + "] for target class [" + targetCanonicalClassName + "]." 
); @@ -946,7 +958,7 @@ public void addPainlessField( PainlessField newPainlessField = new PainlessField(javaField, typeParameter, annotations, methodHandleGetter, null); if (existingPainlessField == null) { - newPainlessField = painlessFieldCache.computeIfAbsent(newPainlessField, Function.identity()); + newPainlessField = (PainlessField) dedup.computeIfAbsent(newPainlessField, Function.identity()); painlessClassBuilder.staticFields.put(painlessFieldKey.intern(), newPainlessField); } else if (newPainlessField.equals(existingPainlessField) == false) { throw lookupException( @@ -981,7 +993,7 @@ public void addPainlessField( ); if (existingPainlessField == null) { - newPainlessField = painlessFieldCache.computeIfAbsent(newPainlessField, key -> key); + newPainlessField = (PainlessField) dedup.computeIfAbsent(newPainlessField, Function.identity()); painlessClassBuilder.fields.put(painlessFieldKey.intern(), newPainlessField); } else if (newPainlessField.equals(existingPainlessField) == false) { throw lookupException( @@ -1004,7 +1016,8 @@ public void addImportedPainlessMethod( String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(classLoader); @@ -1046,7 +1059,7 @@ public void addImportedPainlessMethod( ); } - addImportedPainlessMethod(targetClass, methodName, returnType, typeParameters, annotations); + addImportedPainlessMethod(targetClass, methodName, returnType, typeParameters, annotations, dedup); } public void addImportedPainlessMethod( @@ -1054,7 +1067,8 @@ public void addImportedPainlessMethod( String methodName, Class returnType, List> typeParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(targetClass); Objects.requireNonNull(methodName); @@ -1077,7 +1091,7 @@ public void addImportedPainlessMethod( ); } - if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + if 
(METHOD_AND_FIELD_NAME_PATTERN.matcher(methodName).matches() == false) { throw new IllegalArgumentException( "invalid imported method name [" + methodName + "] for target class [" + targetCanonicalClassName + "]." ); @@ -1182,7 +1196,7 @@ public void addImportedPainlessMethod( ); if (existingImportedPainlessMethod == null) { - newImportedPainlessMethod = painlessMethodCache.computeIfAbsent(newImportedPainlessMethod, key -> key); + newImportedPainlessMethod = (PainlessMethod) dedup.computeIfAbsent(newImportedPainlessMethod, Function.identity()); painlessMethodKeysToImportedPainlessMethods.put(painlessMethodKey.intern(), newImportedPainlessMethod); } else if (newImportedPainlessMethod.equals(existingImportedPainlessMethod) == false) { throw lookupException( @@ -1206,7 +1220,8 @@ public void addPainlessClassBinding( String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(classLoader); @@ -1247,15 +1262,16 @@ public void addPainlessClassBinding( ); } - addPainlessClassBinding(targetClass, methodName, returnType, typeParameters, annotations); + addPainlessClassBinding(targetClass, methodName, returnType, typeParameters, annotations, dedup); } - public void addPainlessClassBinding( + private void addPainlessClassBinding( Class targetClass, String methodName, Class returnType, List> typeParameters, - Map, Object> annotations + Map, Object> annotations, + Map dedup ) { Objects.requireNonNull(targetClass); Objects.requireNonNull(methodName); @@ -1333,7 +1349,7 @@ public void addPainlessClassBinding( } } - if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + if (METHOD_AND_FIELD_NAME_PATTERN.matcher(methodName).matches() == false) { throw new IllegalArgumentException( "invalid method name [" + methodName + "] for class binding [" + targetCanonicalClassName + "]." 
); @@ -1446,7 +1462,7 @@ public void addPainlessClassBinding( ); if (existingPainlessClassBinding == null) { - newPainlessClassBinding = painlessClassBindingCache.computeIfAbsent(newPainlessClassBinding, Function.identity()); + newPainlessClassBinding = (PainlessClassBinding) dedup.computeIfAbsent(newPainlessClassBinding, Function.identity()); painlessMethodKeysToPainlessClassBindings.put(painlessMethodKey.intern(), newPainlessClassBinding); } else if (newPainlessClassBinding.equals(existingPainlessClassBinding) == false) { throw lookupException( @@ -1469,7 +1485,8 @@ public void addPainlessInstanceBinding( String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters, - Map, Object> painlessAnnotations + Map, Object> painlessAnnotations, + Map dedup ) { Objects.requireNonNull(targetInstance); @@ -1509,7 +1526,7 @@ public void addPainlessInstanceBinding( ); } - addPainlessInstanceBinding(targetInstance, methodName, returnType, typeParameters, painlessAnnotations); + addPainlessInstanceBinding(targetInstance, methodName, returnType, typeParameters, painlessAnnotations, dedup); } public void addPainlessInstanceBinding( @@ -1517,7 +1534,8 @@ public void addPainlessInstanceBinding( String methodName, Class returnType, List> typeParameters, - Map, Object> painlessAnnotations + Map, Object> painlessAnnotations, + Map dedup ) { Objects.requireNonNull(targetInstance); Objects.requireNonNull(methodName); @@ -1542,7 +1560,7 @@ public void addPainlessInstanceBinding( ); } - if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + if (METHOD_AND_FIELD_NAME_PATTERN.matcher(methodName).matches() == false) { throw new IllegalArgumentException( "invalid method name [" + methodName + "] for instance binding [" + targetCanonicalClassName + "]." 
); @@ -1629,7 +1647,7 @@ public void addPainlessInstanceBinding( ); if (existingPainlessInstanceBinding == null) { - newPainlessInstanceBinding = painlessInstanceBindingCache.computeIfAbsent(newPainlessInstanceBinding, key -> key); + newPainlessInstanceBinding = (PainlessInstanceBinding) dedup.computeIfAbsent(newPainlessInstanceBinding, Function.identity()); painlessMethodKeysToPainlessInstanceBindings.put(painlessMethodKey.intern(), newPainlessInstanceBinding); } else if (newPainlessInstanceBinding.equals(existingPainlessInstanceBinding) == false) { throw lookupException( @@ -1649,16 +1667,19 @@ public void addPainlessInstanceBinding( } } - public PainlessLookup build() { + public PainlessLookup build(Map dedup, Map filteredMethodCache) { buildPainlessClassHierarchy(); setFunctionalInterfaceMethods(); - generateRuntimeMethods(); + generateRuntimeMethods(filteredMethodCache); cacheRuntimeHandles(); Map, PainlessClass> classesToPainlessClasses = Maps.newMapWithExpectedSize(classesToPainlessClassBuilders.size()); for (Map.Entry, PainlessClassBuilder> painlessClassBuilderEntry : classesToPainlessClassBuilders.entrySet()) { - classesToPainlessClasses.put(painlessClassBuilderEntry.getKey(), painlessClassBuilderEntry.getValue().build()); + classesToPainlessClasses.put( + painlessClassBuilderEntry.getKey(), + (PainlessClass) dedup.computeIfAbsent(painlessClassBuilderEntry.getValue().build(), Function.identity()) + ); } if (javaClassNamesToClasses.values().containsAll(canonicalClassNamesToClasses.values()) == false) { @@ -1817,7 +1838,7 @@ private void setFunctionalInterfaceMethod(Class targetClass, PainlessClassBui * {@link Map}. The {@link PainlessClass#runtimeMethods} {@link Map} is used exclusively to look up methods at * run-time resulting from calls with a def type value target. 
*/ - private void generateRuntimeMethods() { + private void generateRuntimeMethods(Map filteredMethodCache) { for (Map.Entry, PainlessClassBuilder> painlessClassBuilderEntry : classesToPainlessClassBuilders.entrySet()) { Class targetClass = painlessClassBuilderEntry.getKey(); PainlessClassBuilder painlessClassBuilder = painlessClassBuilderEntry.getValue(); @@ -1832,7 +1853,7 @@ private void generateRuntimeMethods() { || typeParameter == Long.class || typeParameter == Float.class || typeParameter == Double.class) { - generateFilteredMethod(targetClass, painlessClassBuilder, painlessMethod); + generateFilteredMethod(targetClass, painlessClassBuilder, painlessMethod, filteredMethodCache); } } } @@ -1842,10 +1863,11 @@ private void generateRuntimeMethods() { private static void generateFilteredMethod( Class targetClass, PainlessClassBuilder painlessClassBuilder, - PainlessMethod painlessMethod + PainlessMethod painlessMethod, + Map filteredMethodCache ) { String painlessMethodKey = buildPainlessMethodKey(painlessMethod.javaMethod().getName(), painlessMethod.typeParameters().size()); - PainlessMethod filteredPainlessMethod = painlessFilteredCache.get(painlessMethod); + PainlessMethod filteredPainlessMethod = filteredMethodCache.get(painlessMethod); if (filteredPainlessMethod == null) { Method javaMethod = painlessMethod.javaMethod(); @@ -1899,7 +1921,7 @@ private static void generateFilteredMethod( Map.of() ); painlessClassBuilder.runtimeMethods.put(painlessMethodKey.intern(), filteredPainlessMethod); - painlessFilteredCache.put(painlessMethod, filteredPainlessMethod); + filteredMethodCache.put(painlessMethod, filteredPainlessMethod); } catch (Exception exception) { throw new IllegalStateException( "internal error occurred attempting to generate a runtime method [" + painlessMethodKey + "]", diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AliasTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AliasTests.java index 
f9d87f5ce46b8..2ccc70685e6d7 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AliasTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AliasTests.java @@ -34,7 +34,9 @@ public void testNoShadowing() { IllegalArgumentException err = expectThrows( IllegalArgumentException.class, () -> PainlessLookupBuilder.buildFromWhitelists( - List.of(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.alias-shadow")) + List.of(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.alias-shadow")), + new HashMap<>(), + new HashMap<>() ) ); assertEquals( diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index 87b199cd1b43f..7b0b3b500d12c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -16,43 +16,52 @@ import org.elasticsearch.script.ScriptException; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(PainlessPlugin.BASE_WHITELISTS); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists( + PainlessPlugin.BASE_WHITELISTS, + new HashMap<>(), + new HashMap<>() + ); public void testExplain() { // Debug.explain can explain an object Object dummy = new Object(); - PainlessExplainError e = expectScriptThrows( - PainlessExplainError.class, - () -> 
exec("Debug.explain(params.a)", singletonMap("a", dummy), true) - ); + var wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec("Debug.explain(params.a)", singletonMap("a", dummy), true)); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + var e = (PainlessExplainError) wrapper.realCause; assertSame(dummy, e.getObjectToExplain()); assertThat(e.getHeaders(painlessLookup), hasEntry("es.to_string", singletonList(dummy.toString()))); assertThat(e.getHeaders(painlessLookup), hasEntry("es.java_class", singletonList("java.lang.Object"))); assertThat(e.getHeaders(painlessLookup), hasEntry("es.painless_class", singletonList("java.lang.Object"))); // Null should be ok - e = expectScriptThrows(PainlessExplainError.class, () -> exec("Debug.explain(null)")); + wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec("Debug.explain(null)")); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + e = (PainlessExplainError) wrapper.realCause; assertNull(e.getObjectToExplain()); assertThat(e.getHeaders(painlessLookup), hasEntry("es.to_string", singletonList("null"))); assertThat(e.getHeaders(painlessLookup), not(hasKey("es.java_class"))); assertThat(e.getHeaders(painlessLookup), not(hasKey("es.painless_class"))); // You can't catch the explain exception - e = expectScriptThrows(PainlessExplainError.class, () -> exec(""" + wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec(""" try { Debug.explain(params.a) } catch (Exception e) { return 1 }""", singletonMap("a", dummy), true)); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + e = (PainlessExplainError) wrapper.realCause; assertSame(dummy, e.getObjectToExplain()); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 04577d8ca9d81..b44be595b4178 100644 --- 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -19,6 +19,7 @@ import java.io.PrintWriter; import java.io.StringWriter; +import java.util.HashMap; import java.util.List; /** quick and dirty tools for debugging */ @@ -35,12 +36,8 @@ static String toString(Class iface, String source, CompilerSettings settings, PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, null, null, PainlessLookupBuilder.buildFromWhitelists(whitelists)).compile( - "", - source, - settings, - textifier - ); + new Compiler(iface, null, null, PainlessLookupBuilder.buildFromWhitelists(whitelists, new HashMap<>(), new HashMap<>())) + .compile("", source, settings, textifier); } catch (RuntimeException e) { textifier.print(outputWriter); e.addSuppressed(new Exception("current bytecode: \n" + output)); @@ -65,15 +62,8 @@ private static String tree( PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, null, null, PainlessLookupBuilder.buildFromWhitelists(whitelists)).compile( - "", - source, - settings, - textifier, - semanticPhaseVisitor, - irPhaseVisitor, - asmPhaseVisitor - ); + new Compiler(iface, null, null, PainlessLookupBuilder.buildFromWhitelists(whitelists, new HashMap<>(), new HashMap<>())) + .compile("", source, settings, textifier, semanticPhaseVisitor, irPhaseVisitor, asmPhaseVisitor); } catch (RuntimeException e) { textifier.print(outputWriter); e.addSuppressed(new Exception("current bytecode: \n" + output)); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index fc42caa364588..ed7fef33302bb 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -22,7 +22,11 @@ import java.util.HashMap; public class DefBootstrapTests extends ESTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(PainlessPlugin.BASE_WHITELISTS); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists( + PainlessPlugin.BASE_WHITELISTS, + new HashMap<>(), + new HashMap<>() + ); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java index c04dc4cd2f893..6a403c0692540 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java @@ -16,6 +16,7 @@ import org.junit.Before; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Set; @@ -26,7 +27,9 @@ public class LookupTests extends ESTestCase { @Before public void setup() { painlessLookup = PainlessLookupBuilder.buildFromWhitelists( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.lookup")) + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.lookup")), + new HashMap<>(), + new HashMap<>() ); } diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java index 0430fe3404f91..adca59bafcb36 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java +++ 
b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java @@ -55,7 +55,7 @@ public void testHighlightingWithMatchOnlyTextFieldMatchPhrase() throws IOExcepti .startObject() .field( "message", - "[.ds-.slm-history-5-2023.09.20-" + "[.ds-.slm-history-6-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) @@ -104,7 +104,7 @@ public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOExc .startObject() .field( "message", - "[.ds-.slm-history-5-2023.09.20-" + "[.ds-.slm-history-6-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index 7342a432dd5df..c4634f8d52729 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -9,8 +9,6 @@ package org.elasticsearch.index.mapper.extras; import org.apache.lucene.document.FeatureField; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -42,18 +40,6 @@ public class RankFeatureFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "rank_feature"; - public static class Defaults { - public static final FieldType FIELD_TYPE; - - static { - FieldType ft = new FieldType(); - ft.setTokenized(false); - ft.setIndexOptions(IndexOptions.NONE); - ft.setOmitNorms(true); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); - } - } - private static RankFeatureFieldType ft(FieldMapper in) { return ((RankFeatureFieldMapper) 
in).fieldType(); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 12829ca802425..4d04e83361252 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -206,14 +206,6 @@ public String analyzer() { return analyzer.name(); } - /** - * Indicates if position increments are counted. - * @return true if position increments are counted - */ - public boolean enablePositionIncrements() { - return enablePositionIncrements; - } - @Override protected String contentType() { return CONTENT_TYPE; diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index ae1adf4160c2a..02776eb277020 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -69,6 +68,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -1403,22 +1403,15 @@ public void testParentChildQueriesViaScrollApi() throws Exception { boolQuery().must(matchAllQuery()).filter(hasParentQuery("parent", matchAllQuery(), false)) }; for (QueryBuilder query : queries) { - SearchResponse scrollResponse = prepareSearch("test").setScroll(TimeValue.timeValueSeconds(30)) - .setSize(1) - .addStoredField("_id") - .setQuery(query) - .get(); - - assertNoFailures(scrollResponse); - assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); - int scannedDocs = 0; - do { - assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); - scannedDocs += scrollResponse.getHits().getHits().length; - scrollResponse = client().prepareSearchScroll(scrollResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get(); - } while (scrollResponse.getHits().getHits().length > 0); - clearScroll(scrollResponse.getScrollId()); - assertThat(scannedDocs, equalTo(10)); + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch("test").setScroll(TimeValue.timeValueSeconds(30)).setSize(1).addStoredField("_id").setQuery(query), + 10, + (respNum, response) -> { + assertNoFailures(response); + assertThat(response.getHits().getTotalHits().value, equalTo(10L)); + } + ); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java index 11c726481d0b3..408b3f204de1a 100644 --- 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java @@ -143,7 +143,7 @@ boolean matchDocId(int docId) throws IOException { } @Override - public float score() throws IOException { + public float score() { return score; } }; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 886a67443e831..9a2653a61b60d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -664,6 +664,11 @@ public > IFD getForField( CircuitBreakerService circuitBreaker = new NoneCircuitBreakerService(); return (IFD) builder.build(cache, circuitBreaker); } + + @Override + public void addNamedQuery(String name, Query query) { + delegate.addNamedQuery(name, query); + } }; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index e212264287937..be8d342254afd 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -530,6 +530,8 @@ static SearchExecutionContext configureContext(SearchExecutionContext context, b // as an analyzed string. 
wrapped.setAllowUnmappedFields(false); wrapped.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString); + // We need to rewrite queries with name to Lucene NamedQuery to find matched sub-queries of percolator query + wrapped.setRewriteToNamedQueries(); return wrapped; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 43f365a2a722b..83703dcf10971 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.NamedMatches; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -96,7 +97,30 @@ public void process(HitContext hitContext) throws IOException { IntStream slots = convertTopDocsToSlots(topDocs, pc.rootDocsBySlot); // _percolator_document_slot fields are document fields and should be under "fields" section in a hit - hitContext.hit().setDocumentField(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList()))); + List docSlots = slots.boxed().collect(Collectors.toList()); + hitContext.hit().setDocumentField(fieldName, new DocumentField(fieldName, docSlots)); + + // Add info what sub-queries of percolator query matched this each percolated document + if (fetchContext.getSearchExecutionContext().hasNamedQueries()) { + List leafContexts = percolatorIndexSearcher.getLeafContexts(); + assert leafContexts.size() == 1 : "Expected single leaf, but got [" + leafContexts.size() + "]"; + LeafReaderContext memoryReaderContext = leafContexts.get(0); + Weight 
weight = percolatorIndexSearcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1); + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + List namedMatchesList = NamedMatches.findNamedMatches( + weight.matches(memoryReaderContext, topDocs.scoreDocs[i].doc) + ); + if (namedMatchesList.isEmpty()) { + continue; + } + List matchedQueries = new ArrayList<>(namedMatchesList.size()); + for (NamedMatches match : namedMatchesList) { + matchedQueries.add(match.getName()); + } + String matchedFieldName = fieldName + "_" + docSlots.get(i) + "_matched_queries"; + hitContext.hit().setDocumentField(matchedFieldName, new DocumentField(matchedFieldName, matchedQueries)); + } + } } } }; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index 5f3ff5264497a..a924c0e323f96 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -9,7 +9,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -24,6 +26,7 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.lookup.LeafDocLookup; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -36,6 +39,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; 
+import java.util.List; import java.util.Map; import java.util.function.Function; @@ -335,4 +339,93 @@ public void testRangeQueriesWithNow() throws Exception { } } + public void testPercolateNamedQueries() { + String mapping = """ + { + "dynamic" : "strict", + "properties" : { + "my_query" : { "type" : "percolator" }, + "description" : { "type" : "text"}, + "num_of_bedrooms" : { "type" : "integer"}, + "type" : { "type" : "keyword"}, + "price": { "type": "float"} + } + } + """; + indicesAdmin().prepareCreate("houses").setMapping(mapping).get(); + String source = """ + { + "my_query" : { + "bool": { + "should": [ + { "match": { "description": { "query": "fireplace", "_name": "fireplace_query" } } }, + { "match": { "type": { "query": "detached", "_name": "detached_query" } } } + ], + "filter": { + "match": { + "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} + } + } + } + } + } + """; + prepareIndex("houses").setId("query_3_bedroom_detached_house_with_fireplace").setSource(source, XContentType.JSON).get(); + indicesAdmin().prepareRefresh().get(); + + source = """ + { + "my_query" : { + "bool": { + "filter": [ + { "match": { "description": { "query": "swimming pool", "_name": "swimming_pool_query" } } }, + { "match": { "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} } } + ] + } + } + } + """; + prepareIndex("houses").setId("query_3_bedroom_house_with_swimming_pool").setSource(source, XContentType.JSON).get(); + indicesAdmin().prepareRefresh().get(); + + BytesArray house1_doc = new BytesArray(""" + { + "description": "house with a beautiful fireplace and swimming pool", + "num_of_bedrooms": 3, + "type": "detached", + "price": 1000000 + } + """); + + BytesArray house2_doc = new BytesArray(""" + { + "description": "house has a wood burning fireplace", + "num_of_bedrooms": 3, + "type": "semi-detached", + "price": 500000 + } + """); + + QueryBuilder query = new PercolateQueryBuilder("my_query", List.of(house1_doc, house2_doc), XContentType.JSON); + 
SearchResponse response = client().prepareSearch("houses").setQuery(query).get(); + assertEquals(2, response.getHits().getTotalHits().value); + + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits[0].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); + assertThat( + hits[0].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "detached_query", "3_bedrooms_query")) + ); + assertThat( + hits[0].getFields().get("_percolator_document_slot_1_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "3_bedrooms_query")) + ); + + assertThat(hits[1].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0))); + assertThat( + hits[1].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("swimming_pool_query", "3_bedrooms_query")) + ); + } + } diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml new file mode 100644 index 0000000000000..1e692bc43faba --- /dev/null +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml @@ -0,0 +1,125 @@ +setup: + - skip: + version: " - 8.12.99" + reason: "Displaying matched named queries within percolator queries was added in 8.13" + - do: + indices.create: + index: houses + body: + mappings: + dynamic: strict + properties: + my_query: + type: percolator + description: + type: text + num_of_bedrooms: + type: integer + type: + type: keyword + price: + type: integer + + - do: + index: + refresh: true + index: houses + id: query_3_bedroom_detached_house_with_fireplace + body: + my_query: + { + "bool": { + "should": [ + { "match": { "description": { "query": "fireplace"} } }, + { "match": { "type": { "query": "detached", "_name": "detached_query" } } } + ], + "filter": 
{ + "match": { + "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} + } + } + } + } + + - do: + index: + refresh: true + index: houses + id: query_3_bedroom_house_with_swimming_pool + body: + my_query: + { + "bool": { + "filter": [ + { "match": { "description": { "query": "swimming pool", "_name": "swimming_pool_query" } } }, + { "match": { "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} } } + ] + } + } + +--- +"Matched named queries within percolator queries: percolate existing document": + - do: + index: + refresh: true + index: houses + id: house1 + body: + description: "house with a beautiful fireplace and swimming pool" + num_of_bedrooms: 3 + type: detached + price: 1000000 + + - do: + search: + index: houses + body: + query: + percolate: + field: my_query + index: houses + id: house1 + + - match: { hits.total.value: 2 } + + - match: { hits.hits.0._id: query_3_bedroom_detached_house_with_fireplace } + - match: { hits.hits.0.fields._percolator_document_slot: [0] } + - match: { hits.hits.0.fields._percolator_document_slot_0_matched_queries: ["detached_query", "3_bedrooms_query"] } + + - match: { hits.hits.1._id: query_3_bedroom_house_with_swimming_pool } + - match: { hits.hits.1.fields._percolator_document_slot: [0] } + - match: { hits.hits.1.fields._percolator_document_slot_0_matched_queries: ["swimming_pool_query", "3_bedrooms_query"] } + + +--- +"Matched named queries within percolator queries: percolate multiple documents in request": + - do: + search: + index: houses + body: + query: + percolate: + field: my_query + documents: + - { + "description": "house with a beautiful fireplace and swimming pool", + "num_of_bedrooms": 3, + "type": "detached", + "price": 1000000 + } + - { + "description": "house has a wood burning fireplace", + "num_of_bedrooms": 3, + "type": "semi-detached", + "price": 500000 + } + + - match: { hits.total.value: 2 } + + - match: { hits.hits.0._id: query_3_bedroom_detached_house_with_fireplace } + - match: { 
hits.hits.0.fields._percolator_document_slot: [0, 1] } + - match: { hits.hits.0.fields._percolator_document_slot_0_matched_queries: ["detached_query", "3_bedrooms_query"] } + + - match: { hits.hits.1._id: query_3_bedroom_house_with_swimming_pool } + - match: { hits.hits.1.fields._percolator_document_slot: [0] } + - match: { hits.hits.1.fields._percolator_document_slot_0_matched_queries: ["swimming_pool_query", "3_bedrooms_query"] } diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index 50dea29683540..a4f939fbe3af8 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -9,12 +9,12 @@ package org.elasticsearch.index.reindex; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractMultiClustersTestCase; import java.util.Collection; @@ -60,11 +60,9 @@ public void testReindexFromRemoteGivenIndexExists() throws Exception { new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits 
totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -77,11 +75,9 @@ public void testReindexFromRemoteGivenSameIndexNames() throws Exception { new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("test-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("test-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -108,11 +104,9 @@ public void testReindexManyTimesFromRemoteGivenSameIndexNames() throws Exception } assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("test-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -142,11 +136,9 @@ public void testReindexFromRemoteGivenSimpleDateMathIndexName() throws Interrupt new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does not 
match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -160,11 +152,9 @@ public void testReindexFromRemoteGivenComplexDateMathIndexName() throws Interrup new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java index fac18c4f6f544..17dd1503e6c89 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.sort.SortOrder; 
import org.elasticsearch.test.InternalSettingsPlugin; @@ -159,7 +160,7 @@ public void testDeleteByQueryWithRouting() throws Exception { String routing = String.valueOf(randomIntBetween(2, docs)); logger.info("--> counting documents with routing [{}]", routing); - long expected = prepareSearch().setSize(0).setRouting(routing).get().getHits().getTotalHits().value; + long expected = SearchResponseUtils.getTotalHitsValue(prepareSearch().setSize(0).setRouting(routing)); logger.info("--> delete all documents with routing [{}] with a delete-by-query", routing); DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()); diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 2850aee68a2fb..3537d430e212b 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -44,14 +44,3 @@ tasks.named("thirdPartyAudit").configure { ) } -//File repositoryDir = fixture.fsRepositoryDir as File - -testClusters.configureEach { - // repositoryDir is used by a FS repository to create snapshots - setting 'path.repo', "${repositoryDir.absolutePath}", PropertyNormalization.IGNORE_VALUE - // repositoryDir is used by two URL repositories to restore snapshots - setting 'repositories.url.allowed_urls', { - "http://snapshot.test*,${fixtureAddress('url-fixture')}" - }, PropertyNormalization.IGNORE_VALUE -} - diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java index a47b9d8b622b5..e0d8eb86613ba 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java @@ -26,6 +26,7 @@ import java.util.Collections; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -63,7 +64,7 @@ public void testUrlRepository() throws Exception { indexDoc("test-idx", Integer.toString(i), "foo", "bar" + i); } refresh(); - assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); + assertHitCount(client.prepareSearch("test-idx").setSize(0), 100); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -110,7 +111,7 @@ public void testUrlRepository() throws Exception { .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); + assertHitCount(client.prepareSearch("test-idx").setSize(0), 100); logger.info("--> list available shapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 61ef5f1973854..3c869a89cfaa9 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -11,6 +11,8 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.elasticsearch.ESNetty4IntegTestCase; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import 
org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -19,6 +21,7 @@ import org.elasticsearch.transport.TransportLogger; import java.io.IOException; +import java.util.concurrent.TimeUnit; @ESIntegTestCase.ClusterScope(numDataNodes = 2, scope = ESIntegTestCase.Scope.TEST) public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { @@ -84,7 +87,7 @@ public void testLoggingHandler() { appender.addExpectation(writeExpectation); appender.addExpectation(flushExpectation); appender.addExpectation(readExpectation); - clusterAdmin().prepareNodesHotThreads().get(); + client().execute(TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest()).actionGet(10, TimeUnit.SECONDS); appender.assertAllExpectationsMatched(); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java index d662003530c22..65276c04bed56 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; -import org.apache.lucene.analysis.util.CSVUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -23,10 +22,8 @@ import java.io.IOException; import java.io.Reader; import java.io.StringReader; -import java.util.HashSet; import java.util.List; import java.util.Locale; -import 
java.util.Set; public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { @@ -60,11 +57,10 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false); + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false, true); if (ruleList == null || ruleList.isEmpty()) { return null; } - validateDuplicatedWords(ruleList); StringBuilder sb = new StringBuilder(); for (String line : ruleList) { sb.append(line).append(System.lineSeparator()); @@ -76,23 +72,6 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } - private static void validateDuplicatedWords(List ruleList) { - Set dup = new HashSet<>(); - int lineNum = 0; - for (String line : ruleList) { - // ignore comments - if (line.startsWith("#") == false) { - String[] values = CSVUtil.parse(line); - if (dup.add(values[0]) == false) { - throw new IllegalArgumentException( - "Found duplicate term [" + values[0] + "] in user dictionary " + "at line [" + lineNum + "]" - ); - } - } - ++lineNum; - } - } - public static JapaneseTokenizer.Mode getMode(Settings settings) { String modeSetting = settings.get("mode", JapaneseTokenizer.DEFAULT_MODE.name()); return JapaneseTokenizer.Mode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java index f2949e45964a4..65c9bb9833177 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java +++ 
b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java @@ -30,7 +30,7 @@ public class NoriAnalyzerProvider extends AbstractIndexAnalyzerProvider tagList = Analysis.getWordList(env, settings, "stoptags"); final Set stopTags = tagList != null ? resolvePOSList(tagList) : KoreanPartOfSpeechStopFilter.DEFAULT_STOP_TAGS; analyzer = new KoreanAnalyzer(userDictionary, mode, stopTags, false); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java index c0be8322ade95..72eb8a3d34e1c 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; import org.elasticsearch.index.analysis.Analysis; @@ -35,17 +36,24 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, settings, name); decompoundMode = getMode(settings); - userDictionary = getUserDictionary(env, settings); + userDictionary = getUserDictionary(env, settings, indexSettings); discardPunctuation = settings.getAsBoolean("discard_punctuation", true); } - public static UserDictionary getUserDictionary(Environment env, Settings settings) { + public static UserDictionary getUserDictionary(Environment env, Settings settings, IndexSettings indexSettings) { if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) { 
throw new IllegalArgumentException( "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true); + List ruleList = Analysis.getWordList( + env, + settings, + USER_DICT_PATH_OPTION, + USER_DICT_RULES_OPTION, + true, + isSupportDuplicateCheck(indexSettings) + ); if (ruleList == null || ruleList.isEmpty()) { return null; } @@ -60,6 +68,21 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } + /** + * Determines if the specified index version supports duplicate checks. + * This method checks if the version of the index where it was created + * is at Version 8.13.0 or above. + * The feature of duplicate checks is introduced starting + * from version 8.13.0, hence any versions earlier than this do not support duplicate checks. + * + * @param indexSettings The settings of the index in question. + * @return Returns true if the version is 8.13.0 or later which means + * that the duplicate check feature is supported. 
+ */ + private static boolean isSupportDuplicateCheck(IndexSettings indexSettings) { + return indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.ES_VERSION_8_13); + } + public static KoreanTokenizer.DecompoundMode getMode(Settings settings) { String modeSetting = settings.get("decompound_mode", KoreanTokenizer.DEFAULT_DECOMPOUND.name()); return KoreanTokenizer.DecompoundMode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); diff --git a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java index e091813184472..f6d2f7b86ea1a 100644 --- a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java +++ b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -117,6 +118,31 @@ public void testNoriAnalyzerInvalidUserDictOption() throws Exception { ); } + public void testNoriAnalyzerDuplicateUserDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.ES_VERSION_8_13) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("[세종] in user dictionary at line [3]")); + } + + public void 
testNoriAnalyzerDuplicateUserDictRuleWithLegacyVersion() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_8_10_0) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final TestAnalysis analysis = createTestAnalysis(settings); + Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + try (TokenStream stream = analyzer.tokenStream("", "세종")) { + assertTokenStreamContents(stream, new String[] { "세종" }); + } + } + public void testNoriTokenizer() throws Exception { Settings settings = Settings.builder() .put("index.analysis.tokenizer.my_tokenizer.type", "nori_tokenizer") diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 706fb057cc8ee..c1e2888c47c62 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -8,10 +8,8 @@ package org.elasticsearch.index.mapper.murmur3; -import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.hash.MurmurHash3; @@ -36,15 +34,6 @@ public class Murmur3FieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "murmur3"; - public static class Defaults { - public static final FieldType FIELD_TYPE; - static { - FieldType ft = new FieldType(); - ft.setIndexOptions(IndexOptions.NONE); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); - } 
- } - private static Murmur3FieldMapper toType(FieldMapper in) { return (Murmur3FieldMapper) in; } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 16e8d2610f3fb..313dcdd6623c4 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -276,6 +277,6 @@ public void testReplicationFactorOverReplicationMax() { } private long count(Client client, String index) { - return client.prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; + return SearchResponseUtils.getTotalHitsValue(client.prepareSearch(index).setSize(0)); } } diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 7c1514d2d1a6a..b818de468ea2c 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import 
org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -50,6 +51,7 @@ import java.util.Map; import java.util.Set; import java.util.function.BiPredicate; +import java.util.function.Predicate; import static java.util.Collections.unmodifiableList; @@ -159,7 +161,6 @@ public void initSearchClient() throws IOException { Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); final String os = readOsFromNodesInfo(adminSearchClient); searchYamlTestClient = new TestCandidateAwareClient( @@ -167,7 +168,7 @@ public void initSearchClient() throws IOException { searchClient, hosts, esVersion, - masterVersion, + ESRestTestCase::clusterHasFeature, os, this::getClientBuilderWithSniffedHosts ); @@ -328,11 +329,11 @@ static class TestCandidateAwareClient extends ClientYamlTestClient { RestClient restClient, List hosts, Version esVersion, - Version masterVersion, + Predicate clusterFeaturesPredicate, String os, CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, masterVersion, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); } public void setTestCandidate(ClientYamlTestCandidate testCandidate) { diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 4fc82bb77fbb6..51d499db61932 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -31,6 +31,7 @@ import 
org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.TestCandidateAwareClient; import org.junit.AfterClass; @@ -222,7 +223,6 @@ public void initSearchClient() throws IOException { Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); final String os = readOsFromNodesInfo(adminSearchClient); searchYamlTestClient = new TestCandidateAwareClient( @@ -230,7 +230,7 @@ public void initSearchClient() throws IOException { searchClient, hosts, esVersion, - masterVersion, + ESRestTestCase::clusterHasFeature, os, this::getClientBuilderWithSniffedHosts ); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index db2904a53dd11..47f7bb488d83d 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -112,6 +112,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas .setting("indices.memory.shard_inactive_time", "60m") .apply(() -> clusterConfig) .feature(FeatureFlag.TIME_SERIES_MODE) + .feature(FeatureFlag.FAILURE_STORE_ENABLED) .build(); @ClassRule diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 3b58cf932fa61..d75519002f92e 100644 --- 
a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -52,6 +52,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; /** @@ -251,9 +252,9 @@ public void testQueryBuilderBWC() throws Exception { StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry) ) { - @UpdateForV9 // always true + @UpdateForV9 // condition will always be true var originalClusterHasTransportVersion = parseLegacyVersion(getOldClusterVersion()).map( - v -> v.onOrAfter(Version.V_8_8_0) + v -> v.onOrAfter(VERSION_INTRODUCING_TRANSPORT_VERSIONS) ).orElse(true); final TransportVersion transportVersion; diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index 724f5c2d51be6..f5a1839001e5c 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -23,10 +23,13 @@ public void testAccessMetadataViaTemplate() { Map document = new HashMap<>(); document.put("foo", "bar"); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("1 {{foo}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); 
- ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("2 {{_source.foo}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("2 {{_source.foo}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 bar")); } @@ -38,11 +41,14 @@ public void testAccessMapMetadataViaTemplate() { innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); ingestDocument.setFieldValue( - compile("field1"), + ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("2 {{_source.foo.bar}} {{_source.foo.baz}} {{_source.foo.qux.fubar}}", scriptService) ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 hello bar hello baz hello qux and fubar")); @@ -58,7 +64,10 @@ public void testAccessListMetadataViaTemplate() { list.add(null); document.put("list2", list); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -69,7 +78,7 @@ public void 
testAccessIngestMetadataViaTemplate() { document.put("_ingest", ingestMap); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); ingestDocument.setFieldValue( - compile("ingest_timestamp"), + ingestDocument.renderTemplate(compile("ingest_timestamp")), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", scriptService) ); assertThat( diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java index c93ef30731960..df4c5827cebc1 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java @@ -57,9 +57,9 @@ public void testValueSourceWithTemplates() { public void testAccessSourceViaTemplate() { IngestDocument ingestDocument = new IngestDocument("marvel", "id", 1, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); - ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("{{_index}}")), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); - ingestDocument.removeField(compile("{{marvel}}")); + ingestDocument.removeField(ingestDocument.renderTemplate(compile("{{marvel}}"))); assertThat(ingestDocument.hasField("index"), is(false)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json index e76124bbecf7d..aadb59e99af7a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json @@ -26,7 +26,7 @@ }, "body": { "description": "The connector configuration.", - "required": false + "required": true } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json index 00142ebcf00fc..9273a8dea87c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html", "description": "Deletes a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json index 11fb113d6b629..5a0de4ab94a7c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html", "description": "Deletes a synonym rule in a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json index 6cb4fcc46f26b..25c177cabbdf1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json @@ -4,7 +4,7 @@ "url": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html", "description": "Retrieves a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json index 5a718f1a48e46..ff9e7eb57b8a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html", "description": "Retrieves a synonym rule from a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json index 66bd8df92e1e7..d94bef32cddcd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html", "description": "Retrieves a summary of all defined synonym sets" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json index 6c412d174434b..e09bbb7e428a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json @@ -4,7 +4,7 @@ 
"url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html", "description": "Creates or updates a synonyms set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json index 082432ae662f0..51503b5819862 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html", "description": "Creates or updates a synonym rule in a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 36f317474f5a9..f11144d698242 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -14,8 +14,8 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; import 
org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; @@ -502,14 +502,14 @@ public void testCloseIndex() { } public void testDeleteIndex() { - interceptTransportActions(DeleteIndexAction.NAME); + interceptTransportActions(TransportDeleteIndexAction.TYPE.name()); String[] randomIndicesOrAliases = randomUniqueIndices(); DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(randomIndicesOrAliases); assertAcked(internalCluster().coordOnlyNodeClient().admin().indices().delete(deleteIndexRequest).actionGet()); clearInterceptedActions(); - assertSameIndices(deleteIndexRequest, DeleteIndexAction.NAME); + assertSameIndices(deleteIndexRequest, TransportDeleteIndexAction.TYPE.name()); } public void testGetMappings() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 05e3b81c3683f..5fa63aaed0508 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -7,19 +7,24 @@ */ package org.elasticsearch.action.admin; +import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.logging.ChunkedLoggingStreamTests; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.HotThreads; import 
org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -29,6 +34,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; @@ -36,38 +42,26 @@ public class HotThreadsIT extends ESIntegTestCase { - public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. - */ + public void testHotThreadsDontFail() throws InterruptedException { + // This test just checks if nothing crashes or gets stuck etc. createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); final AtomicBoolean hasErrors = new AtomicBoolean(false); for (int i = 0; i < iters; i++) { - final String type; - NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = clusterAdmin().prepareNodesHotThreads(); + final NodesHotThreadsRequest request = new NodesHotThreadsRequest(); if (randomBoolean()) { TimeValue timeValue = new TimeValue(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(20, 500)); - nodesHotThreadsRequestBuilder.setInterval(timeValue); + request.interval(timeValue); } if (randomBoolean()) { - nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500)); + request.threads(rarely() ? 
randomIntBetween(500, 5000) : randomIntBetween(1, 500)); } - nodesHotThreadsRequestBuilder.setIgnoreIdleThreads(randomBoolean()); + request.ignoreIdleThreads(randomBoolean()); if (randomBoolean()) { - type = switch (randomIntBetween(0, 3)) { - case 3 -> "mem"; - case 2 -> "cpu"; - case 1 -> "wait"; - default -> "block"; - }; - assertThat(type, notNullValue()); - nodesHotThreadsRequestBuilder.setType(HotThreads.ReportType.of(type)); - } else { - type = null; + request.type(HotThreads.ReportType.of(randomFrom("block", "mem", "cpu", "wait"))); } final CountDownLatch latch = new CountDownLatch(1); - nodesHotThreadsRequestBuilder.execute(new ActionListener() { + client().execute(TransportNodesHotThreadsAction.TYPE, request, new ActionListener<>() { @Override public void onResponse(NodesHotThreadsResponse nodeHotThreads) { boolean success = false; @@ -78,7 +72,6 @@ public void onResponse(NodesHotThreadsResponse nodeHotThreads) { assertThat(nodesMap.size(), equalTo(cluster().size())); for (NodeHotThreads ht : nodeHotThreads.getNodes()) { assertNotNull(ht.getHotThreads()); - // logger.info(ht.getHotThreads()); } success = true; } finally { @@ -115,40 +108,39 @@ public void onFailure(Exception e) { 3L ); } - latch.await(); + safeAwait(latch); assertThat(hasErrors.get(), is(false)); } } - public void testIgnoreIdleThreads() throws ExecutionException, InterruptedException { + public void testIgnoreIdleThreads() { assumeTrue("no support for hot_threads on FreeBSD", Constants.FREE_BSD == false); // First time, don't ignore idle threads: - NodesHotThreadsRequestBuilder builder = clusterAdmin().prepareNodesHotThreads(); - builder.setIgnoreIdleThreads(false); - builder.setThreads(Integer.MAX_VALUE); - NodesHotThreadsResponse response = builder.execute().get(); + final NodesHotThreadsResponse firstResponse = client().execute( + TransportNodesHotThreadsAction.TYPE, + new NodesHotThreadsRequest().ignoreIdleThreads(false).threads(Integer.MAX_VALUE) + ).actionGet(10, TimeUnit.SECONDS); 
final Matcher containsCachedTimeThreadRunMethod = containsString( "org.elasticsearch.threadpool.ThreadPool$CachedTimeThread.run" ); int totSizeAll = 0; - for (NodeHotThreads node : response.getNodesMap().values()) { + for (NodeHotThreads node : firstResponse.getNodesMap().values()) { totSizeAll += node.getHotThreads().length(); assertThat(node.getHotThreads(), containsCachedTimeThreadRunMethod); } // Second time, do ignore idle threads: - builder = clusterAdmin().prepareNodesHotThreads(); - builder.setThreads(Integer.MAX_VALUE); - + final var request = new NodesHotThreadsRequest().threads(Integer.MAX_VALUE); // Make sure default is true: - assertEquals(true, builder.request().ignoreIdleThreads()); - response = builder.execute().get(); + assertTrue(request.ignoreIdleThreads()); + final NodesHotThreadsResponse secondResponse = client().execute(TransportNodesHotThreadsAction.TYPE, request) + .actionGet(10, TimeUnit.SECONDS); int totSizeIgnoreIdle = 0; - for (NodeHotThreads node : response.getNodesMap().values()) { + for (NodeHotThreads node : secondResponse.getNodesMap().values()) { totSizeIgnoreIdle += node.getHotThreads().length(); assertThat(node.getHotThreads(), not(containsCachedTimeThreadRunMethod)); } @@ -157,23 +149,48 @@ public void testIgnoreIdleThreads() throws ExecutionException, InterruptedExcept assertThat(totSizeIgnoreIdle, lessThan(totSizeAll)); } - public void testTimestampAndParams() throws ExecutionException, InterruptedException { + public void testTimestampAndParams() { - NodesHotThreadsResponse response = clusterAdmin().prepareNodesHotThreads().execute().get(); + final NodesHotThreadsResponse response = client().execute(TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest()) + .actionGet(10, TimeUnit.SECONDS); if (Constants.FREE_BSD) { for (NodeHotThreads node : response.getNodesMap().values()) { - String result = node.getHotThreads(); - assertTrue(result.indexOf("hot_threads is not supported") != -1); + 
assertThat(node.getHotThreads(), containsString("hot_threads is not supported")); } } else { for (NodeHotThreads node : response.getNodesMap().values()) { - String result = node.getHotThreads(); - assertTrue(result.indexOf("Hot threads at") != -1); - assertTrue(result.indexOf("interval=500ms") != -1); - assertTrue(result.indexOf("busiestThreads=3") != -1); - assertTrue(result.indexOf("ignoreIdleThreads=true") != -1); + assertThat( + node.getHotThreads(), + allOf( + containsString("Hot threads at"), + containsString("interval=500ms"), + containsString("busiestThreads=3"), + containsString("ignoreIdleThreads=true") + ) + ); } } } + + @TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE") + public void testLogLocalHotThreads() { + final var level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); + assertThat( + ChunkedLoggingStreamTests.getDecodedLoggedBody( + logger, + level, + getTestName(), + ReferenceDocs.LOGGING, + () -> HotThreads.logLocalHotThreads(logger, level, getTestName(), ReferenceDocs.LOGGING) + ).utf8ToString(), + allOf( + containsString("Hot threads at"), + containsString("interval=500ms"), + containsString("busiestThreads=500"), + containsString("ignoreIdleThreads=false"), + containsString("cpu usage by thread") + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index ad17e4f0d49dd..4aa3598608fb6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -11,10 +11,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -43,6 +46,8 @@ @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class ReloadSecureSettingsIT extends ESIntegTestCase { + private static final String VALID_SECURE_SETTING_NAME = "some.setting.that.exists"; + @BeforeClass public static void disableInFips() { // Reload secure settings with a password protected keystore is tested in ReloadSecureSettingsWithPasswordProtectedKeystoreRestIT @@ -350,9 +355,46 @@ public void testReloadWhileKeystoreChanged() throws Exception { } } + public void testInvalidKeyInSettings() throws Exception { + final Environment environment = internalCluster().getInstance(Environment.class); + + try (KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create()) { + keyStoreWrapper.setString(VALID_SECURE_SETTING_NAME, new char[0]); + keyStoreWrapper.save(environment.configFile(), new char[0], false); + } + + PlainActionFuture actionFuture = new PlainActionFuture<>(); + clusterAdmin().prepareReloadSecureSettings() + .setSecureStorePassword(new SecureString(new char[0])) + .setNodesIds(Strings.EMPTY_ARRAY) + .execute(actionFuture); + + actionFuture.get().getNodes().forEach(nodeResponse -> assertThat(nodeResponse.reloadException(), nullValue())); + + try (KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create()) { + assertThat(keyStoreWrapper, notNullValue()); + keyStoreWrapper.setString("some.setting.that.does.not.exist", new char[0]); + 
keyStoreWrapper.save(environment.configFile(), new char[0], false); + } + + actionFuture = new PlainActionFuture<>(); + clusterAdmin().prepareReloadSecureSettings() + .setSecureStorePassword(new SecureString(new char[0])) + .setNodesIds(Strings.EMPTY_ARRAY) + .execute(actionFuture); + + actionFuture.get() + .getNodes() + .forEach(nodeResponse -> assertThat(nodeResponse.reloadException(), instanceOf(IllegalArgumentException.class))); + } + @Override protected Collection> nodePlugins() { - final List> plugins = Arrays.asList(MockReloadablePlugin.class, MisbehavingReloadablePlugin.class); + final List> plugins = Arrays.asList( + MockWithSecureSettingPlugin.class, + MockReloadablePlugin.class, + MisbehavingReloadablePlugin.class + ); // shuffle as reload is called in order Collections.shuffle(plugins, random()); return plugins; @@ -455,4 +497,10 @@ public synchronized void setShouldThrow(boolean shouldThrow) { } } + public static class MockWithSecureSettingPlugin extends Plugin { + public List> getSettings() { + return List.of(SecureSetting.secureString(VALID_SECURE_SETTING_NAME, null)); + } + }; + } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java index c9f3b0202b111..a4cf7843beb41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java @@ -40,7 +40,7 @@ public void testDesiredBalanceOnMultiNodeCluster() throws Exception { var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); - DesiredBalanceResponse 
desiredBalanceResponse = client().execute(GetDesiredBalanceAction.INSTANCE, new DesiredBalanceRequest()) + DesiredBalanceResponse desiredBalanceResponse = client().execute(TransportGetDesiredBalanceAction.TYPE, new DesiredBalanceRequest()) .get(); assertEquals(1, desiredBalanceResponse.getRoutingTable().size()); @@ -75,7 +75,7 @@ public void testDesiredBalanceWithUnassignedShards() throws Exception { var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest(index).waitForStatus(ClusterHealthStatus.YELLOW)).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); - DesiredBalanceResponse desiredBalanceResponse = client().execute(GetDesiredBalanceAction.INSTANCE, new DesiredBalanceRequest()) + DesiredBalanceResponse desiredBalanceResponse = client().execute(TransportGetDesiredBalanceAction.TYPE, new DesiredBalanceRequest()) .get(); assertEquals(1, desiredBalanceResponse.getRoutingTable().size()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java index 76d456bae1c06..a3c1304cfbae9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java @@ -271,7 +271,9 @@ public void testDeleteDesiredNodesTasksAreBatchedCorrectly() throws Exception { final List> deleteDesiredNodesFutures = new ArrayList<>(15); for (int i = 0; i < 15; i++) { - deleteDesiredNodesFutures.add(client().execute(DeleteDesiredNodesAction.INSTANCE, new DeleteDesiredNodesAction.Request())); + deleteDesiredNodesFutures.add( + client().execute(TransportDeleteDesiredNodesAction.TYPE, new TransportDeleteDesiredNodesAction.Request()) + ); } for (ActionFuture future : 
deleteDesiredNodesFutures) { @@ -347,8 +349,8 @@ private UpdateDesiredNodesRequest randomDryRunUpdateDesiredNodesRequest(Version } private void deleteDesiredNodes() { - final DeleteDesiredNodesAction.Request request = new DeleteDesiredNodesAction.Request(); - client().execute(DeleteDesiredNodesAction.INSTANCE, request).actionGet(); + final TransportDeleteDesiredNodesAction.Request request = new TransportDeleteDesiredNodesAction.Request(); + client().execute(TransportDeleteDesiredNodesAction.TYPE, request).actionGet(); } private DesiredNodes getLatestDesiredNodes() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 93fc17a9a02eb..1fda9c67a0beb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -207,7 +207,7 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); String msg = response.toString(); assertThat(msg, response.getTimestamp(), greaterThan(946681200000L)); // 1 Jan 2000 - assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), greaterThan(0L)); + assertThat(msg, response.indicesStats.getStore().sizeInBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getJvm().getVersions().size(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index d99ebe6a3e2e7..cb508334f835e 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -142,10 +142,7 @@ protected Collection> getPlugins() { return List.of(TestPlugin.class); } - private static final ActionType TEST_ACTION = new ActionType<>( - TestTransportAction.NAME, - in -> ActionResponse.Empty.INSTANCE - ); + private static final ActionType TEST_ACTION = ActionType.emptyResponse(TestTransportAction.NAME); public static class TestPlugin extends Plugin implements ActionPlugin { volatile CyclicBarrier barrier; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 595788b1eb9f5..eaf8948348684 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -37,7 +37,7 @@ public void testPendingTasksWithIndexBlocks() { )) { try { enableIndexBlock("test", blockSetting); - PendingClusterTasksResponse response = clusterAdmin().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(); assertNotNull(response.pendingTasks()); } finally { disableIndexBlock("test", blockSetting); @@ -53,7 +53,7 @@ public void testPendingTasksWithClusterReadOnlyBlock() { try { setClusterReadOnly(true); - PendingClusterTasksResponse response = clusterAdmin().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(); assertNotNull(response.pendingTasks()); } finally { setClusterReadOnly(false); @@ -80,7 +80,7 @@ public boolean validateClusterForming() { } }); - assertNotNull(clusterAdmin().preparePendingClusterTasks().get().pendingTasks()); 
+ assertNotNull(getClusterPendingTasks().pendingTasks()); // starting one more node allows the cluster to recover internalCluster().startNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 09c14df3566af..d3cbab2760747 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -8,10 +8,10 @@ package org.elasticsearch.cluster; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; @@ -111,7 +111,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { String masterNode = internalCluster().getMasterName(); String otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(otherNode)).get(); + client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(otherNode)).get(); logger.info("--> stop master node, no master block should appear"); Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopNode(masterNode); @@ -156,12 +156,12 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> clearing voting config exclusions"); ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest(); clearRequest.setWaitForRemoval(false); - client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearRequest).get(); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearRequest).get(); masterNode = internalCluster().getMasterName(); otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNode)).get(); + client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNode)).get(); logger.info("--> stop non-master node, no master block should appear"); Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopNode(otherNode); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 23c13a3dbf579..aa54e46389676 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.cluster; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.get.GetResponse; @@ -320,7 +320,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { .toList(); client().execute( - AddVotingConfigExclusionsAction.INSTANCE, + TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(nodesWithShards.toArray(new String[0])) ).get(); ensureGreen("test1"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 93d714c79c391..43506647f89ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.cluster; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.index.query.QueryBuilders; @@ -113,7 +113,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { ); logger.info("--> closing master node (1)"); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNodeName)).get(); + client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNodeName)).get(); // removing the master from the voting configuration immediately triggers the master to step down assertBusy(() -> { assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java index dee6ac3859b15..b0cc81bf34811 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java @@ -8,8 +8,8 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.ElasticsearchException; -import 
org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; @@ -43,7 +43,7 @@ public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionExcept final String originalMaster = internalCluster().getMasterName(); logger.info("--> excluding master node {}", originalMaster); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(originalMaster)).get(); + client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(originalMaster)).get(); clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); assertNotEquals(originalMaster, internalCluster().getMasterName()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index b1ac5b02f7dd2..c044fafe31efc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.cluster.ClusterInfoService; import 
org.elasticsearch.cluster.ClusterInfoServiceUtils; import org.elasticsearch.cluster.DiskUsageIntegTestCase; @@ -39,13 +38,16 @@ import org.hamcrest.TypeSafeMatcher; import java.util.Arrays; +import java.util.Comparator; import java.util.HashSet; -import java.util.Locale; +import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; -import static java.util.stream.Collectors.toMap; import static java.util.stream.Collectors.toSet; +import static org.elasticsearch.cluster.routing.RoutingNodesHelper.numberOfShardsWithState; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; @@ -74,26 +76,25 @@ public void testHighWatermarkNotExceeded() throws Exception { final String dataNodeName = internalCluster().startDataOnlyNode(); ensureStableCluster(3); - final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance( - ClusterInfoService.class - ); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class) - .addListener(event -> ClusterInfoServiceUtils.refresh(clusterInfoService)); + final InternalClusterInfoService clusterInfoService = getInternalClusterInfoService(); + internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + ClusterInfoServiceUtils.refresh(clusterInfoService); + }); final String dataNode0Id = internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId(); - final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final String indexName = randomIdentifier(); createIndex(indexName, indexSettings(6, 
0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); - var smallestShard = createReasonableSizedShards(indexName); + var shardSizes = createReasonableSizedShards(indexName); // reduce disk size of node 0 so that no shards fit below the high watermark, forcing all shards onto the other data node // (subtract the translog size since the disk threshold decider ignores this and may therefore move the shard back again) - getTestFileStore(dataNodeName).setTotalSpace(smallestShard.size + WATERMARK_BYTES - 1L); + getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES - 1L); assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, empty()); // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back - getTestFileStore(dataNodeName).setTotalSpace(smallestShard.size + WATERMARK_BYTES); - assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(smallestShard.shardIds)); + getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds())); } public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception { @@ -108,17 +109,20 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); - final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance( - ClusterInfoService.class - ); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class) - .addListener(event -> ClusterInfoServiceUtils.refresh(clusterInfoService)); + final AtomicBoolean allowRelocations = new AtomicBoolean(true); + final InternalClusterInfoService clusterInfoService = getInternalClusterInfoService(); + 
internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + ClusterInfoServiceUtils.refresh(clusterInfoService); + if (allowRelocations.get() == false) { + assertThat(numberOfShardsWithState(event.state().getRoutingNodes(), ShardRoutingState.RELOCATING), equalTo(0)); + } + }); final String dataNode0Id = internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId(); - final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final String indexName = randomIdentifier(); createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); - var smallestShard = createReasonableSizedShards(indexName); + var shardSizes = createReasonableSizedShards(indexName); final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") .setWaitForCompletion(true) @@ -128,15 +132,13 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); assertAcked(indicesAdmin().prepareDelete(indexName).get()); + updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE.toString())); + allowRelocations.set(false); // reduce disk size of node 0 so that no shards fit below the low watermark, forcing shards to be assigned to the other data node - getTestFileStore(dataNodeName).setTotalSpace(smallestShard.size + WATERMARK_BYTES - 1L); + getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES - 1L); refreshDiskUsage(); - updateClusterSettings( - Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE.toString()) - ); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") .setWaitForCompletion(true) .get(); @@ -144,13 +146,71 @@ public void 
testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti assertThat(restoreInfo.successfulShards(), is(snapshotInfo.totalShards())); assertThat(restoreInfo.failedShards(), is(0)); - assertBusy(() -> assertThat(getShardIds(dataNode0Id, indexName), empty())); + assertThat(getShardIds(dataNode0Id, indexName), empty()); - updateClusterSettings(Settings.builder().putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey())); + allowRelocations.set(true); + updateClusterSettings(Settings.builder().putNull(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey())); // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back - getTestFileStore(dataNodeName).setTotalSpace(smallestShard.size + WATERMARK_BYTES); - assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(smallestShard.shardIds)); + getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES); + assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds())); + } + + public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShards() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + ensureStableCluster(3); + + assertAcked( + clusterAdmin().preparePutRepository("repo") + .setType(FsRepository.TYPE) + .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) + ); + + final AtomicBoolean allowRelocations = new AtomicBoolean(true); + final InternalClusterInfoService clusterInfoService = getInternalClusterInfoService(); + internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + ClusterInfoServiceUtils.refresh(clusterInfoService); + if (allowRelocations.get() == false) { + 
assertThat(numberOfShardsWithState(event.state().getRoutingNodes(), ShardRoutingState.RELOCATING), equalTo(0)); + } + }); + + final String dataNode0Id = internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId(); + + final String indexName = randomIdentifier(); + createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); + var shardSizes = createReasonableSizedShards(indexName); + + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") + .setWaitForCompletion(true) + .get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards())); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertAcked(indicesAdmin().prepareDelete(indexName).get()); + updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE.toString())); + allowRelocations.set(false); + + // reduce disk size of node 0 so that only 1 of 2 smallest shards can be allocated + var usableSpace = shardSizes.sizes().get(1).size(); + getTestFileStore(dataNodeName).setTotalSpace(usableSpace + WATERMARK_BYTES + 1L); + refreshDiskUsage(); + + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") + .setWaitForCompletion(true) + .get(); + final RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); + assertThat(restoreInfo.successfulShards(), is(snapshotInfo.totalShards())); + assertThat(restoreInfo.failedShards(), is(0)); + + assertBusyWithDiskUsageRefresh( + dataNode0Id, + indexName, + new ContainsExactlyOneOf<>(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace)) + ); } private Set getShardIds(final String nodeId, final String indexName) { @@ -178,13 +238,9 @@ private Set getShardIds(final String nodeId, final String indexName) { /** * Index documents 
until all the shards are at least WATERMARK_BYTES in size, and return the one with the smallest size */ - private SmallestShards createReasonableSizedShards(final String indexName) throws InterruptedException { + private ShardSizes createReasonableSizedShards(final String indexName) throws InterruptedException { while (true) { - final IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[scaledRandomIntBetween(100, 10000)]; - for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", randomAlphaOfLength(10)); - } - indexRandom(true, indexRequestBuilders); + indexRandom(true, indexName, scaledRandomIntBetween(100, 10000)); forceMerge(); refresh(); @@ -201,23 +257,36 @@ private SmallestShards createReasonableSizedShards(final String indexName) throw .orElseThrow(() -> new AssertionError("no shards")); if (smallestShardSize > WATERMARK_BYTES) { - var smallestShardIds = Arrays.stream(shardStates) - .filter(it -> it.getStats().getStore().sizeInBytes() == smallestShardSize) - .map(it -> removeIndexUUID(it.getShardRouting().shardId())) - .collect(toSet()); - - logger.info( - "Created shards with sizes {}", - Arrays.stream(shardStates) - .collect(toMap(it -> it.getShardRouting().shardId(), it -> it.getStats().getStore().sizeInBytes())) - ); - - return new SmallestShards(smallestShardSize, smallestShardIds); + var shardSizes = Arrays.stream(shardStates) + .map(it -> new ShardSize(removeIndexUUID(it.getShardRouting().shardId()), it.getStats().getStore().sizeInBytes())) + .sorted(Comparator.comparing(ShardSize::size)) + .toList(); + logger.info("Created shards with sizes {}", shardSizes); + return new ShardSizes(shardSizes); } } } - private record SmallestShards(long size, Set shardIds) {} + private record ShardSizes(List sizes) { + + public long getSmallestShardSize() { + return sizes.get(0).size(); + } + + public Set getShardIdsWithSizeSmallerOrEqual(long size) { + return 
sizes.stream().filter(entry -> entry.size <= size).map(ShardSize::shardId).collect(toSet()); + } + + public Set getSmallestShardIds() { + return getShardIdsWithSizeSmallerOrEqual(getSmallestShardSize()); + } + + public Set getAllShardIds() { + return sizes.stream().map(ShardSize::shardId).collect(toSet()); + } + } + + private record ShardSize(ShardId shardId, long size) {} private static ShardId removeIndexUUID(ShardId shardId) { return ShardId.fromString(shardId.toString()); @@ -246,16 +315,20 @@ private void refreshDiskUsage() { ); } - private void assertBusyWithDiskUsageRefresh(String nodeName, String indexName, Matcher> matcher) throws Exception { + private void assertBusyWithDiskUsageRefresh(String nodeId, String indexName, Matcher> matcher) throws Exception { assertBusy(() -> { // refresh the master's ClusterInfoService before checking the assigned shards because DiskThresholdMonitor might still // be processing a previous ClusterInfo update and will skip the new one (see DiskThresholdMonitor#onNewInfo(ClusterInfo) // and its internal checkInProgress flag) refreshDiskUsage(); - final Set shardRoutings = getShardIds(nodeName, indexName); + final Set shardRoutings = getShardIds(nodeId, indexName); assertThat("Mismatching shard routings: " + shardRoutings, shardRoutings, matcher); - }, 30L, TimeUnit.SECONDS); + }, 5L, TimeUnit.SECONDS); + } + + private InternalClusterInfoService getInternalClusterInfoService() { + return (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); } private static final class ContainsExactlyOneOf extends TypeSafeMatcher> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 873f8083f4e0c..fde465346d4be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -357,7 +357,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) assertTrue(controlSources.isEmpty()); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); - PendingClusterTasksResponse response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(internalCluster().coordOnlyNodeClient()); assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10)); assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1")); assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true)); @@ -419,7 +419,7 @@ public void onFailure(Exception e) { } assertTrue(controlSources.isEmpty()); - response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); + response = getClusterPendingTasks(internalCluster().coordOnlyNodeClient()); assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); for (PendingClusterTask task : response.pendingTasks()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index f05a83e861e52..3baabe4cc888e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -8,10 +8,10 @@ package org.elasticsearch.gateway; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import 
org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -316,7 +316,7 @@ public void testTwoNodeFirstNodeCleared() throws Exception { Map primaryTerms = assertAndCapturePrimaryTerms(null); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(firstNode)).get(); + client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(firstNode)).get(); internalCluster().fullRestart(new RestartCallback() { @Override @@ -342,7 +342,7 @@ public boolean clearData(String nodeName) { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); } - client().execute(ClearVotingConfigExclusionsAction.INSTANCE, new ClearVotingConfigExclusionsRequest()).get(); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest()).get(); } public void testLatestVersionLoaded() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java index 1a8f928d9c10f..199a397f52ad2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.refresh.RefreshStats; import 
org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -48,14 +49,7 @@ public class SearchIdleIT extends ESSingleNodeTestCase { public void testAutomaticRefreshSearch() throws InterruptedException { - runTestAutomaticRefresh(numDocs -> { - var resp = client().prepareSearch("test").get(); - try { - return resp.getHits().getTotalHits().value; - } finally { - resp.decRef(); - } - }); + runTestAutomaticRefresh(numDocs -> SearchResponseUtils.getTotalHitsValue(client().prepareSearch("test"))); } public void testAutomaticRefreshGet() throws InterruptedException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index ec79b53ccd174..c1da93140a0b0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -182,11 +182,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException .waitForNoRelocatingShards(true) ).actionGet(); if (health.isTimedOut()) { - logger.info( - "cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() - ); + logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false)); } assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -295,11 +291,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted if (response.getStatus() != ClusterHealthStatus.RED) { logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed); - 
logger.info( - "cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() - ); + logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); ClusterState state = clusterAdmin().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 0fe5845e9ed32..779072272e59a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -8,22 +8,23 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Level; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class IndexPrimaryRelocationIT 
extends ESIntegTestCase { @@ -71,20 +72,14 @@ public void run() { .setWaitForNoRelocatingShards(true) .get(); if (clusterHealthResponse.isTimedOut()) { - final String hotThreads = clusterAdmin().prepareNodesHotThreads() - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); - logger.info( - "timed out for waiting for relocation iteration [{}] \ncluster state {} \nhot threads {}", - i, - clusterState, - hotThreads + HotThreads.logLocalHotThreads( + logger, + Level.INFO, + "timed out waiting for relocation iteration [" + i + "]", + ReferenceDocs.LOGGING ); + final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + logger.info("timed out for waiting for relocation iteration [{}] \ncluster state {}", i, clusterState); finished.set(true); indexingThread.join(); throw new AssertionError("timed out waiting for relocation iteration [" + i + "] "); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 2cbc3477cb49d..5dcd8b5b0e34f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -782,6 +782,7 @@ public Settings onNodeStopped(String nodeName) { * Tests shard recovery throttling on the target node. Node statistics should show throttling time on the target node, while no * throttling should be shown on the source node because the target will accept data more slowly than the source's throttling threshold. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103204") public void testTargetThrottling() throws Exception { logger.info("--> starting node A with default settings"); final String nodeA = internalCluster().startNode(); @@ -1744,12 +1745,12 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .getNodes() .get(0) .getIndices(); - assertThat(nodeIndicesStats.getStore().getReservedSize().getBytes(), equalTo(0L)); + assertThat(nodeIndicesStats.getStore().reservedSizeInBytes(), equalTo(0L)); assertThat( nodeIndicesStats.getShardStats(clusterState.metadata().index(indexName).getIndex()) .stream() .flatMap(s -> Arrays.stream(s.getShards())) - .map(s -> s.getStats().getStore().getReservedSize().getBytes()) + .map(s -> s.getStats().getStore().reservedSizeInBytes()) .toList(), everyItem(equalTo(StoreStats.UNKNOWN_RESERVED_BYTES)) ); @@ -1765,8 +1766,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get(0) .getIndices() .getStore() - .getReservedSize() - .getBytes(), + .reservedSizeInBytes(), greaterThan(0L) ); } @@ -1784,7 +1784,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get() .getNodes() .stream() - .mapToLong(n -> n.getIndices().getStore().getReservedSize().getBytes()) + .mapToLong(n -> n.getIndices().getStore().reservedSizeInBytes()) .sum(), equalTo(0L) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 91cd5e0e6e971..9d6a53d8bc818 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -10,8 +10,8 @@ import 
org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -151,7 +151,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo + "with errors: [[repo] set as read-only by [file_settings]]", expectThrows( IllegalArgumentException.class, - () -> client().execute(PutRepositoryAction.INSTANCE, sampleRestRequest("repo")).actionGet() + () -> client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("repo")).actionGet() ).getMessage() ); } @@ -211,7 +211,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic ); // This should succeed, nothing was reserved - client().execute(PutRepositoryAction.INSTANCE, sampleRestRequest("err-repo")).get(); + client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("err-repo")).get(); } public void testErrorSaved() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index df8f3825a5ea6..a856ee36aadc2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -18,7 +18,8 @@ import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -38,32 +39,22 @@ public void setupSuiteScopeCluster() throws Exception { public void testScroll() { final int size = randomIntBetween(1, 4); - final String[] scroll = new String[1]; - final int[] total = new int[1]; - assertNoFailuresAndResponse( - prepareSearch("index").setSize(size).setScroll(TimeValue.timeValueMinutes(1)).addAggregation(terms("f").field("f")), - response -> { - Aggregations aggregations = response.getAggregations(); - assertNotNull(aggregations); - Terms terms = aggregations.get("f"); - assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); - scroll[0] = response.getScrollId(); - total[0] = response.getHits().getHits().length; + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch("index").setSize(size).addAggregation(terms("f").field("f")), + numDocs, + (respNum, response) -> { + assertNoFailures(response); + + if (respNum == 1) { // initial response. 
+ Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("f"); + assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); + } else { + assertNull(response.getAggregations()); + } } ); - int currentTotal = 0; - while (total[0] - currentTotal > 0) { - currentTotal = total[0]; - assertNoFailuresAndResponse( - client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(1)), - scrollResponse -> { - assertNull(scrollResponse.getAggregations()); - total[0] += scrollResponse.getHits().getHits().length; - scroll[0] = scrollResponse.getScrollId(); - } - ); - } - clearScroll(scroll[0]); - assertEquals(numDocs, total[0]); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index b04aa321f70f1..df59ab18bef72 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -510,7 +510,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> wait for delete to be enqueued in cluster state"); awaitClusterState(state -> { final SnapshotDeletionsInProgress deletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE); - return deletionsInProgress.getEntries().size() == 1 && deletionsInProgress.getEntries().get(0).getSnapshots().size() == 3; + return deletionsInProgress.getEntries().size() == 1 && deletionsInProgress.getEntries().get(0).snapshots().size() == 3; }); logger.info("--> waiting for second snapshot to finish and the other two snapshots to become aborted"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 59fc54347d1d5..089f6c09806cd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -965,8 +965,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { final String leaseId = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); logger.debug("--> adding retention lease with id {} to {}", leaseId, shardId); - client().execute(RetentionLeaseActions.Add.INSTANCE, new RetentionLeaseActions.AddRequest(shardId, leaseId, RETAIN_ALL, "test")) - .actionGet(); + client().execute(RetentionLeaseActions.ADD, new RetentionLeaseActions.AddRequest(shardId, leaseId, RETAIN_ALL, "test")).actionGet(); final ShardStats shardStats = Arrays.stream(indicesAdmin().prepareStats(indexName).get().getShards()) .filter(s -> s.getShardRouting().shardId().equals(shardId)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 2005d63ab6413..a6c8e0b08c9ed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import 
org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -277,7 +277,7 @@ public void testRepositoryConflict() throws Exception { assertTrue( clusterAdmin().prepareListTasks() - .setActions(DeleteSnapshotAction.NAME) + .setActions(TransportDeleteSnapshotAction.TYPE.name()) .setDetailed(true) .get() .getTasks() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index 0f0858982b4ad..d8bc9327a2edd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -37,7 +37,7 @@ public void testRepositoryThrottlingStats() throws Exception { IndexStats indexStats = indicesStats.getIndex("test-idx"); long totalSizeInBytes = 0; for (ShardStats shard : indexStats.getShards()) { - totalSizeInBytes += shard.getStats().getStore().getSizeInBytes(); + totalSizeInBytes += shard.getStats().getStore().sizeInBytes(); } logger.info("--> total shards size: {} bytes", totalSizeInBytes); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 7eaa49b27007d..fa49dc26f2259 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; @@ -16,7 +17,6 @@ import org.elasticsearch.action.ShardOperationFailedException; 
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -46,6 +47,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.repositories.RepositoryCleanupResult; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.InternalTestCluster; @@ -371,16 +373,11 @@ private void acquirePermitsAtEnd( "--> current cluster state:\n{}", Strings.toString(clusterAdmin().prepareState().get().getState(), true, true) ); - logger.info( - "--> hot threads:\n{}", - clusterAdmin().prepareNodesHotThreads() - .setThreads(99999) - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")) + HotThreads.logLocalHotThreads( + logger, + Level.INFO, + "hot threads while failing to acquire permit [" + label + "]", + ReferenceDocs.LOGGING ); failedPermitAcquisitions.add(label); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java index 4c9de6cb5369f..23f218130a053 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java @@ -86,7 +86,7 @@ public void testNoDoubleFinalization() throws Exception { final SnapshotDeletionsInProgress snapshotDeletionsInProgress = SnapshotDeletionsInProgress.get(state); return snapshotDeletionsInProgress.getEntries() .stream() - .flatMap(entry -> entry.getSnapshots().stream()) + .flatMap(entry -> entry.snapshots().stream()) .anyMatch(snapshotId -> snapshotId.getName().equals("snap-1")); }); @@ -149,7 +149,7 @@ public void testNoDoubleFinalization() throws Exception { .stream() .anyMatch( entry -> entry.state() == SnapshotDeletionsInProgress.State.WAITING - && entry.getSnapshots().stream().anyMatch(snapshotId -> snapshotId.getName().equals("snap-2")) + && entry.snapshots().stream().anyMatch(snapshotId -> snapshotId.getName().equals("snap-2")) ); }); new Thread(() -> { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ad29384b16f45..625871d25734b 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -192,6 +192,7 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_SERVICE_EMBEDDING_SIZE_ADDED = def(8_559_00_0); public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); + public static final TransportVersion TEXT_EXPANSION_TOKEN_PRUNING_CONFIG_ADDED = def(8_562_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3d6995bd9e90f..4181b077cb185 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -116,6 +116,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_14 = new Version(7_17_14_99); public static final Version V_7_17_15 = new Version(7_17_15_99); public static final Version V_7_17_16 = new Version(7_17_16_99); + public static final Version V_7_17_17 = new Version(7_17_17_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); public static final Version V_8_1_0 = new Version(8_01_00_99); @@ -158,6 +159,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_11_1 = new Version(8_11_01_99); public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_11_3 = new Version(8_11_03_99); + public static final Version V_8_11_4 = new Version(8_11_04_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version V_8_13_0 = new Version(8_13_00_99); public static final Version CURRENT = V_8_13_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 5017f0af0007c..aebe4922e416a 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -145,15 +145,6 @@ public String toString() { }; } - /** - * @deprecated in favour of {@link #running(Runnable)} because this implementation doesn't "wrap" exceptions from {@link #onResponse} - * into {@link #onFailure}. 
- */ - @Deprecated(forRemoval = true) - static ActionListener wrap(Runnable runnable) { - return running(runnable); - } - /** * Creates a listener that executes the appropriate consumer when the response (or failure) is received. This listener is "wrapped" in * the sense that an exception from the {@code onResponse} consumer is passed into the {@code onFailure} consumer. @@ -193,29 +184,6 @@ public String toString() { }; } - /** - * Adds a wrapper around a listener which catches exceptions thrown by its {@link #onResponse} method and feeds them to its - * {@link #onFailure} method. - */ - static ActionListener wrap(ActionListener delegate) { - return new ActionListener<>() { - @Override - public void onResponse(Response response) { - ActionListener.run(delegate, l -> l.onResponse(response)); - } - - @Override - public void onFailure(Exception e) { - safeOnFailure(delegate, e); - } - - @Override - public String toString() { - return "wrapped{" + delegate + "}"; - } - }; - } - /** * Notifies every given listener with the response passed to {@link #onResponse(Object)}. If a listener itself throws an exception * the exception is forwarded to {@link #onFailure(Exception)}. 
If in turn {@link #onFailure(Exception)} fails all remaining diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 01e51d47722f6..2039acda89b8a 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -10,20 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; -import org.elasticsearch.action.admin.cluster.allocation.DeleteDesiredBalanceAction; -import org.elasticsearch.action.admin.cluster.allocation.GetDesiredBalanceAction; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportDeleteDesiredBalanceAction; import org.elasticsearch.action.admin.cluster.allocation.TransportGetDesiredBalanceAction; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; -import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.coordination.ClusterFormationInfoAction; import org.elasticsearch.action.admin.cluster.coordination.CoordinationDiagnosticsAction; import org.elasticsearch.action.admin.cluster.coordination.MasterHistoryAction; -import org.elasticsearch.action.admin.cluster.desirednodes.DeleteDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.GetDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.TransportDeleteDesiredNodesAction; import 
org.elasticsearch.action.admin.cluster.desirednodes.TransportGetDesiredNodesAction; @@ -52,11 +46,9 @@ import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.verify.TransportVerifyRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; @@ -68,11 +60,9 @@ import org.elasticsearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; -import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.clone.TransportCloneSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; import 
org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateAction; import org.elasticsearch.action.admin.cluster.snapshots.features.SnapshottableFeaturesAction; @@ -91,11 +81,9 @@ import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.GetScriptContextAction; import org.elasticsearch.action.admin.cluster.storedscripts.GetScriptLanguageAction; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetScriptContextAction; import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetScriptLanguageAction; @@ -116,15 +104,12 @@ import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.TransportFindDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import 
org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction; import org.elasticsearch.action.admin.indices.diskusage.TransportAnalyzeIndexDiskUsageAction; @@ -664,11 +649,11 @@ public void reg actions.register(PrevalidateNodeRemovalAction.INSTANCE, TransportPrevalidateNodeRemovalAction.class); actions.register(HealthApiStatsAction.INSTANCE, HealthApiStatsTransportAction.class); - actions.register(AddVotingConfigExclusionsAction.INSTANCE, TransportAddVotingConfigExclusionsAction.class); - actions.register(ClearVotingConfigExclusionsAction.INSTANCE, TransportClearVotingConfigExclusionsAction.class); - actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); - actions.register(GetDesiredBalanceAction.INSTANCE, TransportGetDesiredBalanceAction.class); - actions.register(DeleteDesiredBalanceAction.INSTANCE, TransportDeleteDesiredBalanceAction.class); + actions.register(TransportAddVotingConfigExclusionsAction.TYPE, TransportAddVotingConfigExclusionsAction.class); + actions.register(TransportClearVotingConfigExclusionsAction.TYPE, TransportClearVotingConfigExclusionsAction.class); + actions.register(TransportClusterAllocationExplainAction.TYPE, TransportClusterAllocationExplainAction.class); + actions.register(TransportGetDesiredBalanceAction.TYPE, TransportGetDesiredBalanceAction.class); + actions.register(TransportDeleteDesiredBalanceAction.TYPE, TransportDeleteDesiredBalanceAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); 
actions.register(TransportClusterHealthAction.TYPE, TransportClusterHealthAction.class); @@ -678,15 +663,15 @@ public void reg actions.register(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); actions.register(ClusterFormationInfoAction.INSTANCE, ClusterFormationInfoAction.TransportAction.class); actions.register(TransportPendingClusterTasksAction.TYPE, TransportPendingClusterTasksAction.class); - actions.register(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class); + actions.register(TransportPutRepositoryAction.TYPE, TransportPutRepositoryAction.class); actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); - actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); + actions.register(TransportDeleteRepositoryAction.TYPE, TransportDeleteRepositoryAction.class); actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); actions.register(CleanupRepositoryAction.INSTANCE, TransportCleanupRepositoryAction.class); actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); - actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); + actions.register(TransportDeleteSnapshotAction.TYPE, TransportDeleteSnapshotAction.class); actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); - actions.register(CloneSnapshotAction.INSTANCE, TransportCloneSnapshotAction.class); + actions.register(TransportCloneSnapshotAction.TYPE, TransportCloneSnapshotAction.class); actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); actions.register(SnapshottableFeaturesAction.INSTANCE, TransportSnapshottableFeaturesAction.class); @@ -701,7 +686,7 @@ public void reg actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); 
actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); - actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); + actions.register(TransportDeleteIndexAction.TYPE, TransportDeleteIndexAction.class); actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class); actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); actions.register(TransportCloseIndexAction.TYPE, TransportCloseIndexAction.class); @@ -764,9 +749,9 @@ public void reg actions.register(CoordinationDiagnosticsAction.INSTANCE, CoordinationDiagnosticsAction.TransportAction.class); // Indexed scripts - actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); + actions.register(TransportPutStoredScriptAction.TYPE, TransportPutStoredScriptAction.class); actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); - actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); + actions.register(TransportDeleteStoredScriptAction.TYPE, TransportDeleteStoredScriptAction.class); actions.register(GetScriptContextAction.INSTANCE, TransportGetScriptContextAction.class); actions.register(GetScriptLanguageAction.INSTANCE, TransportGetScriptLanguageAction.class); @@ -786,14 +771,14 @@ public void reg actions.register(RemovePersistentTaskAction.INSTANCE, RemovePersistentTaskAction.TransportAction.class); // retention leases - actions.register(RetentionLeaseActions.Add.INSTANCE, RetentionLeaseActions.Add.TransportAction.class); - actions.register(RetentionLeaseActions.Renew.INSTANCE, RetentionLeaseActions.Renew.TransportAction.class); - actions.register(RetentionLeaseActions.Remove.INSTANCE, RetentionLeaseActions.Remove.TransportAction.class); + actions.register(RetentionLeaseActions.ADD, RetentionLeaseActions.TransportAddAction.class); + actions.register(RetentionLeaseActions.RENEW, 
RetentionLeaseActions.TransportRenewAction.class); + actions.register(RetentionLeaseActions.REMOVE, RetentionLeaseActions.TransportRemoveAction.class); // Dangling indices actions.register(ListDanglingIndicesAction.INSTANCE, TransportListDanglingIndicesAction.class); - actions.register(ImportDanglingIndexAction.INSTANCE, TransportImportDanglingIndexAction.class); - actions.register(DeleteDanglingIndexAction.INSTANCE, TransportDeleteDanglingIndexAction.class); + actions.register(TransportImportDanglingIndexAction.TYPE, TransportImportDanglingIndexAction.class); + actions.register(TransportDeleteDanglingIndexAction.TYPE, TransportDeleteDanglingIndexAction.class); actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); // internal actions @@ -810,7 +795,7 @@ public void reg // desired nodes actions.register(GetDesiredNodesAction.INSTANCE, TransportGetDesiredNodesAction.class); actions.register(UpdateDesiredNodesAction.INSTANCE, TransportUpdateDesiredNodesAction.class); - actions.register(DeleteDesiredNodesAction.INSTANCE, TransportDeleteDesiredNodesAction.class); + actions.register(TransportDeleteDesiredNodesAction.TYPE, TransportDeleteDesiredNodesAction.class); actions.register(UpdateHealthInfoCacheAction.INSTANCE, UpdateHealthInfoCacheAction.TransportAction.class); actions.register(FetchHealthInfoCacheAction.INSTANCE, FetchHealthInfoCacheAction.TransportAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/ActionType.java b/server/src/main/java/org/elasticsearch/action/ActionType.java index 478fab0f2cf36..b8e4c8b88aa5e 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionType.java +++ b/server/src/main/java/org/elasticsearch/action/ActionType.java @@ -8,6 +8,7 @@ package org.elasticsearch.action; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.Writeable; /** @@ -22,6 +23,14 @@ public static ActionType localOnly(String name) { return new 
ActionType<>(name, Writeable.Reader.localOnly()); } + public static ActionType emptyResponse(String name) { + return new ActionType<>(name, in -> ActionResponse.Empty.INSTANCE); + } + + public static ActionType acknowledgedResponse(String name) { + return new ActionType<>(name, AcknowledgedResponse::readFrom); + } + /** * @param name The name of the action, must be unique across actions. * @param responseReader A reader for the response type diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java deleted file mode 100644 index 34d5874cea3cb..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.action.ActionType; - -/** - * ActionType for explaining shard allocation for a shard in the cluster - */ -public class ClusterAllocationExplainAction extends ActionType { - - public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction(); - public static final String NAME = "cluster:monitor/allocation/explain"; - - private ClusterAllocationExplainAction() { - super(NAME, ClusterAllocationExplainResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index 6ceea15d8fd11..3053ebe1f3db9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -19,8 +19,8 @@ public class ClusterAllocationExplainRequestBuilder extends MasterNodeOperationR ClusterAllocationExplainResponse, ClusterAllocationExplainRequestBuilder> { - public ClusterAllocationExplainRequestBuilder(ElasticsearchClient client, ClusterAllocationExplainAction action) { - super(client, action, new ClusterAllocationExplainRequest()); + public ClusterAllocationExplainRequestBuilder(ElasticsearchClient client) { + super(client, TransportClusterAllocationExplainAction.TYPE, new ClusterAllocationExplainRequest()); } /** The index name to use when finding the shard to explain */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DeleteDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DeleteDesiredBalanceAction.java deleted file mode 100644 index 23a2e75d5d401..0000000000000 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DeleteDesiredBalanceAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; - -public class DeleteDesiredBalanceAction extends ActionType { - - public static final DeleteDesiredBalanceAction INSTANCE = new DeleteDesiredBalanceAction(); - public static final String NAME = "cluster:admin/desired_balance/reset"; - - DeleteDesiredBalanceAction() { - super(NAME, in -> ActionResponse.Empty.INSTANCE); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/GetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/GetDesiredBalanceAction.java deleted file mode 100644 index f9f90791c223f..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/GetDesiredBalanceAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.action.ActionType; - -public class GetDesiredBalanceAction extends ActionType { - public static final GetDesiredBalanceAction INSTANCE = new GetDesiredBalanceAction(); - public static final String NAME = "cluster:admin/desired_balance/get"; - - GetDesiredBalanceAction() { - super(NAME, DesiredBalanceResponse::from); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 68302df47d6f2..7599eb2faef96 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterInfo; @@ -43,6 +44,10 @@ public class TransportClusterAllocationExplainAction extends TransportMasterNode ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> { + public static final ActionType TYPE = new ActionType<>( + "cluster:monitor/allocation/explain", + ClusterAllocationExplainResponse::new + ); private static final Logger logger = LogManager.getLogger(TransportClusterAllocationExplainAction.class); private final ClusterInfoService clusterInfoService; @@ -63,7 +68,7 @@ public TransportClusterAllocationExplainAction( AllocationService allocationService ) { super( - ClusterAllocationExplainAction.NAME, + TYPE.name(), transportService, clusterService, 
threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java index 4360d7c1925f6..76b563c3f540a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -34,6 +35,7 @@ public class TransportDeleteDesiredBalanceAction extends TransportMasterNodeAction { + public static final ActionType TYPE = ActionType.emptyResponse("cluster:admin/desired_balance/reset"); @Nullable private final MasterServiceTaskQueue resetDesiredBalanceTaskQueue; @@ -48,7 +50,7 @@ public TransportDeleteDesiredBalanceAction( ShardsAllocator shardsAllocator ) { super( - DeleteDesiredBalanceAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java index fc11790079521..49611ffae8718 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException; import 
org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterInfoService; @@ -42,6 +43,10 @@ public class TransportGetDesiredBalanceAction extends TransportMasterNodeReadAction { + public static final ActionType TYPE = new ActionType<>( + "cluster:admin/desired_balance/get", + DesiredBalanceResponse::from + ); @Nullable private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; private final ClusterInfoService clusterInfoService; @@ -59,7 +64,7 @@ public TransportGetDesiredBalanceAction( WriteLoadForecaster writeLoadForecaster ) { super( - GetDesiredBalanceAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java deleted file mode 100644 index 7445096722c28..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.action.admin.cluster.configuration; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; - -public class AddVotingConfigExclusionsAction extends ActionType { - public static final AddVotingConfigExclusionsAction INSTANCE = new AddVotingConfigExclusionsAction(); - public static final String NAME = "cluster:admin/voting_config/add_exclusions"; - - private AddVotingConfigExclusionsAction() { - super(NAME, in -> ActionResponse.Empty.INSTANCE); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java deleted file mode 100644 index 98f4dd62763e5..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.action.admin.cluster.configuration; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; - -public class ClearVotingConfigExclusionsAction extends ActionType { - public static final ClearVotingConfigExclusionsAction INSTANCE = new ClearVotingConfigExclusionsAction(); - public static final String NAME = "cluster:admin/voting_config/clear_exclusions"; - - private ClearVotingConfigExclusionsAction() { - super(NAME, in -> ActionResponse.Empty.INSTANCE); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index 57332429135b6..b9bcf0944cd83 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -48,6 +49,7 @@ public class TransportAddVotingConfigExclusionsAction extends TransportMasterNod AddVotingConfigExclusionsRequest, ActionResponse.Empty> { + public static final ActionType TYPE = ActionType.emptyResponse("cluster:admin/voting_config/add_exclusions"); private static final Logger logger = LogManager.getLogger(TransportAddVotingConfigExclusionsAction.class); public static final Setting MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING = Setting.intSetting( @@ -73,7 +75,7 @@ public 
TransportAddVotingConfigExclusionsAction( Reconfigurator reconfigurator ) { super( - AddVotingConfigExclusionsAction.NAME, + TYPE.name(), false, transportService, clusterService, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 46069f01ecda3..113d085f51fdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -42,6 +43,7 @@ public class TransportClearVotingConfigExclusionsAction extends TransportMasterN ClearVotingConfigExclusionsRequest, ActionResponse.Empty> { + public static final ActionType TYPE = ActionType.emptyResponse("cluster:admin/voting_config/clear_exclusions"); private static final Logger logger = LogManager.getLogger(TransportClearVotingConfigExclusionsAction.class); private final Reconfigurator reconfigurator; @@ -55,7 +57,7 @@ public TransportClearVotingConfigExclusionsAction( Reconfigurator reconfigurator ) { super( - ClearVotingConfigExclusionsAction.NAME, + TYPE.name(), false, transportService, clusterService, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/DeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/DeleteDesiredNodesAction.java deleted file 
mode 100644 index 720f38e16a86a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/DeleteDesiredNodesAction.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.desirednodes; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -public class DeleteDesiredNodesAction extends ActionType { - public static final DeleteDesiredNodesAction INSTANCE = new DeleteDesiredNodesAction(); - public static final String NAME = "cluster:admin/desired_nodes/delete"; - - DeleteDesiredNodesAction() { - super(NAME, in -> ActionResponse.Empty.INSTANCE); - } - - public static class Request extends AcknowledgedRequest { - public Request() {} - - public Request(StreamInput in) throws IOException { - super(in); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index 48ea8beef2fd4..689e0579d1cbd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -9,8 +9,11 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -23,14 +26,20 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportDeleteDesiredNodesAction extends TransportMasterNodeAction { +import java.io.IOException; +public class TransportDeleteDesiredNodesAction extends TransportMasterNodeAction< + TransportDeleteDesiredNodesAction.Request, + ActionResponse.Empty> { + + public static final ActionType TYPE = ActionType.emptyResponse("cluster:admin/desired_nodes/delete"); private final MasterServiceTaskQueue taskQueue; @Inject @@ -42,12 +51,12 @@ public TransportDeleteDesiredNodesAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteDesiredNodesAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, actionFilters, - DeleteDesiredNodesAction.Request::new, + Request::new, indexNameExpressionResolver, in -> ActionResponse.Empty.INSTANCE, EsExecutors.DIRECT_EXECUTOR_SERVICE @@ 
-56,17 +65,13 @@ public TransportDeleteDesiredNodesAction( } @Override - protected void masterOperation( - Task task, - DeleteDesiredNodesAction.Request request, - ClusterState state, - ActionListener listener - ) throws Exception { + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception { taskQueue.submitTask("delete-desired-nodes", new DeleteDesiredNodesTask(listener), request.masterNodeTimeout()); } @Override - protected ClusterBlockException checkBlock(DeleteDesiredNodesAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @@ -93,4 +98,17 @@ public ClusterState afterBatchExecution(ClusterState clusterState, boolean clust return clusterState.copyAndUpdateMetadata(metadata -> metadata.removeCustom(DesiredNodesMetadata.TYPE)); } } + + public static class Request extends AcknowledgedRequest { + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java deleted file mode 100644 index 6593b90fb7f65..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.node.hotthreads; - -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.monitor.jvm.HotThreads; - -public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< - NodesHotThreadsRequest, - NodesHotThreadsResponse, - NodesHotThreadsRequestBuilder> { - - public NodesHotThreadsRequestBuilder(ElasticsearchClient client) { - super(client, TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest()); - } - - public NodesHotThreadsRequestBuilder setThreads(int threads) { - request.threads(threads); - return this; - } - - public NodesHotThreadsRequestBuilder setIgnoreIdleThreads(boolean ignoreIdleThreads) { - request.ignoreIdleThreads(ignoreIdleThreads); - return this; - } - - public NodesHotThreadsRequestBuilder setType(HotThreads.ReportType type) { - request.type(type); - return this; - } - - public NodesHotThreadsRequestBuilder setSortOrder(HotThreads.SortOrder sortOrder) { - request.sortOrder(sortOrder); - return this; - } - - public NodesHotThreadsRequestBuilder setInterval(TimeValue interval) { - request.interval(interval); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 3fabd53299674..ea56c85e36a3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.io.StringWriter; import java.util.List; public class TransportNodesHotThreadsAction extends TransportNodesAction< @@ -79,8 +80,9 @@ protected NodeHotThreads nodeOperation(NodeRequest request, Task task) { .interval(request.request.interval) .threadElementsSnapshotCount(request.request.snapshots) .ignoreIdleThreads(request.request.ignoreIdleThreads); - try { - return new NodeHotThreads(clusterService.localNode(), hotThreads.detect()); + try (var writer = new StringWriter()) { + hotThreads.detect(writer); + return new NodeHotThreads(clusterService.localNode(), writer.toString()); } catch (Exception e) { throw new ElasticsearchException("failed to detect hot threads", e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 7fa97f1ee14b7..ed63e6d1b4474 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -122,6 +122,8 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation( keystore.decrypt(request.hasPassword() ? 
request.getSecureSettingsPassword().getChars() : new char[0]); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder().put(environment.settings(), false).setSecureSettings(keystore).build(); + clusterService.getClusterSettings().validate(settingsWithKeystore, true); + final List exceptions = new ArrayList<>(); // broadcast the new settings object (with the open embedded keystore) to all reloadable plugins pluginsService.filterPlugins(ReloadablePlugin.class).forEach(p -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java deleted file mode 100644 index 590460e9025b6..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.repositories.delete; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -/** - * Unregister repository action - */ -public class DeleteRepositoryAction extends ActionType { - - public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction(); - public static final String NAME = "cluster:admin/repository/delete"; - - private DeleteRepositoryAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index e2f614246b81c..6accb02418df8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -24,7 +24,7 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< * Constructs unregister repository request builder with specified repository name */ public DeleteRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, DeleteRepositoryAction.INSTANCE, new DeleteRepositoryRequest(name)); + super(client, TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index b1f78408c7829..69568462731e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.repositories.delete; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -33,6 +34,7 @@ */ public class TransportDeleteRepositoryAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/repository/delete"); private final RepositoriesService repositoriesService; @Inject @@ -45,7 +47,7 @@ public TransportDeleteRepositoryAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteRepositoryAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java deleted file mode 100644 index 3ac2134afef83..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.repositories.put; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -/** - * Register repository action - */ -public class PutRepositoryAction extends ActionType { - - public static final PutRepositoryAction INSTANCE = new PutRepositoryAction(); - public static final String NAME = "cluster:admin/repository/put"; - - private PutRepositoryAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 79195725ad962..86ed38c2ddad9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -28,7 +28,7 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< * Constructs register repository request for the repository with a given name */ public PutRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, PutRepositoryAction.INSTANCE, new PutRepositoryRequest(name)); + super(client, TransportPutRepositoryAction.TYPE, new PutRepositoryRequest(name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index bb17b0d8ab8fe..c6b471ff25bdf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -9,6 +9,7 @@ package 
org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -33,6 +34,7 @@ */ public class TransportPutRepositoryAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/repository/put"); private final RepositoriesService repositoriesService; @Inject @@ -45,7 +47,7 @@ public TransportPutRepositoryAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - PutRepositoryAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java deleted file mode 100644 index b73e8e3668cd2..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.snapshots.clone; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public final class CloneSnapshotAction extends ActionType { - - public static final CloneSnapshotAction INSTANCE = new CloneSnapshotAction(); - public static final String NAME = "cluster:admin/snapshot/clone"; - - private CloneSnapshotAction() { - super(NAME, AcknowledgedResponse::readFrom); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index efa4c4895a12e..818f0fadf92ef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -20,7 +20,7 @@ public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuild CloneSnapshotRequestBuilder> { public CloneSnapshotRequestBuilder(ElasticsearchClient client, String repository, String source, String target) { - super(client, CloneSnapshotAction.INSTANCE, new CloneSnapshotRequest(repository, source, target, Strings.EMPTY_ARRAY)); + super(client, TransportCloneSnapshotAction.TYPE, new CloneSnapshotRequest(repository, source, target, Strings.EMPTY_ARRAY)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 1a37cd0204c30..7ab8b704a3ee8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.clone; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -29,6 +30,7 @@ */ public final class TransportCloneSnapshotAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/snapshot/clone"); private final SnapshotsService snapshotsService; @Inject @@ -41,7 +43,7 @@ public TransportCloneSnapshotAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - CloneSnapshotAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java deleted file mode 100644 index 9d5e30b604702..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.snapshots.delete; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -/** - * Delete snapshot action - */ -public class DeleteSnapshotAction extends ActionType { - - public static final DeleteSnapshotAction INSTANCE = new DeleteSnapshotAction(); - public static final String NAME = "cluster:admin/snapshot/delete"; - - private DeleteSnapshotAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 8d2c8997b42e6..f18ed209ba11e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -24,7 +24,7 @@ public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuil * Constructs delete snapshot request builder with specified repository and snapshot names */ public DeleteSnapshotRequestBuilder(ElasticsearchClient client, String repository, String... 
snapshots) { - super(client, DeleteSnapshotAction.INSTANCE, new DeleteSnapshotRequest(repository, snapshots)); + super(client, TransportDeleteSnapshotAction.TYPE, new DeleteSnapshotRequest(repository, snapshots)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index df7a5e5595055..39b03b479ffdf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.delete; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -28,6 +29,7 @@ * Transport action for delete snapshot operation */ public class TransportDeleteSnapshotAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/snapshot/delete"); private final SnapshotsService snapshotsService; @Inject @@ -40,7 +42,7 @@ public TransportDeleteSnapshotAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteSnapshotAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index f4e301e0748bb..29bffa3949258 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; @@ -112,7 +113,7 @@ public void onNewClusterState(ClusterState newState) { } if (acceptableClusterStatePredicate.test(newState)) { - ActionListener.completeWith(listener, () -> buildResponse(request, newState)); + executor.execute(ActionRunnable.supply(listener, () -> buildResponse(request, newState))); } else { listener.onFailure( new NotMasterException( @@ -150,6 +151,8 @@ private static Map> getClusterFeatures(ClusterState clusterS } private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState currentState) { + ThreadPool.assertCurrentThreadPool(ThreadPool.Names.MANAGEMENT); // too heavy to construct & serialize cluster state without forking + logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java deleted file mode 100644 index 1ac899666a1eb..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class DeleteStoredScriptAction extends ActionType { - - public static final DeleteStoredScriptAction INSTANCE = new DeleteStoredScriptAction(); - public static final String NAME = "cluster:admin/script/delete"; - - private DeleteStoredScriptAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index d8f22216073a5..ce074e17ebb75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -18,7 +18,7 @@ public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder DeleteStoredScriptRequestBuilder> { public DeleteStoredScriptRequestBuilder(ElasticsearchClient client) { - super(client, DeleteStoredScriptAction.INSTANCE, new DeleteStoredScriptRequest()); + super(client, TransportDeleteStoredScriptAction.TYPE, new DeleteStoredScriptRequest()); } public DeleteStoredScriptRequestBuilder setId(String id) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java deleted file mode 100644 index 
f7506f379de8a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class PutStoredScriptAction extends ActionType { - - public static final PutStoredScriptAction INSTANCE = new PutStoredScriptAction(); - public static final String NAME = "cluster:admin/script/put"; - - private PutStoredScriptAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 24f5900629cfb..9e353382f84a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -20,7 +20,7 @@ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< PutStoredScriptRequestBuilder> { public PutStoredScriptRequestBuilder(ElasticsearchClient client) { - super(client, PutStoredScriptAction.INSTANCE, new PutStoredScriptRequest()); + super(client, TransportPutStoredScriptAction.TYPE, new PutStoredScriptRequest()); } public PutStoredScriptRequestBuilder setId(String id) { diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index dfb3745d4101a..829b00b7cc1c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -26,6 +27,8 @@ public class TransportDeleteStoredScriptAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/script/delete"); + @Inject public TransportDeleteStoredScriptAction( TransportService transportService, @@ -35,7 +38,7 @@ public TransportDeleteStoredScriptAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteStoredScriptAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index 8025d983d2668..4fb0f68bce625 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -9,6 +9,7 @@ package 
org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -26,6 +27,7 @@ public class TransportPutStoredScriptAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/script/put"); private final ScriptService scriptService; @Inject @@ -38,7 +40,7 @@ public TransportPutStoredScriptAction( ScriptService scriptService ) { super( - PutStoredScriptAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java deleted file mode 100644 index aa3f226d23c9d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.tasks; - -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder< - PendingClusterTasksRequest, - PendingClusterTasksResponse, - PendingClusterTasksRequestBuilder> { - - public PendingClusterTasksRequestBuilder(ElasticsearchClient client) { - super(client, TransportPendingClusterTasksAction.TYPE, new PendingClusterTasksRequest()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 66a489933c3ee..0001fec4e71e5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -59,7 +59,7 @@ public class TransportIndicesAliasesAction extends AcknowledgedTransportMasterNodeAction { public static final String NAME = "indices:admin/aliases"; - public static final ActionType TYPE = new ActionType<>(NAME, AcknowledgedResponse::readFrom); + public static final ActionType TYPE = ActionType.acknowledgedResponse(NAME); private static final Logger logger = LogManager.getLogger(TransportIndicesAliasesAction.class); private final MetadataIndexAliasesService indexAliasesService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java deleted file mode 100644 index 0435f603be8ac..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.delete; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -/** - * This action causes a dangling index to be considered as deleted by the cluster. - */ -public class DeleteDanglingIndexAction extends ActionType { - - public static final DeleteDanglingIndexAction INSTANCE = new DeleteDanglingIndexAction(); - public static final String NAME = "cluster:admin/indices/dangling/delete"; - - private DeleteDanglingIndexAction() { - super(NAME, AcknowledgedResponse::readFrom); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index 1207c2c1e60ff..93fae72810ad0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.dangling.DanglingIndexInfo; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; @@ -43,11 +44,12 @@ import java.util.stream.Collectors; /** - * Implements the deletion 
of a dangling index. When handling a {@link DeleteDanglingIndexAction}, + * Implements the deletion of a dangling index. When handling a {@link DeleteDanglingIndexRequest}, * this class first checks that such a dangling index exists. It then submits a cluster state update * to add the index to the index graveyard. */ public class TransportDeleteDanglingIndexAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/indices/dangling/delete"); private static final Logger logger = LogManager.getLogger(TransportDeleteDanglingIndexAction.class); private final Settings settings; @@ -64,7 +66,7 @@ public TransportDeleteDanglingIndexAction( NodeClient nodeClient ) { super( - DeleteDanglingIndexAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java deleted file mode 100644 index c64a8b81fc2de..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.import_index; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -/** - * Represents a request to import a particular dangling index. 
- */ -public class ImportDanglingIndexAction extends ActionType { - - public static final ImportDanglingIndexAction INSTANCE = new ImportDanglingIndexAction(); - public static final String NAME = "cluster:admin/indices/dangling/import"; - - private ImportDanglingIndexAction() { - super(NAME, AcknowledgedResponse::readFrom); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 0362128c6403a..0348b46bedcae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexRequest; @@ -33,11 +34,12 @@ import java.util.stream.Collectors; /** - * Implements the import of a dangling index. When handling a {@link ImportDanglingIndexAction}, + * Implements the import of a dangling index. When handling a {@link ImportDanglingIndexRequest}, * this class first checks that such a dangling index exists. It then calls {@link LocalAllocateDangledIndices} * to perform the actual allocation. 
*/ public class TransportImportDanglingIndexAction extends HandledTransportAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/indices/dangling/import"); private static final Logger logger = LogManager.getLogger(TransportImportDanglingIndexAction.class); private final LocalAllocateDangledIndices danglingIndexAllocator; @@ -50,13 +52,7 @@ public TransportImportDanglingIndexAction( LocalAllocateDangledIndices danglingIndexAllocator, NodeClient nodeClient ) { - super( - ImportDanglingIndexAction.NAME, - transportService, - actionFilters, - ImportDanglingIndexRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(TYPE.name(), transportService, actionFilters, ImportDanglingIndexRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.danglingIndexAllocator = danglingIndexAllocator; this.nodeClient = nodeClient; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java deleted file mode 100644 index c652375be2de0..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.delete; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class DeleteIndexAction extends ActionType { - - public static final DeleteIndexAction INSTANCE = new DeleteIndexAction(); - public static final String NAME = "indices:admin/delete"; - - private DeleteIndexAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index a6ae02dddde20..5c0aec258176a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -19,7 +19,7 @@ public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder< DeleteIndexRequestBuilder> { public DeleteIndexRequestBuilder(ElasticsearchClient client, String... 
indices) { - super(client, DeleteIndexAction.INSTANCE, new DeleteIndexRequest(indices)); + super(client, TransportDeleteIndexAction.TYPE, new DeleteIndexRequest(indices)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 8fe6e0b67e827..eff4fe24c10ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -36,6 +37,7 @@ */ public class TransportDeleteIndexAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.acknowledgedResponse("indices:admin/delete"); private static final Logger logger = LogManager.getLogger(TransportDeleteIndexAction.class); private final MetadataDeleteIndexService deleteIndexService; @@ -52,7 +54,7 @@ public TransportDeleteIndexAction( DestructiveOperations destructiveOperations ) { super( - DeleteIndexAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java index 31807919fd9d9..076841e3efadc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java @@ -69,7 +69,7 @@ public int calculate(Integer numberOfShards, ByteSizeValue maxPrimaryShardSize, } } else if (maxPrimaryShardSize != null) { int sourceIndexShardsNum = sourceMetadata.getNumberOfShards(); - long sourceIndexStorageBytes = indexStoreStats.getSizeInBytes(); + long sourceIndexStorageBytes = indexStoreStats.sizeInBytes(); long maxPrimaryShardSizeBytes = maxPrimaryShardSize.getBytes(); long minShardsNum = sourceIndexStorageBytes / maxPrimaryShardSizeBytes; if (minShardsNum * maxPrimaryShardSizeBytes < sourceIndexStorageBytes) { diff --git a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java index 1e67522f6a671..13972ea2bf64a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java @@ -49,12 +49,17 @@ Stream getSuccessfulResults() { public void consumeResult(SearchPhaseResult result, Runnable next) { assert results.contains(result.getShardIndex()) == false : "shardIndex: " + result.getShardIndex() + " is already set"; results.add(result.getShardIndex()); + progressListener.notifyQueryResult(result.getShardIndex(), result.queryResult()); + // We have an empty result, track that we saw it for this shard and continue; + if (result.queryResult().isNull()) { + next.run(); + return; + } // set the relation to the first non-equal relation relationAtomicReference.compareAndSet(TotalHits.Relation.EQUAL_TO, result.queryResult().getTotalHits().relation); totalHits.add(result.queryResult().getTotalHits().value); terminatedEarly.compareAndSet(false, (result.queryResult().terminatedEarly() != null && result.queryResult().terminatedEarly())); timedOut.compareAndSet(false, 
result.queryResult().searchTimedOut()); - progressListener.notifyQueryResult(result.getShardIndex(), result.queryResult()); next.run(); } @@ -80,7 +85,7 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { 1, 0, 0, - false + results.isEmpty() ); if (progressListener != SearchProgressListener.NOOP) { progressListener.notifyFinalReduce( diff --git a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java index cebb4ed6e06e6..fd10c509d8ef2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java @@ -94,8 +94,7 @@ private SubscribableListener(Object initialState) { * listener immediately with the result with which this listener was completed. Otherwise, the subscribing listener is retained and * completed when this listener is completed. *

- * Subscribed listeners must not throw any exceptions. Use {@link ActionListener#wrap(ActionListener)} if you have a listener for which - * exceptions from its {@link ActionListener#onResponse} method should be handled by its own {@link ActionListener#onFailure} method. + * Subscribed listeners must not throw any exceptions. *

* Listeners added strictly before this listener is completed will themselves be completed in the order in which their subscriptions * were received. However, there are no guarantees about the ordering of the completions of listeners which are added concurrently with @@ -113,8 +112,7 @@ public final void addListener(ActionListener listener) { * listener immediately with the result with which this listener was completed. Otherwise, the subscribing listener is retained and * completed when this listener is completed. *

- * Subscribed listeners must not throw any exceptions. Use {@link ActionListener#wrap(ActionListener)} if you have a listener for which - * exceptions from its {@link ActionListener#onResponse} method should be handled by its own {@link ActionListener#onFailure} method. + * Subscribed listeners must not throw any exceptions. *

* Listeners added strictly before this listener is completed will themselves be completed in the order in which their subscriptions * were received. However, there are no guarantees about the ordering of the completions of listeners which are added concurrently with diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 0a4951c8c4125..e37f248246920 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.CancellableFanOut; -import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterState; @@ -27,7 +26,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -83,32 +81,6 @@ protected TransportNodesAction( transportService.registerRequestHandler(transportNodeAction, finalExecutor, nodeRequest, new NodeTransportHandler()); } - /** - * @deprecated Use the local-only constructor instead. 
- */ - @Deprecated(forRemoval = true) - @SuppressWarnings("this-escape") - protected TransportNodesAction( - String actionName, - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - Writeable.Reader requestReader, - Writeable.Reader nodeRequest, - Executor executor - ) { - this(actionName, clusterService, transportService, actionFilters, nodeRequest, executor); - transportService.registerRequestHandler( - actionName, - executor, - false, - true, - requestReader, - (request, channel, task) -> execute(task, request, new ChannelActionListener<>(channel)) - ); - } - @Override protected void doExecute(Task task, NodesRequest request, ActionListener listener) { // coordination can run on SAME because it's only O(#nodes) work diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java b/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java index 9f6d4ed27cf6c..67793fb525644 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java @@ -118,21 +118,13 @@ private void refreshUnpromotables( return; } - engineOrNull.addFlushListener(location, ActionListener.wrap(new ActionListener<>() { - @Override - public void onResponse(Long generation) { - try ( - ThreadContext.StoredContext ignore = transportService.getThreadPool() - .getThreadContext() - .stashWithOrigin(POST_WRITE_REFRESH_ORIGIN) - ) { - sendUnpromotableRequests(indexShard, generation, forced, listener, postWriteRefreshTimeout); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + engineOrNull.addFlushListener(location, listener.delegateFailureAndWrap((l, generation) -> { + try ( + ThreadContext.StoredContext ignore = transportService.getThreadPool() + .getThreadContext() + 
.stashWithOrigin(POST_WRITE_REFRESH_ORIGIN) + ) { + sendUnpromotableRequests(indexShard, generation, forced, l, postWriteRefreshTimeout); } })); } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index eb2c2b7f6738e..dd60a2085acc1 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -460,6 +460,8 @@ private void start() throws NodeValidationException { } private static void shutdown() { + ElasticsearchProcess.markStopping(); + if (INSTANCE == null) { return; // never got far enough } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchProcess.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchProcess.java new file mode 100644 index 0000000000000..7397bb98322f5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchProcess.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.bootstrap; + +/** + * Helper class to determine if the ES process is shutting down + */ +public class ElasticsearchProcess { + private static volatile boolean stopping; + + static void markStopping() { + stopping = true; + } + + public static boolean isStopping() { + return stopping; + } +} diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 17d712bdf5af4..9e3bed8cef09a 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -16,9 +16,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -88,9 +85,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import 
org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; @@ -256,18 +250,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ void nodesUsage(NodesUsageRequest request, ActionListener listener); - /** - * Returns top N hot-threads samples per node. The hot-threads are only sampled - * for the node ids specified in the request. - */ - void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener); - - /** - * Returns a request builder to fetch top N hot-threads samples per node. The hot-threads are only sampled - * for the node ids provided. Note: Use {@code *} to fetch samples for all nodes - */ - NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds); - /** * List tasks * @@ -456,18 +438,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot); - /** - * Returns a list of the pending cluster tasks, that are scheduled to be executed. This includes operations - * that update the cluster state (for example, a create index operation) - */ - void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener); - - /** - * Returns a list of the pending cluster tasks, that are scheduled to be executed. This includes operations - * that update the cluster state (for example, a create index operation) - */ - PendingClusterTasksRequestBuilder preparePendingClusterTasks(); - /** * Get snapshot status. 
*/ diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 182e9ee497c07..075d1a4bb1e66 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -16,18 +16,14 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -56,16 +52,16 @@ import 
org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequestBuilder; @@ -82,16 +78,16 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import 
org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.clone.TransportCloneSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; @@ -112,20 +108,16 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import 
org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; @@ -147,16 +139,16 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexAction; +import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import 
org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; @@ -795,16 +787,6 @@ public ClusterStatsRequestBuilder prepareClusterStats() { return new ClusterStatsRequestBuilder(this); } - @Override - public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener) { - execute(TransportNodesHotThreadsAction.TYPE, request, listener); - } - - @Override - public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds) { - return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds); - } - @Override public ActionFuture listTasks(final ListTasksRequest request) { return execute(TransportListTasksAction.TYPE, request); @@ -865,19 +847,9 @@ public ClusterSearchShardsRequestBuilder prepareSearchShards(String... 
indices) return new ClusterSearchShardsRequestBuilder(this).setIndices(indices); } - @Override - public PendingClusterTasksRequestBuilder preparePendingClusterTasks() { - return new PendingClusterTasksRequestBuilder(this); - } - - @Override - public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener) { - execute(TransportPendingClusterTasksAction.TYPE, request, listener); - } - @Override public void putRepository(PutRepositoryRequest request, ActionListener listener) { - execute(PutRepositoryAction.INSTANCE, request, listener); + execute(TransportPutRepositoryAction.TYPE, request, listener); } @Override @@ -907,7 +879,7 @@ public CloneSnapshotRequestBuilder prepareCloneSnapshot(String repository, Strin @Override public void cloneSnapshot(CloneSnapshotRequest request, ActionListener listener) { - execute(CloneSnapshotAction.INSTANCE, request, listener); + execute(TransportCloneSnapshotAction.TYPE, request, listener); } @Override @@ -922,7 +894,7 @@ public GetSnapshotsRequestBuilder prepareGetSnapshots(String... 
repositories) { @Override public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener listener) { - execute(DeleteSnapshotAction.INSTANCE, request, listener); + execute(TransportDeleteSnapshotAction.TYPE, request, listener); } @Override @@ -932,7 +904,7 @@ public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, Str @Override public void deleteRepository(DeleteRepositoryRequest request, ActionListener listener) { - execute(DeleteRepositoryAction.INSTANCE, request, listener); + execute(TransportDeleteRepositoryAction.TYPE, request, listener); } @Override @@ -1057,17 +1029,17 @@ public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference sou @Override public void allocationExplain(ClusterAllocationExplainRequest request, ActionListener listener) { - execute(ClusterAllocationExplainAction.INSTANCE, request, listener); + execute(TransportClusterAllocationExplainAction.TYPE, request, listener); } @Override public ActionFuture allocationExplain(ClusterAllocationExplainRequest request) { - return execute(ClusterAllocationExplainAction.INSTANCE, request); + return execute(TransportClusterAllocationExplainAction.TYPE, request); } @Override public ClusterAllocationExplainRequestBuilder prepareAllocationExplain() { - return new ClusterAllocationExplainRequestBuilder(this, ClusterAllocationExplainAction.INSTANCE); + return new ClusterAllocationExplainRequestBuilder(this); } @Override @@ -1087,22 +1059,22 @@ public void listDanglingIndices(ListDanglingIndicesRequest request, ActionListen @Override public ActionFuture importDanglingIndex(ImportDanglingIndexRequest request) { - return execute(ImportDanglingIndexAction.INSTANCE, request); + return execute(TransportImportDanglingIndexAction.TYPE, request); } @Override public void importDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener) { - execute(ImportDanglingIndexAction.INSTANCE, request, listener); + execute(TransportImportDanglingIndexAction.TYPE, 
request, listener); } @Override public ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request) { - return execute(DeleteDanglingIndexAction.INSTANCE, request); + return execute(TransportDeleteDanglingIndexAction.TYPE, request); } @Override public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener) { - execute(DeleteDanglingIndexAction.INSTANCE, request, listener); + execute(TransportDeleteDanglingIndexAction.TYPE, request, listener); } @Override @@ -1117,13 +1089,13 @@ public PutStoredScriptRequestBuilder preparePutStoredScript() { @Override public void putStoredScript(final PutStoredScriptRequest request, ActionListener listener) { - execute(PutStoredScriptAction.INSTANCE, request, listener); + execute(TransportPutStoredScriptAction.TYPE, request, listener); } @Override public void deleteStoredScript(DeleteStoredScriptRequest request, ActionListener listener) { - execute(DeleteStoredScriptAction.INSTANCE, request, listener); + execute(TransportDeleteStoredScriptAction.TYPE, request, listener); } @Override @@ -1239,12 +1211,12 @@ public CreateIndexRequestBuilder prepareCreate(String index) { @Override public ActionFuture delete(final DeleteIndexRequest request) { - return execute(DeleteIndexAction.INSTANCE, request); + return execute(TransportDeleteIndexAction.TYPE, request); } @Override public void delete(final DeleteIndexRequest request, final ActionListener listener) { - execute(DeleteIndexAction.INSTANCE, request, listener); + execute(TransportDeleteIndexAction.TYPE, request, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java index 98dfc48dd3cd0..c62eeeab3e479 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java @@ -106,9 +106,9 @@ private static ClusterSnapshotStats of( for 
(SnapshotDeletionsInProgress.Entry entry : snapshotDeletionsInProgress.getEntries()) { if (entry.repository().equals(repositoryName)) { - firstStartTimeMillis = Math.min(firstStartTimeMillis, entry.getStartTime()); + firstStartTimeMillis = Math.min(firstStartTimeMillis, entry.startTime()); deletionsCount += 1; - snapshotDeletionsCount += entry.getSnapshots().size(); + snapshotDeletionsCount += entry.snapshots().size(); if (entry.state() == SnapshotDeletionsInProgress.State.STARTED) { activeDeletionsCount += 1; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index e861ff3ecf27e..6deac76b171d6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1031,9 +1031,11 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr */ public static final TransportVersion INFERRED_TRANSPORT_VERSION = TransportVersions.V_8_8_0; + public static final Version VERSION_INTRODUCING_TRANSPORT_VERSIONS = Version.V_8_8_0; + private static TransportVersion inferTransportVersion(DiscoveryNode node) { TransportVersion tv; - if (node.getVersion().before(Version.V_8_8_0)) { + if (node.getVersion().before(VERSION_INTRODUCING_TRANSPORT_VERSIONS)) { // 1-to-1 mapping between Version and TransportVersion tv = TransportVersion.fromId(node.getPre811VersionId().getAsInt()); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 1744bcc91b834..26c453d419f4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -26,7 +26,6 @@ import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; @@ -97,7 +96,6 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private final Object mutex = new Object(); private final List> nextRefreshListeners = new ArrayList<>(); - private final ClusterService clusterService; private AsyncRefresh currentRefresh; private RefreshScheduler refreshScheduler; @@ -108,7 +106,6 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi this.indicesStatsSummary = IndicesStatsSummary.EMPTY; this.threadPool = threadPool; this.client = client; - this.clusterService = clusterService; this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); this.enabled = DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); @@ -250,7 +247,6 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { final Map reservedSpaceBuilders = new HashMap<>(); buildShardLevelInfo( - clusterService.state().routingTable(), adjustShardStats(stats), shardSizeByIdentifierBuilder, shardDataSetSizeBuilder, @@ -445,7 +441,6 @@ public void addListener(Consumer clusterInfoConsumer) { } static void buildShardLevelInfo( - RoutingTable routingTable, ShardStats[] stats, Map shardSizes, Map shardDataSetSizeBuilder, @@ -453,7 +448,7 @@ static void buildShardLevelInfo( Map reservedSpaceByShard ) { for (ShardStats s : stats) { - final ShardRouting shardRouting = routingTable.deduplicate(s.getShardRouting()); + final ShardRouting shardRouting = s.getShardRouting(); dataPathByShard.put(ClusterInfo.NodeAndShard.from(shardRouting), 
s.getDataPath()); final StoreStats storeStats = s.getStats().getStore(); @@ -462,7 +457,7 @@ static void buildShardLevelInfo( } final long size = storeStats.sizeInBytes(); final long dataSetSize = storeStats.totalDataSetSizeInBytes(); - final long reserved = storeStats.getReservedSize().getBytes(); + final long reserved = storeStats.reservedSizeInBytes(); final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting); logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved); diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index d3a9397d148cf..2dba73a3ec68f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -38,7 +38,7 @@ public RepositoryCleanupInProgress(List entries) { } RepositoryCleanupInProgress(StreamInput in) throws IOException { - this.entries = in.readCollectionAsList(Entry::new); + this.entries = in.readCollectionAsList(Entry::readFrom); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { @@ -92,20 +92,10 @@ public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_4_0; } - public static final class Entry implements Writeable, RepositoryOperation { + public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation { - private final String repository; - - private final long repositoryStateId; - - private Entry(StreamInput in) throws IOException { - repository = in.readString(); - repositoryStateId = in.readLong(); - } - - public Entry(String repository, long repositoryStateId) { - this.repository = repository; - this.repositoryStateId = repositoryStateId; + public static Entry readFrom(StreamInput in) throws IOException { + return new Entry(in.readString(), 
in.readLong()); } @Override @@ -123,10 +113,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(repository); out.writeLong(repositoryStateId); } - - @Override - public String toString() { - return "{" + repository + '}' + '{' + repositoryStateId + '}'; - } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index bd7a2ed1cffc0..2b618aa53a354 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -153,12 +153,7 @@ public Entry( /** * Represents status of a restored shard */ - public static class ShardRestoreStatus implements Writeable { - private State state; - private String nodeId; - private String reason; - - private ShardRestoreStatus() {} + public record ShardRestoreStatus(String nodeId, State state, String reason) implements Writeable { /** * Constructs a new shard restore status in initializing state on the given node @@ -179,67 +174,8 @@ public ShardRestoreStatus(String nodeId, State state) { this(nodeId, state, null); } - /** - * Constructs a new shard restore status in with specified state on the given node with specified failure reason - * - * @param nodeId node id - * @param state restore state - * @param reason failure reason - */ - public ShardRestoreStatus(String nodeId, State state, String reason) { - this.nodeId = nodeId; - this.state = state; - this.reason = reason; - } - - /** - * Returns current state - * - * @return current state - */ - public State state() { - return state; - } - - /** - * Returns node id of the node where shared is getting restored - * - * @return node id - */ - public String nodeId() { - return nodeId; - } - - /** - * Returns failure reason - * - * @return failure reason - */ - public String reason() { - return reason; - } - - /** - * Reads restore status from stream input - * - * @param 
in stream input - * @return restore status - */ - public static ShardRestoreStatus readShardRestoreStatus(StreamInput in) throws IOException { - ShardRestoreStatus shardSnapshotStatus = new ShardRestoreStatus(); - shardSnapshotStatus.readFrom(in); - return shardSnapshotStatus; - } - - /** - * Reads restore status from stream input - * - * @param in stream input - */ - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); - reason = in.readOptionalString(); + public static ShardRestoreStatus readFrom(StreamInput in) throws IOException { + return new ShardRestoreStatus(in.readOptionalString(), State.fromValue(in.readByte()), in.readOptionalString()); } /** @@ -253,24 +189,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value); out.writeOptionalString(reason); } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - ShardRestoreStatus status = (ShardRestoreStatus) o; - return state == status.state && Objects.equals(nodeId, status.nodeId) && Objects.equals(reason, status.reason); - } - - @Override - public int hashCode() { - return Objects.hash(state, nodeId, reason); - } } /** @@ -375,14 +293,7 @@ public RestoreInProgress(StreamInput in) throws IOException { List indices = in.readCollectionAsImmutableList(StreamInput::readString); entriesBuilder.put( uuid, - new Entry( - uuid, - snapshot, - state, - quiet, - indices, - in.readImmutableMap(ShardId::new, ShardRestoreStatus::readShardRestoreStatus) - ) + new Entry(uuid, snapshot, state, quiet, indices, in.readImmutableMap(ShardId::new, ShardRestoreStatus::readFrom)) ); } this.entries = Collections.unmodifiableMap(entriesBuilder); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java 
index 234c9a924d8a8..eea89c6ff3714 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -11,13 +11,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState.Custom; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.repositories.RepositoryOperation; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.xcontent.ToXContent; @@ -29,7 +29,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Objects; import java.util.Set; /** @@ -58,7 +57,7 @@ public static SnapshotDeletionsInProgress of(List entries) { @@ -195,7 +194,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore public String toString() { StringBuilder builder = new StringBuilder("SnapshotDeletionsInProgress["); for (int i = 0; i < entries.size(); i++) { - builder.append(entries.get(i).getSnapshots()); + builder.append(entries.get(i).snapshots()); if (i + 1 < entries.size()) { builder.append(","); } @@ -206,98 +205,56 @@ public String toString() { /** * A class representing a snapshot deletion request entry in the cluster state. 
*/ - public static final class Entry implements Writeable, RepositoryOperation { - private final List snapshots; - private final String repoName; - private final State state; - private final long startTime; - private final long repositoryStateId; - private final String uuid; - - public Entry(List snapshots, String repoName, long startTime, long repositoryStateId, State state) { - this(snapshots, repoName, startTime, repositoryStateId, state, UUIDs.randomBase64UUID()); + public record Entry(String repoName, List snapshots, long startTime, long repositoryStateId, State state, String uuid) + implements + Writeable, + RepositoryOperation { + + @SuppressForbidden(reason = "using a private constructor within the same file") + public Entry(String repoName, List snapshots, long startTime, long repositoryStateId, State state) { + this(repoName, snapshots, startTime, repositoryStateId, state, UUIDs.randomBase64UUID()); } - private Entry(List snapshots, String repoName, long startTime, long repositoryStateId, State state, String uuid) { - this.snapshots = snapshots; + public Entry { assert snapshots.size() == new HashSet<>(snapshots).size() : "Duplicate snapshot ids in " + snapshots; - this.repoName = repoName; - this.startTime = startTime; - this.repositoryStateId = repositoryStateId; - this.state = state; - this.uuid = uuid; } - public Entry(StreamInput in) throws IOException { - this.repoName = in.readString(); - this.snapshots = in.readCollectionAsImmutableList(SnapshotId::new); - this.startTime = in.readVLong(); - this.repositoryStateId = in.readLong(); - this.state = State.readFrom(in); - this.uuid = in.readString(); + @SuppressForbidden(reason = "using a private constructor within the same file") + public static Entry readFrom(StreamInput in) throws IOException { + return new Entry( + in.readString(), + in.readCollectionAsImmutableList(SnapshotId::new), + in.readVLong(), + in.readLong(), + State.readFrom(in), + in.readString() + ); } + @SuppressForbidden(reason = 
"using a private constructor within the same file") public Entry started() { assert state == State.WAITING; - return new Entry(snapshots, repository(), startTime, repositoryStateId, State.STARTED, uuid); + return new Entry(repository(), snapshots, startTime, repositoryStateId, State.STARTED, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withAddedSnapshots(Collection newSnapshots) { assert state == State.WAITING; final Collection updatedSnapshots = new HashSet<>(snapshots); if (updatedSnapshots.addAll(newSnapshots) == false) { return this; } - return new Entry(List.copyOf(updatedSnapshots), repository(), startTime, repositoryStateId, State.WAITING, uuid); + return new Entry(repository(), List.copyOf(updatedSnapshots), startTime, repositoryStateId, State.WAITING, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withSnapshots(Collection snapshots) { - return new Entry(List.copyOf(snapshots), repository(), startTime, repositoryStateId, state, uuid); + return new Entry(repository(), List.copyOf(snapshots), startTime, repositoryStateId, state, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withRepoGen(long repoGen) { - return new Entry(snapshots, repository(), startTime, repoGen, state, uuid); - } - - public State state() { - return state; - } - - public String uuid() { - return uuid; - } - - public List getSnapshots() { - return snapshots; - } - - /** - * The start time in milliseconds for deleting the snapshots. 
- */ - public long getStartTime() { - return startTime; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Entry that = (Entry) o; - return repoName.equals(that.repoName) - && snapshots.equals(that.snapshots) - && startTime == that.startTime - && repositoryStateId == that.repositoryStateId - && state == that.state - && uuid.equals(that.uuid); - } - - @Override - public int hashCode() { - return Objects.hash(snapshots, repoName, startTime, repositoryStateId, state, uuid); + return new Entry(repository(), snapshots, startTime, repoGen, state, uuid); } @Override @@ -319,18 +276,6 @@ public String repository() { public long repositoryStateId() { return repositoryStateId; } - - @Override - public String toString() { - return Strings.format( - "SnapshotDeletionsInProgress.Entry[[%s@%d][%s][%s]%s]", - repoName, - repositoryStateId, - uuid, - state, - snapshots - ); - } } public enum State implements Writeable { diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 1a079d03405d7..470f175deb247 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; @@ -468,7 +469,14 @@ public static State fromValue(byte value) { } } - public static class ShardSnapshotStatus implements Writeable { + public record ShardSnapshotStatus( + @Nullable String nodeId, + ShardState state, + @Nullable ShardGeneration generation, + @Nullable 
String reason, + @Nullable // only present in state SUCCESS; may be null even in SUCCESS if this state came over the wire from an older node + ShardSnapshotResult shardSnapshotResult + ) implements Writeable { /** * Shard snapshot status for shards that are waiting for another operation to finish before they can be assigned to a node. @@ -486,41 +494,38 @@ public static class ShardSnapshotStatus implements Writeable { public static final ShardSnapshotStatus MISSING = new SnapshotsInProgress.ShardSnapshotStatus( null, ShardState.MISSING, - "missing index", - null + null, + "missing index" ); - private final ShardState state; - - @Nullable - private final String nodeId; - - @Nullable - private final ShardGeneration generation; - - @Nullable - private final String reason; - - @Nullable // only present in state SUCCESS; may be null even in SUCCESS if this state came over the wire from an older node - private final ShardSnapshotResult shardSnapshotResult; - public ShardSnapshotStatus(String nodeId, ShardGeneration generation) { this(nodeId, ShardState.INIT, generation); } public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable ShardGeneration generation) { - this(nodeId, assertNotSuccess(state), null, generation); + this(nodeId, assertNotSuccess(state), generation, null); } - public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, String reason, @Nullable ShardGeneration generation) { - this(nodeId, assertNotSuccess(state), reason, generation, null); + @SuppressForbidden(reason = "using a private constructor within the same file") + public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable ShardGeneration generation, String reason) { + this(nodeId, assertNotSuccess(state), generation, reason, null); } - private ShardSnapshotStatus( + private static ShardState assertNotSuccess(ShardState shardState) { + assert shardState != ShardState.SUCCESS : "use ShardSnapshotStatus#success"; + return shardState; + } + + 
@SuppressForbidden(reason = "using a private constructor within the same file") + public static ShardSnapshotStatus success(String nodeId, ShardSnapshotResult shardSnapshotResult) { + return new ShardSnapshotStatus(nodeId, ShardState.SUCCESS, shardSnapshotResult.getGeneration(), null, shardSnapshotResult); + } + + public ShardSnapshotStatus( @Nullable String nodeId, ShardState state, - String reason, @Nullable ShardGeneration generation, + String reason, @Nullable ShardSnapshotResult shardSnapshotResult ) { this.nodeId = nodeId; @@ -531,15 +536,6 @@ private ShardSnapshotStatus( assert assertConsistent(); } - private static ShardState assertNotSuccess(ShardState shardState) { - assert shardState != ShardState.SUCCESS : "use ShardSnapshotStatus#success"; - return shardState; - } - - public static ShardSnapshotStatus success(String nodeId, ShardSnapshotResult shardSnapshotResult) { - return new ShardSnapshotStatus(nodeId, ShardState.SUCCESS, null, shardSnapshotResult.getGeneration(), shardSnapshotResult); - } - private boolean assertConsistent() { // If the state is failed we have to have a reason for this failure assert state.failed() == false || reason != null; @@ -552,6 +548,7 @@ private boolean assertConsistent() { return true; } + @SuppressForbidden(reason = "using a private constructor within the same file") public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { final String nodeId = DiscoveryNode.deduplicateNodeIdentifier(in.readOptionalString()); final ShardState state = ShardState.fromValue(in.readByte()); @@ -561,34 +558,17 @@ public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { if (state == ShardState.QUEUED) { return UNASSIGNED_QUEUED; } - return new ShardSnapshotStatus(nodeId, state, reason, generation, shardSnapshotResult); - } - - public ShardState state() { - return state; - } - - @Nullable - public String nodeId() { - return nodeId; - } - - @Nullable - public ShardGeneration generation() { - return 
this.generation; - } - - public String reason() { - return reason; + return new ShardSnapshotStatus(nodeId, state, generation, reason, shardSnapshotResult); } + @SuppressForbidden(reason = "using a private constructor within the same file") public ShardSnapshotStatus withUpdatedGeneration(ShardGeneration newGeneration) { assert state == ShardState.SUCCESS : "can't move generation in state " + state; return new ShardSnapshotStatus( nodeId, state, - reason, newGeneration, + reason, shardSnapshotResult == null ? null : new ShardSnapshotResult(newGeneration, shardSnapshotResult.getSize(), shardSnapshotResult.getSegmentCount()) @@ -618,43 +598,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(reason); out.writeOptionalWriteable(shardSnapshotResult); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ShardSnapshotStatus status = (ShardSnapshotStatus) o; - return Objects.equals(nodeId, status.nodeId) - && Objects.equals(reason, status.reason) - && Objects.equals(generation, status.generation) - && state == status.state - && Objects.equals(shardSnapshotResult, status.shardSnapshotResult); - } - - @Override - public int hashCode() { - int result = state != null ? state.hashCode() : 0; - result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0); - result = 31 * result + (reason != null ? reason.hashCode() : 0); - result = 31 * result + (generation != null ? generation.hashCode() : 0); - result = 31 * result + (shardSnapshotResult != null ? 
shardSnapshotResult.hashCode() : 0); - return result; - } - - @Override - public String toString() { - return "ShardSnapshotStatus[state=" - + state - + ", nodeId=" - + nodeId - + ", reason=" - + reason - + ", generation=" - + generation - + ", shardSnapshotResult=" - + shardSnapshotResult - + "]"; - } } public static class Entry implements Writeable, ToXContentObject, RepositoryOperation, Diffable { @@ -1029,8 +972,8 @@ public Entry abort() { status = new ShardSnapshotStatus( nodeId, nodeId == null ? ShardState.FAILED : ShardState.ABORTED, - "aborted by snapshot deletion", - status.generation() + status.generation(), + "aborted by snapshot deletion" ); } completed &= status.state().completed(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index fb5c9f2fea7de..229c34ecc1a14 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -68,7 +68,7 @@ * this will report GREEN. * If we have had a master within the last 30 seconds, but that master has changed more than 3 times in the last 30 minutes (and that is * confirmed by checking with the last-known master), then this will report YELLOW. - * If we have not had a master within the last 30 seconds, then this will will report RED with one exception. That exception is when: + * If we have not had a master within the last 30 seconds, then this will report RED with one exception. That exception is when: * (1) no node is elected master, (2) this node is not master eligible, (3) some node is master eligible, (4) we ask a master-eligible node * to run this service, and (5) it comes back with a result that is not RED. 
* Since this service needs to be able to run when there is no master at all, it does not depend on the dedicated health node (which @@ -99,7 +99,7 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { /* * This is a Map of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for - * diagnosis. The key is the DisoveryNode for the master eligible node being polled, and the value is a Cancellable. + * diagnosis. The key is the DiscoveryNode for the master eligible node being polled, and the value is a Cancellable. * The field is accessed (reads/writes) from multiple threads, but the reference itself is only ever changed on the cluster change * event thread. */ @@ -121,7 +121,7 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { volatile AtomicReference remoteCoordinationDiagnosisTask = null; /* * This field holds the result of the task in the remoteCoordinationDiagnosisTask field above. The field is accessed - * (reads/writes) from multiple threads, but is only ever reassigned on a the initialization thread and the cluster change event thread. + * (reads/writes) from multiple threads, but is only ever reassigned on the initialization thread and the cluster change event thread. */ volatile AtomicReference remoteCoordinationDiagnosisResult = null; @@ -294,7 +294,7 @@ private static CoordinationDiagnosticsDetails getDetails( /** * Returns the health result when we have detected locally that the master has changed to null repeatedly (by default more than 3 times - * in the last 30 minutes). This method attemtps to use the master history from a remote node to confirm what we are seeing locally. + * in the last 30 minutes). This method attempts to use the master history from a remote node to confirm what we are seeing locally. * If the information from the remote node confirms that the master history has been unstable, a YELLOW status is returned. 
If the * information from the remote node shows that the master history has been stable, then we assume that the problem is with this node * and a GREEN status is returned (the problems with this node will be covered in a separate health indicator). If there had been @@ -1133,7 +1133,7 @@ private Scheduler.Cancellable sendTransportRequest ); responseConsumer.accept(responseTransformationFunction.apply(response, null)); }, e -> { - logger.warn("Exception in remote request to master" + masterEligibleNode, e); + logger.warn("Exception in remote request to master " + masterEligibleNode, e); responseConsumer.accept(responseTransformationFunction.apply(null, e)); })); @@ -1143,7 +1143,7 @@ public void run() { if (masterEligibleNode == null) { /* * This node's PeerFinder hasn't yet discovered the master-eligible nodes. By notifying the responseConsumer with a null - * value we effectively do nothing, and allow this request to be recheduled. + * value we effectively do nothing, and allow this request to be rescheduled. 
*/ responseConsumer.accept(null); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java index 05c0876669732..1f364e1ace6e4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java @@ -20,9 +20,13 @@ public class ExpectedShardSizeEstimator { - public static long getExpectedShardSize(ShardRouting shardRouting, long defaultSize, RoutingAllocation allocation) { + public static boolean shouldReserveSpaceForInitializingShard(ShardRouting shard, RoutingAllocation allocation) { + return shouldReserveSpaceForInitializingShard(shard, allocation.metadata()); + } + + public static long getExpectedShardSize(ShardRouting shard, long defaultSize, RoutingAllocation allocation) { return getExpectedShardSize( - shardRouting, + shard, defaultSize, allocation.clusterInfo(), allocation.snapshotShardSizeInfo(), @@ -31,6 +35,27 @@ public static long getExpectedShardSize(ShardRouting shardRouting, long defaultS ); } + public static boolean shouldReserveSpaceForInitializingShard(ShardRouting shard, Metadata metadata) { + assert shard.initializing() : "Expected initializing shard, got: " + shard; + return switch (shard.recoverySource().getType()) { + // No need to reserve disk space when initializing a new empty shard + case EMPTY_STORE -> false; + + // No need to reserve disk space if the shard is already allocated on the disk. Starting it is not going to use more. + case EXISTING_STORE -> false; + + // Peer recovery require downloading all segments locally to start the shard. Reserve disk space for this + case PEER -> true; + + // Snapshot restore (unless it is partial) require downloading all segments locally from the blobstore to start the shard. 
+ case SNAPSHOT -> metadata.getIndexSafe(shard.index()).isPartialSearchableSnapshot() == false; + + // shrink/split/clone operation is going to clone existing locally placed shards using file system hard links + // so no additional space is going to be used until future merges + case LOCAL_SHARDS -> false; + }; + } + /** * Returns the expected shard size for the given shard or the default value provided if not enough information are available * to estimate the shards size. @@ -47,11 +72,18 @@ public static long getExpectedShardSize( if (indexMetadata.getResizeSourceIndex() != null && shard.active() == false && shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { + assert shard.primary() : "All replica shards are recovering from " + RecoverySource.Type.PEER; return getExpectedSizeOfResizedShard(shard, defaultValue, indexMetadata, clusterInfo, metadata, routingTable); - } else if (shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + } else if (shard.active() == false && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + assert shard.primary() : "All replica shards are recovering from " + RecoverySource.Type.PEER; return snapshotShardSizeInfo.getShardSize(shard, defaultValue); } else { - return clusterInfo.getShardSize(shard, defaultValue); + var shardSize = clusterInfo.getShardSize(shard.shardId(), shard.primary()); + if (shardSize == null && shard.primary() == false) { + // derive replica size from corresponding primary + shardSize = clusterInfo.getShardSize(shard.shardId(), true); + } + return shardSize == null ? 
defaultValue : shardSize; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 986a6bd0385e8..723d65fbc2a3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -148,33 +148,6 @@ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { return shard; } - /** - * Try to deduplicate the given shard routing with an equal instance found in this routing table. This is used by the logic of the - * {@link org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider} and - * {@link org.elasticsearch.cluster.InternalClusterInfoService} to deduplicate instances created by a master node and those read from - * the network to speed up the use of {@link ShardRouting} as a map key in {@link org.elasticsearch.cluster.ClusterInfo#getDataPath}. - * - * @param shardRouting shard routing to deduplicate - * @return deduplicated shard routing from this routing table if an equivalent shard routing was found or the given instance otherwise - */ - public ShardRouting deduplicate(ShardRouting shardRouting) { - final IndexRoutingTable indexShardRoutingTable = indicesRouting.get(shardRouting.index().getName()); - if (indexShardRoutingTable == null) { - return shardRouting; - } - final IndexShardRoutingTable shardRoutingTable = indexShardRoutingTable.shard(shardRouting.id()); - if (shardRoutingTable == null) { - return shardRouting; - } - for (int i = 0; i < shardRoutingTable.size(); i++) { - ShardRouting found = shardRoutingTable.shard(i); - if (shardRouting.equals(found)) { - return found; - } - } - return shardRouting; - } - @Nullable public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { final IndexRoutingTable indexRoutingTable = index(shardId.getIndex()); diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 5c216b9a5b308..6645fd7d0e895 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Releasable; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import java.util.ArrayList; import java.util.Collections; @@ -425,6 +426,7 @@ long sizeOfRelocatingShards(RoutingNode routingNode, DiskUsage diskUsage, Cluste true, diskUsage.getPath(), info, + SnapshotShardSizeInfo.EMPTY, reroutedClusterState.metadata(), reroutedClusterState.routingTable(), 0L diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 0e0d15a02d042..2fa1994f9f74b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -27,10 +27,12 @@ import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import java.util.Map; import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.shouldReserveSpaceForInitializingShard; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially @@ -116,6 +118,7 
@@ public static long sizeOfUnaccountedShards( boolean subtractShardsMovingAway, String dataPath, ClusterInfo clusterInfo, + SnapshotShardSizeInfo snapshotShardSizeInfo, Metadata metadata, RoutingTable routingTable, long sizeOfUnaccountableSearchableSnapshotShards @@ -128,28 +131,18 @@ public static long sizeOfUnaccountedShards( // Where reserved space is unavailable (e.g. stats are out-of-sync) compute a conservative estimate for initialising shards for (ShardRouting routing : node.initializing()) { - if (routing.relocatingNodeId() == null && metadata.getIndexSafe(routing.index()).isSearchableSnapshot() == false) { - // in practice the only initializing-but-not-relocating non-searchable-snapshot shards with a nonzero expected shard size - // will be ones created - // by a resize (shrink/split/clone) operation which we expect to happen using hard links, so they shouldn't be taking - // any additional space and can be ignored here - continue; - } - if (reservedSpace.containsShardId(routing.shardId())) { - continue; - } - final String actualPath = clusterInfo.getDataPath(routing); - // if we don't yet know the actual path of the incoming shard then conservatively assume it's going to the path with the least - // free space - if (actualPath == null || actualPath.equals(dataPath)) { - totalSize += getExpectedShardSize( - routing, - Math.max(routing.getExpectedShardSize(), 0L), - clusterInfo, - null, - metadata, - routingTable - ); + // Space needs to be reserved only when initializing shards that are going to use additional space + // that is not yet accounted for by `reservedSpace` in case of lengthy recoveries + if (shouldReserveSpaceForInitializingShard(routing, metadata) && reservedSpace.containsShardId(routing.shardId()) == false) { + final String actualPath = clusterInfo.getDataPath(routing); + // if we don't yet know the actual path of the incoming shard then conservatively assume + // it's going to the path with the least free space + if (actualPath == null || 
actualPath.equals(dataPath)) { + totalSize += Math.max( + routing.getExpectedShardSize(), + getExpectedShardSize(routing, 0L, clusterInfo, snapshotShardSizeInfo, metadata, routingTable) + ); + } } } @@ -158,7 +151,7 @@ public static long sizeOfUnaccountedShards( if (subtractShardsMovingAway) { for (ShardRouting routing : node.relocating()) { if (dataPath.equals(clusterInfo.getDataPath(routing))) { - totalSize -= getExpectedShardSize(routing, 0L, clusterInfo, null, metadata, routingTable); + totalSize -= getExpectedShardSize(routing, 0L, clusterInfo, snapshotShardSizeInfo, metadata, routingTable); } } } @@ -203,6 +196,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing false, usage.getPath(), allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), allocation.metadata(), allocation.routingTable(), allocation.unaccountedSearchableSnapshotSize(node) @@ -411,6 +405,7 @@ public Decision canRemain(IndexMetadata indexMetadata, ShardRouting shardRouting true, usage.getPath(), allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), allocation.metadata(), allocation.routingTable(), allocation.unaccountedSearchableSnapshotSize(node) @@ -490,6 +485,7 @@ private static DiskUsageWithRelocations getDiskUsage( subtractLeavingShards, usage.getPath(), allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), allocation.metadata(), allocation.routingTable(), allocation.unaccountedSearchableSnapshotSize(node) diff --git a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java index 793975048f846..1488963ab2644 100644 --- a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java +++ b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java @@ -8,6 +8,8 @@ package org.elasticsearch.common.component; +import org.elasticsearch.bootstrap.ElasticsearchProcess; + /** * Lifecycle state. Allows the following transitions: *