signatures) {
+ this.signatures = signatures;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This setting exists to conform with the {@link VerificationTask} interface.
+ * Default is {@code false}.
+ */
@Override
+ @Input
+ public boolean getIgnoreFailures() {
+ return ignoreFailures;
+ }
+
+ @Override
+ public void setIgnoreFailures(boolean ignoreFailures) {
+ this.ignoreFailures = ignoreFailures;
+ }
+
+ /**
+ * The default compiler target version used to expand references to bundled JDK signatures.
+ * E.g., with a target of 17, the bundled signatures name "jdk-deprecated" expands to "jdk-deprecated-17".
+ * This setting should be identical to the target version used in the compiler task.
+ * Defaults to {@code project.targetCompatibility}.
+ */
+ @Input
+ @Optional
+ public String getTargetCompatibility() {
+ return targetCompatibility;
+ }
+
+ /** @see #getTargetCompatibility */
+ public void setTargetCompatibility(String targetCompatibility) {
+ this.targetCompatibility = targetCompatibility;
+ }
+
+ // PatternFilterable implementation:
+
+ /**
+ * {@inheritDoc}
+ *
+ * Set of patterns matching all class files to be parsed from the classes directories.
+ * Can be changed, e.g., to exclude certain files (using excludes).
+ * The default is a single include with pattern '**&#47;*.class'.
+ */
+ @Override
+ @Internal
+ public Set<String> getIncludes() {
+ return getPatternSet().getIncludes();
+ }
+
+ @Override
+ public CheckForbiddenApisTask setIncludes(Iterable<String> includes) {
+ getPatternSet().setIncludes(includes);
+ return this;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * Set of patterns matching class files to be excluded from checking.
+ */
+ @Override
+ @Internal
+ public Set<String> getExcludes() {
+ return getPatternSet().getExcludes();
+ }
+
+ @Override
+ public CheckForbiddenApisTask setExcludes(Iterable<String> excludes) {
+ getPatternSet().setExcludes(excludes);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(String... arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(Iterable<String> arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(Spec<FileTreeElement> arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(@SuppressWarnings("rawtypes") Closure arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(String... arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(Iterable<String> arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(Spec<FileTreeElement> arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(@SuppressWarnings("rawtypes") Closure arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ /** Returns the classes to check. */
+ @InputFiles
+ @SkipWhenEmpty
@IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileTree getClassFiles() {
- return super.getClassFiles();
+ return getClassesDirs().getAsFileTree().matching(getPatternSet());
+ }
+
+ @Inject
+ public abstract WorkerExecutor getWorkerExecutor();
+
+ /** Executes the forbidden apis task. */
+ @TaskAction
+ public void checkForbidden() {
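+ // Run the check through the worker API without process isolation; the work action builds its own class loader from the parameters below.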
+ WorkQueue workQueue = getWorkerExecutor().noIsolation();
+ workQueue.submit(ForbiddenApisCheckWorkAction.class, parameters -> {
+ parameters.getClasspath().setFrom(getClasspath());
+ parameters.getClassDirectories().setFrom(getClassesDirs());
+ parameters.getClassFiles().from(getClassFiles().getFiles());
+ parameters.getSuppressAnnotations().set(getSuppressAnnotations());
+ parameters.getBundledSignatures().set(getBundledSignatures());
+ parameters.getSignatures().set(getSignatures());
+ parameters.getTargetCompatibility().set(getTargetCompatibility());
+ parameters.getIgnoreFailures().set(getIgnoreFailures());
+ parameters.getSuccessMarker().set(getSuccessMarker());
+ });
+ }
+
+ abstract static class ForbiddenApisCheckWorkAction implements WorkAction<Parameters> {
+
+ private final org.gradle.api.logging.Logger logger = Logging.getLogger(getClass());
+
+ @Inject
+ public ForbiddenApisCheckWorkAction() {}
+
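+ /** Logs a warning and returns {@code true} if the current Java runtime is not supported by the forbiddenapis checker. */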
+ private boolean checkIsUnsupportedJDK(Checker checker) {
+ if (checker.isSupportedJDK == false) {
+ final String msg = String.format(
+ Locale.ENGLISH,
+ "Your Java runtime (%s %s) is not supported by the forbiddenapis plugin. Please run the checks with a supported JDK!",
+ System.getProperty("java.runtime.name"),
+ System.getProperty("java.runtime.version")
+ );
+ logger.warn(msg);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void execute() {
+
+ final URLClassLoader urlLoader = createClassLoader(getParameters().getClasspath(), getParameters().getClassDirectories());
+ try {
+ final Checker checker = createChecker(urlLoader);
+ if (checkIsUnsupportedJDK(checker)) {
+ return;
+ }
+
+ final Set<String> suppressAnnotations = getParameters().getSuppressAnnotations().get();
+ for (String a : suppressAnnotations) {
+ checker.addSuppressAnnotation(a);
+ }
+
+ try {
+ final Set<String> bundledSignatures = getParameters().getBundledSignatures().get();
+ if (bundledSignatures.isEmpty() == false) {
+ final String bundledSigsJavaVersion = getParameters().getTargetCompatibility().get();
+ if (bundledSigsJavaVersion == null) {
+ logger.warn(
+ "The 'targetCompatibility' project or task property is missing. "
+ + "Trying to read bundled JDK signatures without compiler target. "
+ + "You have to explicitly specify the version in the resource name."
+ );
+ }
+ for (String bs : bundledSignatures) {
+ checker.addBundledSignatures(bs, bundledSigsJavaVersion);
+ }
+ }
+
+ final FileCollection signaturesFiles = getParameters().getSignaturesFiles();
+ if (signaturesFiles != null) {
+ for (final File f : signaturesFiles) {
+ checker.parseSignaturesFile(f);
+ }
+ }
+ final List<String> signatures = getParameters().getSignatures().get();
+ if ((signatures != null) && !signatures.isEmpty()) {
+ final StringBuilder sb = new StringBuilder();
+ for (String line : signatures) {
+ sb.append(line).append(NL);
+ }
+ checker.parseSignaturesString(sb.toString());
+ }
+ } catch (IOException ioe) {
+ throw new GradleException("IO problem while reading files with API signatures.", ioe);
+ } catch (ParseException pe) {
+ throw new InvalidUserDataException("Parsing signatures failed: " + pe.getMessage(), pe);
+ }
+
+ if (checker.hasNoSignatures()) {
+ if (checker.noSignaturesFilesParsed()) {
+ throw new InvalidUserDataException(
+ "No signatures were added to task; use properties 'signatures', 'bundledSignatures', 'signaturesURLs', and/or 'signaturesFiles' to define those!"
+ );
+ } else {
+ logger.info("Skipping execution because no API signatures are available.");
+ return;
+ }
+ }
+
+ try {
+ checker.addClassesToCheck(getParameters().getClassFiles());
+ } catch (IOException ioe) {
+ throw new GradleException("Failed to load one of the given class files.", ioe);
+ }
+ checker.run();
+ writeMarker(getParameters().getSuccessMarker().getAsFile().get());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ // Close the classloader to free resources:
+ try {
+ if (urlLoader != null) urlLoader.close();
+ } catch (IOException ioe) {
+ // getLogger().warn("Cannot close classloader: ".concat(ioe.toString()));
+ }
+ }
+ }
+
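+ /** Writes an empty marker file recording a successful check run, used as this task's output for up-to-date tracking. */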
+ private void writeMarker(File successMarker) throws IOException {
+ Files.write(successMarker.toPath(), new byte[] {}, StandardOpenOption.CREATE);
+ }
+
+ private URLClassLoader createClassLoader(FileCollection classpath, FileCollection classesDirs) {
+ if (classesDirs == null || classpath == null) {
+ throw new InvalidUserDataException("Missing 'classesDirs' or 'classpath' property.");
+ }
+
+ final Set<File> cpElements = new LinkedHashSet<>();
+ cpElements.addAll(classpath.getFiles());
+ cpElements.addAll(classesDirs.getFiles());
+ final URL[] urls = new URL[cpElements.size()];
+ try {
+ int i = 0;
+ for (final File cpElement : cpElements) {
+ urls[i++] = cpElement.toURI().toURL();
+ }
+ assert i == urls.length;
+ } catch (MalformedURLException mfue) {
+ throw new InvalidUserDataException("Failed to build classpath URLs.", mfue);
+ }
+
+ return URLClassLoader.newInstance(urls, ClassLoader.getSystemClassLoader());
+ }
+
+ @NotNull
+ private Checker createChecker(URLClassLoader urlLoader) {
+ final EnumSet<Checker.Option> options = EnumSet.noneOf(Checker.Option.class);
+ options.add(FAIL_ON_MISSING_CLASSES);
+ if (getParameters().getIgnoreFailures().get() == false) {
+ options.add(FAIL_ON_VIOLATION);
+ }
+ options.add(FAIL_ON_UNRESOLVABLE_SIGNATURES);
+ options.add(DISABLE_CLASSLOADING_CACHE);
+ final Checker checker = new Checker(new GradleForbiddenApiLogger(logger), urlLoader, options);
+ return checker;
+ }
+
+ private static class GradleForbiddenApiLogger implements Logger {
+
+ private final org.gradle.api.logging.Logger delegate;
+
+ GradleForbiddenApiLogger(org.gradle.api.logging.Logger delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void error(String msg) {
+ delegate.error(msg);
+ }
+
+ @Override
+ public void warn(String msg) {
+ delegate.warn(msg);
+ }
+
+ @Override
+ public void info(String msg) {
+ delegate.info(msg);
+ }
+
+ @Override
+ public void debug(String msg) {
+ delegate.debug(msg);
+ }
+ };
}
+
+ interface Parameters extends WorkParameters {
+ ConfigurableFileCollection getClassDirectories();
+
+ ConfigurableFileCollection getClassFiles();
+
+ ConfigurableFileCollection getClasspath();
+
+ SetProperty<String> getSuppressAnnotations();
+
+ RegularFileProperty getSuccessMarker();
+
+ ConfigurableFileCollection getSignaturesFiles();
+
+ SetProperty<String> getBundledSignatures();
+
+ Property<String> getTargetCompatibility();
+
+ Property<Boolean> getIgnoreFailures();
+
+ ListProperty<String> getSignatures();
+
+ }
+
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
index 71de2626d5fca..092230a2b12ea 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
@@ -88,8 +88,6 @@ public class DependencyLicensesTask extends DefaultTask {
private final Logger logger = Logging.getLogger(getClass());
- private static final String SHA_EXTENSION = ".sha1";
-
// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
// check from distribution to core (ie this should only be run on java projects)
/**
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
index 96fb11214902a..e24dd5ab2094b 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
@@ -8,50 +8,37 @@
package org.elasticsearch.gradle.internal.precommit;
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension;
-import groovy.lang.Closure;
-
import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask;
import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin;
import org.elasticsearch.gradle.internal.info.BuildParams;
import org.gradle.api.Project;
import org.gradle.api.Task;
-import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.plugins.JavaBasePlugin;
-import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.specs.Specs;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskProvider;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
+import java.io.File;
import java.util.Set;
-import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_EXTENSION_NAME;
import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_TASK_NAME;
+import static org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask.BUNDLED_SIGNATURE_DEFAULTS;
public class ForbiddenApisPrecommitPlugin extends PrecommitPlugin {
+
@Override
public TaskProvider<? extends Task> createTask(Project project) {
project.getPluginManager().apply(JavaBasePlugin.class);
- // create Extension for defaults:
- var checkForbiddenApisExtension = project.getExtensions()
- .create(FORBIDDEN_APIS_EXTENSION_NAME, CheckForbiddenApisExtension.class, project);
-
// Create a convenience task for all checks (this does not conflict with extension, as it has higher priority in DSL):
var forbiddenTask = project.getTasks()
.register(FORBIDDEN_APIS_TASK_NAME, task -> { task.setDescription("Runs forbidden-apis checks."); });
- JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class);
- // Define our tasks (one for each SourceSet):
-
TaskProvider<ExportElasticsearchBuildResourcesTask> resourcesTask = project.getTasks()
.register("forbiddenApisResources", ExportElasticsearchBuildResourcesTask.class);
- Path resourcesDir = project.getBuildDir().toPath().resolve("forbidden-apis-config");
+ File resourcesDir = project.getLayout().getBuildDirectory().dir("forbidden-apis-config").get().getAsFile();
resourcesTask.configure(t -> {
- t.setOutputDir(resourcesDir.toFile());
+ t.setOutputDir(resourcesDir);
t.copy("forbidden/jdk-signatures.txt");
t.copy("forbidden/jdk-deprecated.txt");
t.copy("forbidden/es-all-signatures.txt");
@@ -65,60 +52,36 @@ public TaskProvider<? extends Task> createTask(Project project) {
String sourceSetTaskName = sourceSet.getTaskName(FORBIDDEN_APIS_TASK_NAME, null);
var sourceSetTask = project.getTasks().register(sourceSetTaskName, CheckForbiddenApisTask.class, t -> {
t.setDescription("Runs forbidden-apis checks on '${sourceSet.name}' classes.");
+ t.setResourcesDir(resourcesDir);
t.getOutputs().upToDateWhen(Specs.SATISFIES_ALL);
t.setClassesDirs(sourceSet.getOutput().getClassesDirs());
t.dependsOn(resourcesTask);
- t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath()).plus(sourceSet.getOutput()));
+ t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath()));
t.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion().getMajorVersion());
- t.setBundledSignatures(Set.of("jdk-unsafe", "jdk-non-portable", "jdk-system-out"));
+ t.getBundledSignatures().set(BUNDLED_SIGNATURE_DEFAULTS);
t.setSignaturesFiles(
project.files(
- resourcesDir.resolve("forbidden/jdk-signatures.txt"),
- resourcesDir.resolve("forbidden/es-all-signatures.txt"),
- resourcesDir.resolve("forbidden/jdk-deprecated.txt")
+ resourcesDir.toPath().resolve("forbidden/jdk-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/es-all-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/jdk-deprecated.txt")
)
);
- t.setSuppressAnnotations(Set.of("**.SuppressForbidden"));
+ t.getSuppressAnnotations().set(Set.of("**.SuppressForbidden"));
if (t.getName().endsWith("Test")) {
t.setSignaturesFiles(
t.getSignaturesFiles()
.plus(
project.files(
- resourcesDir.resolve("forbidden/es-test-signatures.txt"),
- resourcesDir.resolve("forbidden/http-signatures.txt")
+ resourcesDir.toPath().resolve("forbidden/es-test-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/http-signatures.txt")
)
)
);
} else {
t.setSignaturesFiles(
- t.getSignaturesFiles().plus(project.files(resourcesDir.resolve("forbidden/es-server-signatures.txt")))
+ t.getSignaturesFiles().plus(project.files(resourcesDir.toPath().resolve("forbidden/es-server-signatures.txt")))
);
}
- ExtraPropertiesExtension ext = t.getExtensions().getExtraProperties();
- ext.set("replaceSignatureFiles", new Closure(t) {
- @Override
- public Void call(Object... names) {
- List<Path> resources = new ArrayList<>(names.length);
- for (Object name : names) {
- resources.add(resourcesDir.resolve("forbidden/" + name + ".txt"));
- }
- t.setSignaturesFiles(project.files(resources));
- return null;
- }
-
- });
- ext.set("addSignatureFiles", new Closure(t) {
- @Override
- public Void call(Object... names) {
- List<Path> resources = new ArrayList<>(names.length);
- for (Object name : names) {
- resources.add(resourcesDir.resolve("forbidden/" + name + ".txt"));
- }
- t.setSignaturesFiles(t.getSignaturesFiles().plus(project.files(resources)));
- return null;
- }
- });
-
});
forbiddenTask.configure(t -> t.dependsOn(sourceSetTask));
});
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
index 0059913ad086d..559d7536c310a 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
@@ -52,7 +52,7 @@ public LoggerUsageTask(ObjectFactory objectFactory) {
}
@Inject
- abstract public WorkerExecutor getWorkerExecutor();
+ public abstract WorkerExecutor getWorkerExecutor();
@TaskAction
public void runLoggerUsageTask() {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
index 1ff6e2f505436..c602a50c2adb8 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
@@ -58,6 +58,7 @@
*/
public class RestTestBasePlugin implements Plugin<Project> {
+ private static final String TESTS_MAX_PARALLEL_FORKS_SYSPROP = "tests.max.parallel.forks";
private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java";
private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution";
private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution";
@@ -123,6 +124,7 @@ public void apply(Project project) {
// Enable parallel execution for these tests since each test gets its own cluster
task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2);
+ nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks()));
// Disable test failure reporting since this stuff is now captured in build scans
task.getInputs().property(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, false);
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
index eee1c4c21eb08..76004e3e5f6db 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java
@@ -18,6 +18,7 @@
import com.fasterxml.jackson.databind.node.TextNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLParser;
+import com.google.common.collect.Sets;
import org.apache.commons.lang3.tuple.Pair;
import org.elasticsearch.gradle.Version;
@@ -44,6 +45,7 @@
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.file.FileTree;
import org.gradle.api.model.ObjectFactory;
+import org.gradle.api.provider.ListProperty;
import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
@@ -67,7 +69,6 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
@@ -75,7 +76,7 @@
/**
* A task to transform REST tests for use in REST API compatibility before they are executed.
*/
-public class RestCompatTestTransformTask extends DefaultTask {
+public abstract class RestCompatTestTransformTask extends DefaultTask {
private static final YAMLFactory YAML_FACTORY = new YAMLFactory();
private static final ObjectMapper MAPPER = new ObjectMapper(YAML_FACTORY);
@@ -90,30 +91,28 @@ public class RestCompatTestTransformTask extends DefaultTask {
private final DirectoryProperty sourceDirectory;
private final DirectoryProperty outputDirectory;
private final PatternFilterable testPatternSet;
- private final Factory<PatternSet> patternSetFactory;
- private final List<RestTestTransform<?>> transformations = new ArrayList<>();
// PatternFilterable -> reason why skipped.
private final Map<PatternFilterable, String> skippedTestByFilePatternTransformations = new HashMap<>();
// PatternFilterable -> list of full test names and reasons. Needed for 1 pattern may include many tests and reasons
private final Map<PatternFilterable, List<Pair<String, String>>> skippedTestByTestNameTransformations = new HashMap<>();
@Inject
- public RestCompatTestTransformTask(
- FileSystemOperations fileSystemOperations,
- Factory<PatternSet> patternSetFactory,
- ObjectFactory objectFactory
- ) {
- this.patternSetFactory = patternSetFactory;
+ protected Factory<PatternSet> getPatternSetFactory() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Inject
+ public RestCompatTestTransformTask(FileSystemOperations fileSystemOperations, ObjectFactory objectFactory) {
this.fileSystemOperations = fileSystemOperations;
this.compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1;
this.sourceDirectory = objectFactory.directoryProperty();
this.outputDirectory = objectFactory.directoryProperty();
- this.testPatternSet = patternSetFactory.create();
+ this.testPatternSet = getPatternSetFactory().create();
this.testPatternSet.include("/*" + "*/*.yml"); // concat these strings to keep build from thinking this is invalid javadoc
// always inject compat headers
headers.put("Content-Type", "application/vnd.elasticsearch+json;compatible-with=" + compatibleVersion);
headers.put("Accept", "application/vnd.elasticsearch+json;compatible-with=" + compatibleVersion);
- transformations.add(new InjectHeaders(headers, Set.of(RestCompatTestTransformTask::doesNotHaveCatOperation)));
+ getTransformations().add(new InjectHeaders(headers, Sets.newHashSet(RestCompatTestTransformTask::doesNotHaveCatOperation)));
}
private static boolean doesNotHaveCatOperation(ObjectNode doNodeValue) {
@@ -143,7 +142,7 @@ public void skipTest(String fullTestName, String reason) {
);
}
- PatternSet skippedPatternSet = patternSetFactory.create();
+ PatternSet skippedPatternSet = getPatternSetFactory().create();
// create file patterns for all a1/a2/a3/b.yml possibilities.
for (int i = testParts.length - 1; i > 1; i--) {
final String lastPart = testParts[i];
@@ -157,7 +156,7 @@ public void skipTest(String fullTestName, String reason) {
}
public void skipTestsByFilePattern(String filePattern, String reason) {
- PatternSet skippedPatternSet = patternSetFactory.create();
+ PatternSet skippedPatternSet = getPatternSetFactory().create();
skippedPatternSet.include(filePattern);
skippedTestByFilePatternTransformations.put(skippedPatternSet, reason);
}
@@ -170,7 +169,7 @@ public void skipTestsByFilePattern(String filePattern, String reason) {
* @param value the value used in the replacement. For example "bar"
*/
public void replaceValueInMatch(String subKey, Object value) {
- transformations.add(new ReplaceValueInMatch(subKey, MAPPER.convertValue(value, JsonNode.class)));
+ getTransformations().add(new ReplaceValueInMatch(subKey, MAPPER.convertValue(value, JsonNode.class)));
}
/**
@@ -181,7 +180,7 @@ public void replaceValueInMatch(String subKey, Object value) {
* @param testName the testName to apply replacement
*/
public void replaceValueInMatch(String subKey, Object value, String testName) {
- transformations.add(new ReplaceValueInMatch(subKey, MAPPER.convertValue(value, JsonNode.class), testName));
+ getTransformations().add(new ReplaceValueInMatch(subKey, MAPPER.convertValue(value, JsonNode.class), testName));
}
/**
@@ -193,7 +192,7 @@ public void replaceValueInMatch(String subKey, Object value, String testName) {
* @see ReplaceKeyInDo
*/
public void replaceKeyInDo(String oldKeyName, String newKeyName, String testName) {
- transformations.add(new ReplaceKeyInDo(oldKeyName, newKeyName, testName));
+ getTransformations().add(new ReplaceKeyInDo(oldKeyName, newKeyName, testName));
}
/**
@@ -204,7 +203,7 @@ public void replaceKeyInDo(String oldKeyName, String newKeyName, String testName
* @see ReplaceKeyInDo
*/
public void replaceKeyInDo(String oldKeyName, String newKeyName) {
- transformations.add(new ReplaceKeyInDo(oldKeyName, newKeyName, null));
+ getTransformations().add(new ReplaceKeyInDo(oldKeyName, newKeyName, null));
}
/**
@@ -215,7 +214,7 @@ public void replaceKeyInDo(String oldKeyName, String newKeyName) {
* @see ReplaceKeyInLength
*/
public void replaceKeyInLength(String oldKeyName, String newKeyName) {
- transformations.add(new ReplaceKeyInLength(oldKeyName, newKeyName, null));
+ getTransformations().add(new ReplaceKeyInLength(oldKeyName, newKeyName, null));
}
/**
@@ -226,7 +225,7 @@ public void replaceKeyInLength(String oldKeyName, String newKeyName) {
* @param value the value used in the replacement. For example 99
*/
public void replaceValueInLength(String subKey, int value) {
- transformations.add(new ReplaceValueInLength(subKey, MAPPER.convertValue(value, NumericNode.class)));
+ getTransformations().add(new ReplaceValueInLength(subKey, MAPPER.convertValue(value, NumericNode.class)));
}
/**
@@ -238,7 +237,7 @@ public void replaceValueInLength(String subKey, int value) {
* @param testName the testName to apply replacement
*/
public void replaceValueInLength(String subKey, int value, String testName) {
- transformations.add(new ReplaceValueInLength(subKey, MAPPER.convertValue(value, NumericNode.class), testName));
+ getTransformations().add(new ReplaceValueInLength(subKey, MAPPER.convertValue(value, NumericNode.class), testName));
}
/**
@@ -249,7 +248,7 @@ public void replaceValueInLength(String subKey, int value, String testName) {
* @see ReplaceKeyInMatch
*/
public void replaceKeyInMatch(String oldKeyName, String newKeyName) {
- transformations.add(new ReplaceKeyInMatch(oldKeyName, newKeyName, null));
+ getTransformations().add(new ReplaceKeyInMatch(oldKeyName, newKeyName, null));
}
/**
@@ -260,7 +259,7 @@ public void replaceKeyInMatch(String oldKeyName, String newKeyName) {
* @param newValue the value used in the replacement
*/
public void replaceIsTrue(String oldValue, Object newValue) {
- transformations.add(new ReplaceIsTrue(oldValue, MAPPER.convertValue(newValue, TextNode.class)));
+ getTransformations().add(new ReplaceIsTrue(oldValue, MAPPER.convertValue(newValue, TextNode.class)));
}
/**
@@ -271,7 +270,7 @@ public void replaceIsTrue(String oldValue, Object newValue) {
* @param newValue the value used in the replacement
*/
public void replaceIsFalse(String oldValue, Object newValue) {
- transformations.add(new ReplaceIsFalse(oldValue, MAPPER.convertValue(newValue, TextNode.class)));
+ getTransformations().add(new ReplaceIsFalse(oldValue, MAPPER.convertValue(newValue, TextNode.class)));
}
/**
@@ -283,7 +282,7 @@ public void replaceIsFalse(String oldValue, Object newValue) {
* @param testName the testName to apply replacement
*/
public void replaceIsFalse(String oldValue, Object newValue, String testName) {
- transformations.add(new ReplaceIsFalse(oldValue, MAPPER.convertValue(newValue, TextNode.class), testName));
+ getTransformations().add(new ReplaceIsFalse(oldValue, MAPPER.convertValue(newValue, TextNode.class), testName));
}
/**
@@ -295,7 +294,7 @@ public void replaceIsFalse(String oldValue, Object newValue, String testName) {
* @param newValue the value used in the replacement
*/
public void replaceValueTextByKeyValue(String key, String oldValue, Object newValue) {
- transformations.add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class)));
+ getTransformations().add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class)));
}
/**
@@ -308,7 +307,7 @@ public void replaceValueTextByKeyValue(String key, String oldValue, Object newVa
* @param testName the testName to apply replacement
*/
public void replaceValueTextByKeyValue(String key, String oldValue, Object newValue, String testName) {
- transformations.add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class), testName));
+ getTransformations().add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class), testName));
}
/**
@@ -319,7 +318,7 @@ public void replaceValueTextByKeyValue(String key, String oldValue, Object newVa
* @param subKey the key name directly under match to replace. For example "_type"
*/
public void removeMatch(String subKey) {
- transformations.add(new RemoveMatch(subKey));
+ getTransformations().add(new RemoveMatch(subKey));
}
/**
@@ -331,7 +330,7 @@ public void removeMatch(String subKey) {
* @param testName the testName to apply removal
*/
public void removeMatch(String subKey, String testName) {
- transformations.add(new RemoveMatch(subKey, testName));
+ getTransformations().add(new RemoveMatch(subKey, testName));
}
/**
@@ -342,7 +341,7 @@ public void removeMatch(String subKey, String testName) {
* @param testName the testName to apply addition
*/
public void addMatch(String subKey, Object value, String testName) {
- transformations.add(new AddMatch(subKey, MAPPER.convertValue(value, JsonNode.class), testName));
+ getTransformations().add(new AddMatch(subKey, MAPPER.convertValue(value, JsonNode.class), testName));
}
/**
@@ -352,7 +351,7 @@ public void addMatch(String subKey, Object value, String testName) {
* @param warnings the warning(s) to add
*/
public void addWarning(String testName, String... warnings) {
- transformations.add(new InjectWarnings(Arrays.asList(warnings), testName));
+ getTransformations().add(new InjectWarnings(Arrays.asList(warnings), testName));
}
/**
@@ -362,7 +361,7 @@ public void addWarning(String testName, String... warnings) {
* @param warningsRegex the regex warning(s) to add
*/
public void addWarningRegex(String testName, String... warningsRegex) {
- transformations.add(new InjectWarnings(true, Arrays.asList(warningsRegex), testName));
+ getTransformations().add(new InjectWarnings(true, Arrays.asList(warningsRegex), testName));
}
/**
@@ -371,7 +370,7 @@ public void addWarningRegex(String testName, String... warningsRegex) {
* @param warnings the warning(s) to remove
*/
public void removeWarning(String... warnings) {
- transformations.add(new RemoveWarnings(Set.copyOf(Arrays.asList(warnings))));
+ getTransformations().add(new RemoveWarnings(Sets.newHashSet(warnings)));
}
/**
@@ -381,7 +380,7 @@ public void removeWarning(String... warnings) {
* @param testName the test name to remove the warning
*/
public void removeWarningForTest(String warnings, String testName) {
- transformations.add(new RemoveWarnings(Set.copyOf(Arrays.asList(warnings)), testName));
+ getTransformations().add(new RemoveWarnings(Sets.newHashSet(warnings), testName));
}
/**
@@ -390,7 +389,7 @@ public void removeWarningForTest(String warnings, String testName) {
* @param allowedWarnings the warning(s) to add
*/
public void addAllowedWarning(String... allowedWarnings) {
- transformations.add(new InjectAllowedWarnings(Arrays.asList(allowedWarnings)));
+ getTransformations().add(new InjectAllowedWarnings(Arrays.asList(allowedWarnings)));
}
/**
@@ -399,7 +398,7 @@ public void addAllowedWarning(String... allowedWarnings) {
* @param allowedWarningsRegex the regex warning(s) to add
*/
public void addAllowedWarningRegex(String... allowedWarningsRegex) {
- transformations.add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex)));
+ getTransformations().add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex)));
}
/**
@@ -409,7 +408,7 @@ public void addAllowedWarningRegex(String... allowedWarningsRegex) {
* @testName the test name to add a allowedWarningRegex
*/
public void addAllowedWarningRegexForTest(String allowedWarningsRegex, String testName) {
- transformations.add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex), testName));
+ getTransformations().add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex), testName));
}
@OutputDirectory
@@ -463,10 +462,10 @@ public void transform() throws IOException {
skippedFilesWithTestAndReason.get(file).forEach(fullTestNameAndReasonPair -> {
String prefix = file.getName().replace(".yml", "/");
String singleTestName = fullTestNameAndReasonPair.getLeft().replaceAll(".*" + prefix, "");
- transformations.add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
+ getTransformations().add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight()));
});
}
- transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), transformations);
+ transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), getTransformations().get());
}
// convert to url to ensure forward slashes
@@ -490,9 +489,7 @@ public DirectoryProperty getSourceDirectory() {
}
@Nested
- public List<RestTestTransform<?>> getTransformations() {
- return transformations;
- }
+ public abstract ListProperty<RestTestTransform<?>> getTransformations();
@Input
public String getSkippedTestByFilePatternTransformations() {
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index bfc1b1e6be960..b0998957910a2 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -232,15 +232,7 @@ public class RestHighLevelClient implements Closeable {
* {@link RestClient} to be used to perform requests.
*/
public RestHighLevelClient(RestClientBuilder restClientBuilder) {
- this(restClientBuilder, Collections.emptyList());
- }
-
- /**
- * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
- * {@link RestClient} to be used to perform requests and parsers for custom response sections added to Elasticsearch through plugins.
- */
- protected RestHighLevelClient(RestClientBuilder restClientBuilder, List<NamedXContentRegistry.Entry> namedXContentEntries) {
- this(restClientBuilder.build(), RestClient::close, namedXContentEntries);
+ this(restClientBuilder.build(), RestClient::close, Collections.emptyList());
}
/**
@@ -265,7 +257,7 @@ protected RestHighLevelClient(
* The consumer argument allows to control what needs to be done when the {@link #close()} method is called.
* Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins.
*/
- protected RestHighLevelClient(
+ private RestHighLevelClient(
RestClient restClient,
CheckedConsumer<RestClient, IOException> doClose,
List<NamedXContentRegistry.Entry> namedXContentEntries,
@@ -309,17 +301,6 @@ public final void close() throws IOException {
doClose.accept(client);
}
- /**
- * Executes a bulk request using the Bulk API.
- * See Bulk API on elastic.co
- * @param bulkRequest the request
- * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
- * @return the response
- */
- public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException {
- return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet());
- }
-
/**
* Asynchronously executes a bulk request using the Bulk API.
* See Bulk API on elastic.co
@@ -410,7 +391,7 @@ public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, Requ
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
- protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(
+ private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(
Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
@@ -425,7 +406,7 @@ protected final Resp performRequestAndParseEnt
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
- protected final <Req extends ActionRequest, Resp> Resp performRequest(
+ private <Req extends ActionRequest, Resp> Resp performRequest(
Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
@@ -439,23 +420,6 @@ protected final Resp performRequest(
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}
- /**
- * Defines a helper method for performing a request.
- */
- protected final <Req extends Validatable, Resp> Resp performRequest(
- Req request,
- CheckedFunction<Req, Request, IOException> requestConverter,
- RequestOptions options,
- CheckedFunction<Response, Resp, IOException> responseConverter,
- Set<Integer> ignores
- ) throws IOException {
- Optional<ValidationException> validationException = request.validate();
- if (validationException != null && validationException.isPresent()) {
- throw validationException.get();
- }
- return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
- }
-
/**
* Provides common functionality for performing a request.
*/
@@ -499,7 +463,7 @@ private Resp internalPerformRequest(
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
- protected final <Req extends ActionRequest, Resp> Cancellable performRequestAsyncAndParseEntity(
+ private <Req extends ActionRequest, Resp> Cancellable performRequestAsyncAndParseEntity(
Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
@@ -523,7 +487,7 @@ protected final Cancellable performRequestAsyn
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
- protected final <Req extends ActionRequest, Resp> Cancellable performRequestAsync(
+ private <Req extends ActionRequest, Resp> Cancellable performRequestAsync(
Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
RequestOptions options,
@@ -564,7 +528,7 @@ private Cancellable internalPerformRequestAsync(
return performClientRequestAsync(req, responseListener);
}
- final <Resp> ResponseListener wrapResponseListener(
+ private <Resp> ResponseListener wrapResponseListener(
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> actionListener,
Set<Integer> ignores
@@ -611,7 +575,7 @@ public void onFailure(Exception exception) {
* that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned
* exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing.
*/
- protected final ElasticsearchStatusException parseResponseException(ResponseException responseException) {
+ private ElasticsearchStatusException parseResponseException(ResponseException responseException) {
Response response = responseException.getResponse();
HttpEntity entity = response.getEntity();
ElasticsearchStatusException elasticsearchException;
@@ -631,7 +595,7 @@ protected final ElasticsearchStatusException parseResponseException(ResponseExce
return elasticsearchException;
}
- protected final <Resp> Resp parseEntity(final HttpEntity entity, final CheckedFunction<XContentParser, Resp, IOException> entityParser)
+ private <Resp> Resp parseEntity(final HttpEntity entity, final CheckedFunction<XContentParser, Resp, IOException> entityParser)
throws IOException {
if (entity == null) {
throw new IllegalStateException("Response body expected but not returned");
@@ -735,7 +699,7 @@ private Cancellable performClientRequestAsync(Request request, ResponseListener
ListenableFuture<Optional<String>> versionCheck = getVersionValidationFuture();
// Create a future that tracks cancellation of this method's result and forwards cancellation to the actual LLRC request.
- CompletableFuture<Void> cancellationForwarder = new CompletableFuture();
+ CompletableFuture<Void> cancellationForwarder = new CompletableFuture<>();
Cancellable result = new Cancellable() {
@Override
public void cancel() {
@@ -754,7 +718,7 @@ void runIfNotCancelled(Runnable runnable) {
// Send the request after we have done the version compatibility check. Note that if it has already happened, the listener will
// be called immediately on the same thread with no asynchronous scheduling overhead.
- versionCheck.addListener(new ActionListener<Optional<String>>() {
+ versionCheck.addListener(new ActionListener<>() {
@Override
public void onResponse(Optional<String> validation) {
if (validation.isPresent() == false) {
@@ -779,13 +743,13 @@ public void onFailure(Exception e) {
});
return result;
- };
+ }
/**
* Go through all the request's existing headers, looking for {@code headerName} headers and if they exist,
* changing them to use version compatibility. If no request headers are changed, modify the entity type header if appropriate
*/
- boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) {
+ private boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) {
// Modify any existing "Content-Type" headers on the request to use the version compatibility, if available
boolean contentTypeModified = false;
for (Header header : new ArrayList<>(newOptions.getHeaders())) {
@@ -807,7 +771,7 @@ boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHead
* Modify the given header to be version compatible, if necessary.
* Returns true if a modification was made, false otherwise.
*/
- boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) {
+ private boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) {
for (EntityType type : EntityType.values()) {
final String headerValue = header.getValue();
if (headerValue.startsWith(type.header())) {
@@ -825,7 +789,7 @@ boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String he
* modifying the "Content-Type" and "Accept" headers if present, or modifying the header based
* on the request's entity type.
*/
- void modifyRequestForCompatibility(Request request) {
+ private void modifyRequestForCompatibility(Request request) {
final Header entityHeader = request.getEntity() == null ? null : request.getEntity().getContentType();
final RequestOptions.Builder newOptions = request.getOptions().toBuilder();
@@ -982,7 +946,7 @@ private Optional getVersionValidation(Response response) throws IOExcept
return Optional.empty();
}
- static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
+ private static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
Map<String, ContextParser<Object, ? extends Aggregation>> map = new HashMap<>();
map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c));
map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c));
@@ -1068,7 +1032,7 @@ static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
/**
* Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins.
*/
- static List<NamedXContentRegistry.Entry> getProvidedNamedXContents() {
+ private static List<NamedXContentRegistry.Entry> getProvidedNamedXContents() {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) {
entries.addAll(service.getNamedXContentParsers());
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java
deleted file mode 100644
index 7adcee74cb206..0000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.client.core;
-
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.Objects;
-import java.util.function.Function;
-
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-
-public class AcknowledgedResponse {
-
- protected static final String PARSE_FIELD_NAME = "acknowledged";
- private static final ConstructingObjectParser<AcknowledgedResponse, Void> PARSER = AcknowledgedResponse.generateParser(
- "acknowledged_response",
- AcknowledgedResponse::new,
- AcknowledgedResponse.PARSE_FIELD_NAME
- );
-
- private final boolean acknowledged;
-
- public AcknowledgedResponse(final boolean acknowledged) {
- this.acknowledged = acknowledged;
- }
-
- public boolean isAcknowledged() {
- return acknowledged;
- }
-
- protected static <T> ConstructingObjectParser<T, Void> generateParser(String name, Function<Boolean, T> ctor, String parseField) {
- ConstructingObjectParser<T, Void> p = new ConstructingObjectParser<>(name, true, args -> ctor.apply((boolean) args[0]));
- p.declareBoolean(constructorArg(), new ParseField(parseField));
- return p;
- }
-
- public static AcknowledgedResponse fromXContent(final XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- final AcknowledgedResponse that = (AcknowledgedResponse) o;
- return isAcknowledged() == that.isAcknowledged();
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(acknowledged);
- }
-
- /**
- * @return the field name this response uses to output the acknowledged flag
- */
- protected String getFieldName() {
- return PARSE_FIELD_NAME;
- }
-}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java
deleted file mode 100644
index a80a6bb2a15b7..0000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-package org.elasticsearch.client.core;
-
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.XContentParser;
-
-import java.io.IOException;
-
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-
-public class ShardsAcknowledgedResponse extends AcknowledgedResponse {
-
- protected static final String SHARDS_PARSE_FIELD_NAME = "shards_acknowledged";
-
- private static ConstructingObjectParser<ShardsAcknowledgedResponse, Void> buildParser() {
-
- ConstructingObjectParser<ShardsAcknowledgedResponse, Void> p = new ConstructingObjectParser<>(
- "freeze",
- true,
- args -> new ShardsAcknowledgedResponse((boolean) args[0], (boolean) args[1])
- );
- p.declareBoolean(constructorArg(), new ParseField(AcknowledgedResponse.PARSE_FIELD_NAME));
- p.declareBoolean(constructorArg(), new ParseField(SHARDS_PARSE_FIELD_NAME));
- return p;
- }
-
- private static final ConstructingObjectParser<ShardsAcknowledgedResponse, Void> PARSER = buildParser();
-
- private final boolean shardsAcknowledged;
-
- public ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) {
- super(acknowledged);
- this.shardsAcknowledged = shardsAcknowledged;
- }
-
- public boolean isShardsAcknowledged() {
- return shardsAcknowledged;
- }
-
- public static ShardsAcknowledgedResponse fromXContent(XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-}
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index 85d38b007e632..6006fae1c2d84 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.conventions.precommit.LicenseHeadersTask
@@ -60,7 +60,7 @@ tasks.named("processResources").configure {
]
}
-tasks.withType(CheckForbiddenApis).configureEach {
+tasks.withType(CheckForbiddenApisTask).configureEach {
//client does not depend on server, so only jdk and http signatures should be checked
replaceSignatureFiles('jdk-signatures', 'http-signatures')
}
@@ -71,8 +71,11 @@ tasks.named("forbiddenPatterns").configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { signatures ->
+ signatures -= 'jdk-non-portable'
+ signatures += 'jdk-internal'
+ signatures
+ }
}
// JarHell is part of es server, which we don't want to pull in
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
index 546e81445bb89..901917c7b25f8 100644
--- a/client/sniffer/build.gradle
+++ b/client/sniffer/build.gradle
@@ -57,8 +57,12 @@ tasks.named('forbiddenApisMain').configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { bundledSignatures ->
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ bundledSignatures
+ }
+
//client does not depend on server, so only jdk signatures should be checked
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/client/test/build.gradle b/client/test/build.gradle
index 18eb16883ab15..9ee222b036cd1 100644
--- a/client/test/build.gradle
+++ b/client/test/build.gradle
@@ -40,8 +40,11 @@ tasks.named('forbiddenApisMain').configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { bundledSignatures ->
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ bundledSignatures
+ }
//client does not depend on core, so only jdk signatures should be checked
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/distribution/tools/server-cli/build.gradle b/distribution/tools/server-cli/build.gradle
index 3ab5e6e86f5ba..623f9d40cd49e 100644
--- a/distribution/tools/server-cli/build.gradle
+++ b/distribution/tools/server-cli/build.gradle
@@ -5,7 +5,7 @@
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
apply plugin: 'elasticsearch.build'
@@ -20,7 +20,7 @@ tasks.named("test").configure {
systemProperty "tests.security.manager", "false"
}
-tasks.withType(CheckForbiddenApis).configureEach {
+tasks.withType(CheckForbiddenApisTask).configureEach {
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/docs/changelog/101148.yaml b/docs/changelog/101148.yaml
new file mode 100644
index 0000000000000..eabe288e69e88
--- /dev/null
+++ b/docs/changelog/101148.yaml
@@ -0,0 +1,6 @@
+pr: 101148
+summary: Add support for marking component templates as deprecated
+area: Indices APIs
+type: enhancement
+issues:
+ - 100992
diff --git a/docs/changelog/101426.yaml b/docs/changelog/101426.yaml
new file mode 100644
index 0000000000000..f9053ba1c1ec1
--- /dev/null
+++ b/docs/changelog/101426.yaml
@@ -0,0 +1,5 @@
+pr: 101426
+summary: Add undesired shard count
+area: Allocation
+type: enhancement
+issues: []
diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml
new file mode 100644
index 0000000000000..1b8691c9798ff
--- /dev/null
+++ b/docs/changelog/101629.yaml
@@ -0,0 +1,5 @@
+pr: 101629
+summary: Health report infrastructure doesn't trip the circuit breakers
+area: Health
+type: bug
+issues: []
diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml
new file mode 100644
index 0000000000000..48e01739aabc0
--- /dev/null
+++ b/docs/changelog/101648.yaml
@@ -0,0 +1,6 @@
+pr: 101648
+summary: "ESQL: Fix unreleased block in topn"
+area: ES|QL
+type: bug
+issues:
+ - 101588
diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml
new file mode 100644
index 0000000000000..79e3167696aee
--- /dev/null
+++ b/docs/changelog/101652.yaml
@@ -0,0 +1,5 @@
+pr: 101652
+summary: Fix race condition in `SnapshotsService`
+area: Snapshot/Restore
+type: bug
+issues: []
diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml
new file mode 100644
index 0000000000000..c3addf9296584
--- /dev/null
+++ b/docs/changelog/101713.yaml
@@ -0,0 +1,5 @@
+pr: 101713
+summary: Disable `weight_matches` when kNN query is present
+area: Highlighting
+type: bug
+issues: []
diff --git a/docs/changelog/101727.yaml b/docs/changelog/101727.yaml
new file mode 100644
index 0000000000000..24a7e1d5b4e48
--- /dev/null
+++ b/docs/changelog/101727.yaml
@@ -0,0 +1,5 @@
+pr: 101727
+summary: Fix listeners in `SharedBlobCacheService.readMultiRegions`
+area: Distributed
+type: bug
+issues: []
diff --git a/docs/changelog/98916.yaml b/docs/changelog/98916.yaml
new file mode 100644
index 0000000000000..a466e3deba009
--- /dev/null
+++ b/docs/changelog/98916.yaml
@@ -0,0 +1,5 @@
+pr: 98916
+summary: Make knn search a query
+area: Vector Search
+type: feature
+issues: []
diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc
index 13b216bac6345..9aae1ae70c5ac 100644
--- a/docs/painless/painless-contexts/painless-reindex-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc
@@ -19,7 +19,7 @@ reindexed into a target index.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`)::
The unique document id.
`ctx['_version']` (`int`)::
diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
index d8f9d4d7bae70..78a8b8d36d6bb 100644
--- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
@@ -20,7 +20,7 @@ result of query.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`, read-only)::
The unique document id.
`ctx['_version']` (`int`, read-only)::
diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc
index f9ae3434827d9..53b1008cfebff 100644
--- a/docs/painless/painless-contexts/painless-update-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-update-context.asciidoc
@@ -18,7 +18,7 @@ add, modify, or delete fields within a single document.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`, read-only)::
The unique document id.
`ctx['_version']` (`int`, read-only)::
diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc
index bd99f1d737bd8..2628b5abca9f3 100644
--- a/docs/reference/cluster/get-desired-balance.asciidoc
+++ b/docs/reference/cluster/get-desired-balance.asciidoc
@@ -6,7 +6,12 @@
NOTE: {cloud-only}
-Exposes the desired balance and basic metrics.
+Exposes:
+* the desired balance computation and reconciliation stats
+* balancing stats, such as the distribution of shards and the disk and ingest forecasts
+ across nodes and data tiers (based on the current cluster state)
+* the routing table with each shard's current and desired location
+* cluster info with node disk usage
[[get-desired-balance-request]]
==== {api-request-title}
@@ -33,6 +38,8 @@ The API returns the following result:
"reconciliation_time_in_millis": 0
},
"cluster_balance_stats" : {
+ "shard_count": 37,
+ "undesired_shard_allocation_count": 0,
"tiers": {
"data_hot" : {
"shard_count" : {
@@ -42,6 +49,13 @@ The API returns the following result:
"average" : 2.3333333333333335,
"std_dev" : 0.4714045207910317
},
+ "undesired_shard_allocation_count" : {
+ "total" : 0.0,
+ "min" : 0.0,
+ "max" : 0.0,
+ "average" : 0.0,
+ "std_dev" : 0.0
+ },
"forecast_write_load" : {
"total" : 21.0,
"min" : 6.0,
@@ -72,6 +86,13 @@ The API returns the following result:
"average" : 1.0,
"std_dev" : 0.0
},
+ "undesired_shard_allocation_count" : {
+ "total" : 0.0,
+ "min" : 0.0,
+ "max" : 0.0,
+ "average" : 0.0,
+ "std_dev" : 0.0
+ },
"forecast_write_load" : {
"total" : 0.0,
"min" : 0.0,
@@ -100,6 +121,7 @@ The API returns the following result:
"node_id": "UPYt8VwWTt-IADAEbqpLxA",
"roles": ["data_content"],
"shard_count": 10,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 8.5,
"forecast_disk_usage_bytes": 498435,
"actual_disk_usage_bytes": 498435
@@ -108,6 +130,7 @@ The API returns the following result:
"node_id": "bgC66tboTIeFQ0VgRGI4Gg",
"roles": ["data_content"],
"shard_count": 15,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 3.25,
"forecast_disk_usage_bytes": 384935,
"actual_disk_usage_bytes": 384935
@@ -116,6 +139,7 @@ The API returns the following result:
"node_id": "2x1VTuSOQdeguXPdN73yRw",
"roles": ["data_content"],
"shard_count": 12,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 6.0,
"forecast_disk_usage_bytes": 648766,
"actual_disk_usage_bytes": 648766
diff --git a/docs/reference/eql/syntax.asciidoc b/docs/reference/eql/syntax.asciidoc
index f592610f487c9..33a6fb745ac54 100644
--- a/docs/reference/eql/syntax.asciidoc
+++ b/docs/reference/eql/syntax.asciidoc
@@ -243,7 +243,7 @@ my_field like ("Value-*", "VALUE2", "VAL?") // case-sensitive
my_field like~ ("value-*", "value2", "val?") // case-insensitive
my_field regex ("[vV]alue-[0-9]", "VALUE[^2].?", "VAL3") // case-sensitive
-my_field regex~ ("value-[0-9]", "value[^2].?", "val3") // case-sensitive
+my_field regex~ ("value-[0-9]", "value[^2].?", "val3") // case-insensitive
----
`in` (case-sensitive)::
diff --git a/docs/reference/indices/put-component-template.asciidoc b/docs/reference/indices/put-component-template.asciidoc
index 794f01cb7f3ae..faf7e67039de7 100644
--- a/docs/reference/indices/put-component-template.asciidoc
+++ b/docs/reference/indices/put-component-template.asciidoc
@@ -169,6 +169,12 @@ created.
Optional user metadata about the component template. May have any contents.
This map is not automatically generated by {es}.
+`deprecated`::
+(Optional, boolean)
+Marks this component template as deprecated.
+When a deprecated component template is referenced while creating or updating
+a non-deprecated index template, {es} will emit a deprecation warning.
+
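+For illustration, a minimal sketch marking a component template as deprecated
+(`template_1` and its contents are placeholders):
+
+[source,console]
+----
+PUT _component_template/template_1
+{
+  "template": {
+    "settings": {
+      "number_of_shards": 1
+    }
+  },
+  "deprecated": true
+}
+----
+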
[[put-component-template-api-example]]
==== {api-examples-title}
diff --git a/docs/reference/indices/put-index-template.asciidoc b/docs/reference/indices/put-index-template.asciidoc
index 4dfd7252a9fa5..b9460bda86a09 100644
--- a/docs/reference/indices/put-index-template.asciidoc
+++ b/docs/reference/indices/put-index-template.asciidoc
@@ -167,6 +167,12 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=settings]
(Optional, integer)
Version number used to manage index templates externally.
This number is not automatically generated by {es}.
+
+`deprecated`::
+(Optional, boolean)
+Marks this index template as deprecated.
+When creating or updating a non-deprecated index template that uses deprecated components,
+{es} will emit a deprecation warning.
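+
+For illustration, a minimal sketch marking an index template as deprecated
+(`template_1` and the index pattern are placeholders):
+
+[source,console]
+----
+PUT _index_template/template_1
+{
+  "index_patterns": ["te*"],
+  "template": {
+    "settings": {
+      "number_of_shards": 1
+    }
+  },
+  "deprecated": true
+}
+----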
// end::index-template-api-body[]
[[put-index-template-api-example]]
diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc
index 874bfa64d3551..c9c3e16458618 100644
--- a/docs/reference/inference/delete-inference.asciidoc
+++ b/docs/reference/inference/delete-inference.asciidoc
@@ -2,6 +2,8 @@
[[delete-inference-api]]
=== Delete {infer} API
+experimental[]
+
Deletes an {infer} model deployment.
diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc
index bbf1d59c56213..b81f2663ec9e1 100644
--- a/docs/reference/inference/get-inference.asciidoc
+++ b/docs/reference/inference/get-inference.asciidoc
@@ -2,6 +2,8 @@
[[get-inference-api]]
=== Get {infer} API
+experimental[]
+
Retrieves {infer} model information.
[discrete]
diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index ec1f01bc4d093..0476ac57287d9 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -2,6 +2,8 @@
[[inference-apis]]
== {infer-cap} APIs
+experimental[]
+
You can use the following APIs to manage {infer} models and perform {infer}:
* <>
diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc
index 99dd4a059519f..f26a73d093091 100644
--- a/docs/reference/inference/post-inference.asciidoc
+++ b/docs/reference/inference/post-inference.asciidoc
@@ -2,6 +2,8 @@
[[post-inference-api]]
=== Perform inference API
+experimental[]
+
Performs an inference task on an input text by using an {infer} model.
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index f4737875971c7..3b8cd19aded53 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -2,6 +2,8 @@
[[put-inference-api]]
=== Create {infer} API
+experimental[]
+
Creates a model to perform an {infer} task.
diff --git a/docs/reference/ingest/apis/put-pipeline.asciidoc b/docs/reference/ingest/apis/put-pipeline.asciidoc
index 97c6a176dc256..ab1139b999952 100644
--- a/docs/reference/ingest/apis/put-pipeline.asciidoc
+++ b/docs/reference/ingest/apis/put-pipeline.asciidoc
@@ -94,6 +94,12 @@ how the version attribute is used.
(Optional, object)
Optional metadata about the ingest pipeline. May have any contents. This
map is not automatically generated by {es}.
+
+`deprecated`::
+(Optional, boolean)
+Marks this ingest pipeline as deprecated.
+When a deprecated ingest pipeline is referenced as the default or final
+pipeline while creating or updating a non-deprecated index template,
+{es} will emit a deprecation warning.
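+
+For illustration, a minimal sketch marking a pipeline as deprecated
+(`my-pipeline-id` and the processor are placeholders):
+
+[source,console]
+----
+PUT _ingest/pipeline/my-pipeline-id
+{
+  "description": "My optional pipeline description",
+  "deprecated": true,
+  "processors": [
+    {
+      "set": {
+        "field": "my-keyword-field",
+        "value": "foo"
+      }
+    }
+  ]
+}
+----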
// end::pipeline-object[]
[[put-pipeline-api-example]]
diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc
new file mode 100644
index 0000000000000..f9cc31748ef71
--- /dev/null
+++ b/docs/reference/query-dsl/knn-query.asciidoc
@@ -0,0 +1,222 @@
+[[query-dsl-knn-query]]
+=== Knn query
+++++
+Knn
+++++
+
+Finds the _k_ nearest vectors to a query vector, as measured by a similarity
+metric. The _knn_ query finds nearest vectors through approximate search on
+indexed `dense_vector` fields. The preferred way to do approximate kNN search
+is through the <> of a search request. The _knn_ query is
+reserved for expert cases, where there is a need to combine this query with
+other queries.
+
+[[knn-query-ex-request]]
+==== Example request
+
+
+. Create an index with a `dense_vector` field:
++
+[source,console]
+PUT my-image-index
+{
+ "mappings": {
+ "properties": {
+ "image-vector": {
+ "type": "dense_vector",
+ "dims": 3,
+ "index": true,
+ "similarity": "l2_norm"
+ },
+ "file-type": {
+ "type": "keyword"
+ }
+ }
+ }
+}
+----
+
+. Index your data.
++
+[source,console]
+----
+POST my-image-index/_bulk?refresh=true
+{ "index": { "_id": "1" } }
+{ "image-vector": [1, 5, -20], "file-type": "jpg" }
+{ "index": { "_id": "2" } }
+{ "image-vector": [42, 8, -15], "file-type": "png" }
+{ "index": { "_id": "3" } }
+{ "image-vector": [15, 11, 23], "file-type": "jpg" }
+----
+//TEST[continued]
+
+. Run the search using the `knn` query, asking for the top 3 nearest vectors.
++
+[source,console]
+----
+POST my-image-index/_search
+{
+ "size" : 3,
+ "query" : {
+ "knn": {
+ "field": "image-vector",
+ "query_vector": [-5, 9, -12],
+ "num_candidates": 10
+ }
+ }
+}
+----
+//TEST[continued]
+
+NOTE: The `knn` query doesn't have a separate `k` parameter. As with other
+queries, `k` is defined by the `size` parameter of the search request. The
+`knn` query collects `num_candidates` results from each shard, then merges
+them to get the top `size` results.
+
+
+[[knn-query-top-level-parameters]]
+==== Top-level parameters for `knn`
+
+`field`::
++
+--
+(Required, string) The name of the vector field to search against. Must be a
+<>.
+--
+
+`query_vector`::
++
+--
+(Required, array of floats) Query vector. Must have the same number of dimensions
+as the vector field you are searching against.
+--
+
+`num_candidates`::
++
+--
+(Required, integer) The number of nearest neighbor candidates to consider per shard.
+Cannot exceed 10,000. {es} collects `num_candidates` results from each shard, then
+merges them to find the top results. Increasing `num_candidates` tends to improve the
+accuracy of the final results.
+--
+
+`filter`::
++
+--
+(Optional, query object) Query to filter the documents that can match.
+The kNN search will return the top documents that also match this filter.
+The value can be a single query or a list of queries. If `filter` is not provided,
+all documents are allowed to match.
+
+The filter is a pre-filter, meaning that it is applied **during** the approximate
+kNN search to ensure that `num_candidates` matching documents are returned.
+--
+
+`similarity`::
++
+--
+(Optional, float) The minimum similarity required for a document to be
+considered a match. The similarity value is in terms of the raw
+<> used, not the document score. The matched
+documents are then scored according to <>,
+and the provided `boost` is applied. See the sketch after this parameter list
+for an example.
+--
+
+`boost`::
++
+--
+(Optional, float) Floating point number used to multiply the
+scores of matched documents. This value cannot be negative. Defaults to `1.0`.
+--
+
+`_name`::
++
+--
+(Optional, string) A name used to identify the query.
+--
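+
+As an illustration of the `similarity` parameter, the sketch below requires a
+minimum raw similarity of `10` for a document to match (the threshold value is
+an arbitrary example; its meaning depends on the field's similarity metric):
+
+[source,console]
+----
+POST my-image-index/_search
+{
+  "query" : {
+    "knn": {
+      "field": "image-vector",
+      "query_vector": [-5, 9, -12],
+      "num_candidates": 10,
+      "similarity": 10
+    }
+  }
+}
+----
+//TEST[continued]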
+
+[[knn-query-filtering]]
+==== Pre-filters and post-filters in knn query
+
+There are two ways to filter documents that match a kNN query:
+
+. **pre-filtering** – the filter is applied during the approximate kNN search
+to ensure that `k` matching documents are returned.
+. **post-filtering** – the filter is applied after the approximate kNN search
+completes, which may result in fewer than `k` results, even when there are
+enough matching documents.
+
+Pre-filtering is supported through the `filter` parameter of the `knn` query.
+Filters from <> are also applied as pre-filters.
+
+All other filters found in the Query DSL tree are applied as post-filters.
+In the following example, the `knn` query finds the top 3 documents with the
+nearest vectors (`num_candidates=3`), which are then combined with a `term`
+filter applied as a post-filter. The final set of documents contains only a
+single document that passes the post-filter.
+
+
+[source,console]
+----
+POST my-image-index/_search
+{
+ "size" : 10,
+ "query" : {
+ "bool" : {
+ "must" : {
+ "knn": {
+ "field": "image-vector",
+ "query_vector": [-5, 9, -12],
+ "num_candidates": 3
+ }
+ },
+ "filter" : {
+ "term" : { "file-type" : "png" }
+ }
+ }
+ }
+}
+----
+//TEST[continued]
+
+[[knn-query-with-nested-query]]
+==== Knn query inside a nested query
+
+The `knn` query can be used inside a nested query. The behavior here is
+similar to <>:
+
+* kNN search over nested dense_vectors diversifies the top results over
+the top-level document
+* `filter` over the top-level document metadata is supported and acts as a
+post-filter
+* `filter` over `nested` field metadata is not supported
+
+A sample query might look like this:
+
+[source,js]
+----
+{
+ "query" : {
+ "nested" : {
+ "path" : "paragraph",
+ "query" : {
+ "knn": {
+ "query_vector": [
+ 0.45,
+ 45
+ ],
+ "field": "paragraph.vector",
+ "num_candidates": 2
+ }
+ }
+ }
+ }
+}
+----
+// NOTCONSOLE
+
+[[knn-query-aggregations]]
+==== Knn query with aggregations
+
+The `knn` query calculates aggregations over the top `num_candidates` documents
+collected from each shard. Thus, the final aggregation results are based on up
+to `num_candidates * number_of_shards` documents. This is different from
+the <>, where aggregations are
+calculated on the global top `k` nearest documents.
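+
+For illustration, a minimal sketch that runs a terms aggregation alongside the
+`knn` query (the `file_types` aggregation name is a placeholder):
+
+[source,console]
+----
+POST my-image-index/_search
+{
+  "size" : 0,
+  "query" : {
+    "knn": {
+      "field": "image-vector",
+      "query_vector": [-5, 9, -12],
+      "num_candidates": 10
+    }
+  },
+  "aggs": {
+    "file_types": {
+      "terms": {
+        "field": "file-type"
+      }
+    }
+  }
+}
+----
+//TEST[continued]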
+
diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc
index a6d35d4f9b707..d46377f698359 100644
--- a/docs/reference/query-dsl/special-queries.asciidoc
+++ b/docs/reference/query-dsl/special-queries.asciidoc
@@ -17,6 +17,10 @@ or collection of documents.
This query finds queries that are stored as documents that match with
the specified document.
+<>::
+A query that finds the _k_ nearest vectors to a query
+vector, as measured by a similarity metric.
+
<>::
A query that computes scores based on the values of numeric features and is
able to efficiently skip non-competitive hits.
@@ -43,6 +47,8 @@ include::mlt-query.asciidoc[]
include::percolate-query.asciidoc[]
+include::knn-query.asciidoc[]
+
include::rank-feature-query.asciidoc[]
include::script-query.asciidoc[]
diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc
index 8c676a5515ca3..4bf1ceabe08d8 100644
--- a/docs/reference/search/search-your-data/knn-search.asciidoc
+++ b/docs/reference/search/search-your-data/knn-search.asciidoc
@@ -43,7 +43,7 @@ based on a similarity metric, the better its match.
{es} supports two methods for kNN search:
* <> using the `knn` search
-option
+option or `knn` query
* <> using a `script_score` query with a
vector function
@@ -129,7 +129,8 @@ POST image-index/_bulk?refresh=true
//TEST[continued]
//TEST[s/\.\.\.//]
-. Run the search using the <>.
+. Run the search using the <> or the
+<> (expert case).
+
[source,console]
----
diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
index 8c3f4c793e5e0..023a8fcf860eb 100644
--- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
+++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
@@ -51,6 +51,17 @@ segment size is a way to prevent indices from having a long tail of very small s
This setting controls the value that <>
configures on the target index. It defaults to `100MB`.
+[[data-streams-lifecycle-signalling-error-retry-interval]]
+`data_streams.lifecycle.signalling.error_retry_interval`::
+(<>, integer)
+The number of retries the data stream lifecycle must perform for an index in
+an error step before signalling that the index is not progressing (i.e. it is
+stuck in an error step).
+The current signalling mechanism is a log statement at the `error` level;
+however, the signalling mechanism may be extended in the future.
+Defaults to `10` retries.
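+
+For illustration, the setting might be updated dynamically like this (the
+value `5` is an arbitrary example):
+
+[source,console]
+----
+PUT _cluster/settings
+{
+  "persistent": {
+    "data_streams.lifecycle.signalling.error_retry_interval": 5
+  }
+}
+----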
+
+
==== Index level settings
The following index-level settings are typically configured on the backing indices of a data stream.
diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
index 61d42e5db7083..5ec0d129b8f95 100644
--- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
+++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
@@ -29,9 +29,8 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
-public class SslConfigurationLoaderTests extends ESTestCase {
+public final class SslConfigurationLoaderTests extends ESTestCase {
- @SuppressWarnings("this-escape")
private final Path certRoot = getDataPath("/certs/ca1/ca.crt").getParent().getParent();
private Settings settings;
diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
index fe9b1f673f715..37bdf37ce51a5 100644
--- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
+++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
@@ -26,7 +26,7 @@
/**
* A single centroid which represents a number of data points.
*/
-public class Centroid implements Comparable<Centroid> {
+public final class Centroid implements Comparable<Centroid> {
private static final AtomicInteger uniqueCount = new AtomicInteger(1);
private double centroid = 0;
@@ -40,19 +40,16 @@ private Centroid() {
id = uniqueCount.getAndIncrement();
}
- @SuppressWarnings("this-escape")
public Centroid(double x) {
this();
start(x, 1, uniqueCount.getAndIncrement());
}
- @SuppressWarnings("this-escape")
public Centroid(double x, long w) {
this();
start(x, w, uniqueCount.getAndIncrement());
}
- @SuppressWarnings("this-escape")
public Centroid(double x, long w, int id) {
this();
start(x, w, id);
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
index 57649f7e3dfa6..07bbc5c55f7cd 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
@@ -52,7 +52,7 @@ public class APMMeterRegistry implements MeterRegistry {
private final Registrar longGauges = new Registrar<>();
private final Registrar longHistograms = new Registrar<>();
- private final Meter meter;
+ private Meter meter;
public APMMeterRegistry(Meter meter) {
this.meter = meter;
@@ -170,8 +170,9 @@ public LongHistogram getLongHistogram(String name) {
public void setProvider(Meter meter) {
try (ReleasableLock lock = registerLock.acquire()) {
+ this.meter = meter;
for (Registrar<?> registrar : registrars) {
- registrar.setProvider(meter);
+ registrar.setProvider(this.meter);
}
}
}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
index 61b53f2087f6e..2a806ca19a4e0 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
@@ -25,6 +25,7 @@
* @param <T> delegated instrument
*/
public abstract class AbstractInstrument<T> implements Instrument {
+ private static final int MAX_NAME_LENGTH = 63; // TODO(stu): change to 255 when we upgrade to otel 1.30+, see #101679
private final AtomicReference<T> delegate;
private final String name;
private final String description;
@@ -33,6 +34,11 @@ public abstract class AbstractInstrument implements Instrument {
@SuppressWarnings("this-escape")
public AbstractInstrument(Meter meter, String name, String description, String unit) {
this.name = Objects.requireNonNull(name);
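+ // the 63-char limit comes from the otel SDK (see MAX_NAME_LENGTH above); check eagerly so registration fails with a clear message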
+ if (name.length() > MAX_NAME_LENGTH) {
+ throw new IllegalArgumentException(
+ "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]"
+ );
+ }
this.description = Objects.requireNonNull(description);
this.unit = Objects.requireNonNull(unit);
this.delegate = new AtomicReference<>(doBuildInstrument(meter));
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
index 38fb0f0e0a8ac..b393edd6e58e3 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
@@ -16,15 +16,20 @@
import org.elasticsearch.telemetry.apm.internal.APMMeterService;
import org.elasticsearch.telemetry.apm.internal.TestAPMMeterService;
import org.elasticsearch.telemetry.metric.DoubleCounter;
+import org.elasticsearch.telemetry.metric.LongCounter;
import org.elasticsearch.test.ESTestCase;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.sameInstance;
public class APMMeterRegistryTests extends ESTestCase {
- Meter testOtel = OpenTelemetry.noop().getMeter("test");
+ Meter testOtel = new RecordingOtelMeter();
Meter noopOtel = OpenTelemetry.noop().getMeter("noop");
+ private final Settings TELEMETRY_ENABLED = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
+
public void testMeterIsSetUponConstruction() {
// test default
APMMeterService apmMeter = new APMMeterService(Settings.EMPTY, () -> testOtel, () -> noopOtel);
@@ -33,14 +38,13 @@ public void testMeterIsSetUponConstruction() {
assertThat(meter, sameInstance(noopOtel));
// test explicitly enabled
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
- apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
+ apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
meter = apmMeter.getMeterRegistry().getMeter();
assertThat(meter, sameInstance(testOtel));
// test explicitly disabled
- settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
+ var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build();
apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
meter = apmMeter.getMeterRegistry().getMeter();
@@ -60,9 +64,7 @@ public void testMeterIsOverridden() {
}
public void testLookupByName() {
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
-
- var apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel).getMeterRegistry();
+ var apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel).getMeterRegistry();
DoubleCounter registeredCounter = apmMeter.registerDoubleCounter("name", "desc", "unit");
DoubleCounter lookedUpCounter = apmMeter.getDoubleCounter("name");
@@ -71,8 +73,7 @@ public void testLookupByName() {
}
public void testNoopIsSetOnStop() {
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
- APMMeterService apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
+ APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
apmMeter.start();
Meter meter = apmMeter.getMeterRegistry().getMeter();
@@ -84,4 +85,16 @@ public void testNoopIsSetOnStop() {
assertThat(meter, sameInstance(noopOtel));
}
+ public void testMaxNameLength() {
+ APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
+ apmMeter.start();
+ int maxLength = 63;
+ var counter = apmMeter.getMeterRegistry().registerLongCounter("a".repeat(maxLength), "desc", "count");
+ assertThat(counter, instanceOf(LongCounter.class));
+ IllegalArgumentException iae = expectThrows(
+ IllegalArgumentException.class,
+ () -> apmMeter.getMeterRegistry().registerLongCounter("a".repeat(maxLength + 1), "desc", "count")
+ );
+ assertThat(iae.getMessage(), containsString("exceeds maximum length [63]"));
+ }
}
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java
index f18d39fb39c6c..d1c74681c2bd7 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java
@@ -90,6 +90,7 @@ public ObservableLongCounter buildWithCallback(Consumer<ObservableLongMeasurement> callback) {
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
-public class RestMultiSearchTemplateActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
+public final class RestMultiSearchTemplateActionTests extends RestActionTestCase {
final List<String> contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
index bc0a5f87e25d3..4e30d87b6a174 100644
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
@@ -21,8 +21,7 @@
import java.util.List;
import java.util.Map;
-public class RestSearchTemplateActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
+public final class RestSearchTemplateActionTests extends RestActionTestCase {
final List<String> contentTypeHeader = Collections.singletonList(randomCompatibleMediaType(RestApiVersion.V_7));
@Before
diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java
index 9e5ca7a3cdc05..0430fe3404f91 100644
--- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java
+++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java
@@ -10,7 +10,6 @@
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
import org.elasticsearch.index.query.QueryBuilders;
@@ -27,6 +26,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.containsString;
@@ -66,16 +66,19 @@ public void testHighlightingWithMatchOnlyTextFieldMatchPhrase() throws IOExcepti
BulkResponse bulkItemResponses = bulk.get();
assertNoFailures(bulkItemResponses);
- SearchResponse searchResponse = prepareSearch("test").setQuery(
- QueryBuilders.matchPhraseQuery("message", "marking and sending shard")
- ).setSize(500).highlighter(new HighlightBuilder().field("message")).get();
- assertNoFailures(searchResponse);
- for (SearchHit searchHit : searchResponse.getHits()) {
- assertThat(
- searchHit.getHighlightFields().get("message").fragments()[0].string(),
- containsString("marking and sending shard")
- );
- }
+ assertNoFailuresAndResponse(
+ prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("message", "marking and sending shard"))
+ .setSize(500)
+ .highlighter(new HighlightBuilder().field("message")),
+ searchResponse -> {
+ for (SearchHit searchHit : searchResponse.getHits()) {
+ assertThat(
+ searchHit.getHighlightFields().get("message").fragments()[0].string(),
+ containsString("marking and sending shard")
+ );
+ }
+ }
+ );
}
public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOException {
@@ -112,16 +115,19 @@ public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOExc
BulkResponse bulkItemResponses = bulk.get();
assertNoFailures(bulkItemResponses);
- SearchResponse searchResponse = prepareSearch("test").setQuery(
- QueryBuilders.matchPhraseQuery("message", "marking and sending shard")
- ).setSize(500).highlighter(new HighlightBuilder().field("message")).get();
- assertNoFailures(searchResponse);
- for (SearchHit searchHit : searchResponse.getHits()) {
- assertThat(
- searchHit.getHighlightFields().get("message").fragments()[0].string(),
- containsString("marking and sending shard")
- );
- }
+ assertNoFailuresAndResponse(
+ prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("message", "marking and sending shard"))
+ .setSize(500)
+ .highlighter(new HighlightBuilder().field("message")),
+ searchResponse -> {
+ for (SearchHit searchHit : searchResponse.getHits()) {
+ assertThat(
+ searchHit.getHighlightFields().get("message").fragments()[0].string(),
+ containsString("marking and sending shard")
+ );
+ }
+ }
+ );
}
}
diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java
index 87699f285063f..3a7f9a1ca6eb5 100644
--- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java
+++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java
@@ -9,7 +9,6 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
import org.elasticsearch.index.query.QueryBuilders;
@@ -22,6 +21,7 @@
import java.util.Collection;
import java.util.Map;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
@@ -39,39 +39,51 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
public void testRankFeaturesTermQuery() throws IOException {
init();
- SearchResponse response = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)).get();
- assertThat(response.getHits().getTotalHits().value, equalTo(2L));
- for (SearchHit hit : response.getHits().getHits()) {
- assertThat(hit.getScore(), equalTo(20f));
- }
-
- response = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE).boost(100f)).get();
- assertThat(response.getHits().getTotalHits().value, equalTo(2L));
- for (SearchHit hit : response.getHits().getHits()) {
- assertThat(hit.getScore(), equalTo(2000f));
- }
-
- response = prepareSearch(INDEX_NAME).setQuery(
- QueryBuilders.boolQuery()
- .should(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE))
- .should(QueryBuilders.termQuery(FIELD_NAME, LOWER_RANKED_FEATURE).boost(3f))
- .minimumShouldMatch(1)
- ).get();
- assertThat(response.getHits().getTotalHits().value, equalTo(3L));
- for (SearchHit hit : response.getHits().getHits()) {
- if (hit.getId().equals("all")) {
- assertThat(hit.getScore(), equalTo(50f));
+ assertNoFailuresAndResponse(
+ prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)),
+ searchResponse -> {
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ assertThat(hit.getScore(), equalTo(20f));
+ }
}
- if (hit.getId().equals("lower")) {
- assertThat(hit.getScore(), equalTo(30f));
+ );
+ assertNoFailuresAndResponse(
+ prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE).boost(100f)),
+ searchResponse -> {
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ assertThat(hit.getScore(), equalTo(2000f));
+ }
}
- if (hit.getId().equals("higher")) {
- assertThat(hit.getScore(), equalTo(20f));
- }
- }
+ );
- response = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, "missing_feature")).get();
- assertThat(response.getHits().getTotalHits().value, equalTo(0L));
+ assertNoFailuresAndResponse(
+ prepareSearch(INDEX_NAME).setQuery(
+ QueryBuilders.boolQuery()
+ .should(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE))
+ .should(QueryBuilders.termQuery(FIELD_NAME, LOWER_RANKED_FEATURE).boost(3f))
+ .minimumShouldMatch(1)
+ ),
+ searchResponse -> {
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if (hit.getId().equals("all")) {
+ assertThat(hit.getScore(), equalTo(50f));
+ }
+ if (hit.getId().equals("lower")) {
+ assertThat(hit.getScore(), equalTo(30f));
+ }
+ if (hit.getId().equals("higher")) {
+ assertThat(hit.getScore(), equalTo(20f));
+ }
+ }
+ }
+ );
+ assertNoFailuresAndResponse(
+ prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, "missing_feature")),
+ response -> assertThat(response.getHits().getTotalHits().value, equalTo(0L))
+ );
}
private void init() throws IOException {
diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
index 9362080c9cb33..cad976411b8da 100644
--- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
+++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
@@ -9,6 +9,7 @@
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
@@ -22,10 +23,12 @@
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.Operator;
+import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
@@ -1295,4 +1298,34 @@ public void testWithWildcardFieldNames() throws Exception {
).get();
assertEquals(1, response.getHits().getTotalHits().value);
}
+
+ public void testKnnQueryNotSupportedInPercolator() throws IOException {
+ String mappings = org.elasticsearch.common.Strings.format("""
+ {
+ "properties": {
+ "my_query" : {
+ "type" : "percolator"
+ },
+ "my_vector" : {
+ "type" : "dense_vector",
+ "dims" : 5,
+ "index" : true,
+ "similarity" : "l2_norm"
+ }
+
+ }
+ }
+ """);
+ indicesAdmin().prepareCreate("index1").setMapping(mappings).get();
+ ensureGreen();
+ QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, null);
+
+ IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1")
+ .setId("knn_query1")
+ .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject());
+
+ DocumentParsingException exception = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get());
+ assertThat(exception.getMessage(), containsString("the [knn] query is unsupported inside a percolator"));
+ }
+
}
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
index c00eaa894dd69..e212264287937 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
@@ -61,6 +61,7 @@
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
import org.elasticsearch.xcontent.XContentParser;
import java.io.ByteArrayOutputStream;
@@ -438,6 +439,8 @@ static QueryBuilder parseQueryBuilder(DocumentParserContext context) {
throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query");
} else if (queryName.equals("has_parent")) {
throw new IllegalArgumentException("the [has_parent] query is unsupported inside a percolator query");
+ } else if (queryName.equals(KnnVectorQueryBuilder.NAME)) {
+ throw new IllegalArgumentException("the [knn] query is unsupported inside a percolator query");
}
});
} catch (IOException e) {
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
index 15f9798abe88b..ce63bcba0345c 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
@@ -26,7 +26,7 @@
/**
* Request to perform a search ranking evaluation.
*/
-public class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable {
+public final class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable {
private RankEvalSpec rankingEvaluationSpec;
@@ -35,7 +35,6 @@ public class RankEvalRequest extends ActionRequest implements IndicesRequest.Rep
private SearchType searchType = SearchType.DEFAULT;
- @SuppressWarnings("this-escape")
public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) {
this.rankingEvaluationSpec = Objects.requireNonNull(rankingEvaluationSpec, "ranking evaluation specification must not be null");
indices(indices);
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
index f99a22cbac6ef..982d1afcf6dd3 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
@@ -30,10 +30,9 @@
import static org.mockito.Mockito.mock;
-public class TransportRankEvalActionTests extends ESTestCase {
+public final class TransportRankEvalActionTests extends ESTestCase {
- @SuppressWarnings("this-escape")
- private Settings settings = Settings.builder()
+ private final Settings settings = Settings.builder()
.put("path.home", createTempDir().toString())
.put("node.name", "test-" + getTestName())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
index 8e1cfb309a671..fdd98992503d7 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
@@ -23,9 +23,8 @@
import java.util.List;
import java.util.Map;
-public class RestDeleteByQueryActionTests extends RestActionTestCase {
+public final class RestDeleteByQueryActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
final List<String> contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
index 7222b5efe9c85..889c8d0091c81 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
@@ -23,9 +23,8 @@
import java.util.List;
import java.util.Map;
-public class RestUpdateByQueryActionTests extends RestActionTestCase {
+public final class RestUpdateByQueryActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
final List<String> contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
index fbf57a0198644..16a9f60a3d28d 100644
--- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
+++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
@@ -35,7 +35,7 @@
public class AzureBlobContainer extends AbstractBlobContainer {
- private final Logger logger = LogManager.getLogger(AzureBlobContainer.class);
+ private static final Logger logger = LogManager.getLogger(AzureBlobContainer.class);
private final AzureBlobStore blobStore;
private final String keyPath;
diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java
index ae5ae07c9078a..cdfd83b79b370 100644
--- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java
+++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java
@@ -239,8 +239,8 @@ protected void doStop() {
protected void doClose() throws IOException {}
private static final class SuccessfulRequestTracker implements HttpPipelinePolicy {
+ private static final Logger logger = LogManager.getLogger(SuccessfulRequestTracker.class);
private final BiConsumer onSuccessfulRequest;
- private final Logger logger = LogManager.getLogger(SuccessfulRequestTracker.class);
private SuccessfulRequestTracker(BiConsumer onSuccessfulRequest) {
this.onSuccessfulRequest = onSuccessfulRequest;
diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java
index 3c131affbb84c..2dff8a10d39f7 100644
--- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java
+++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java
@@ -50,7 +50,7 @@ public void cancel() {
private final Condition condition;
private final Consumer cleaner;
private final AtomicReference subscription = new AtomicReference<>();
- private final Logger logger = LogManager.getLogger(CancellableRateLimitedFluxIterator.class);
+ private static final Logger logger = LogManager.getLogger(CancellableRateLimitedFluxIterator.class);
private volatile Throwable error;
private volatile boolean done;
private int emittedElements;
diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java
index a3359e07119b5..5f18a417dcccd 100644
--- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java
+++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java
@@ -37,7 +37,7 @@
public class ReactorScheduledExecutorService extends AbstractExecutorService implements ScheduledExecutorService {
private final ThreadPool threadPool;
private final ExecutorService delegate;
- private final Logger logger = LogManager.getLogger(ReactorScheduledExecutorService.class);
+ private static final Logger logger = LogManager.getLogger(ReactorScheduledExecutorService.class);
public ReactorScheduledExecutorService(ThreadPool threadPool, String executorName) {
this.threadPool = threadPool;
diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java
index 70aaf9864d56d..ddf23bce09721 100644
--- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java
+++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java
@@ -33,7 +33,7 @@ class RetryingHttpInputStream extends InputStream {
public static final int MAX_SUPPRESSED_EXCEPTIONS = 10;
public static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1;
- private final Logger logger = LogManager.getLogger(RetryingHttpInputStream.class);
+ private static final Logger logger = LogManager.getLogger(RetryingHttpInputStream.class);
private final String blobName;
private final URI blobURI;
diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java
index 42bece3dbea16..490787714ff3a 100644
--- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java
+++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java
@@ -37,7 +37,7 @@
public class URLHttpClient implements Closeable {
public static final int MAX_ERROR_MESSAGE_BODY_SIZE = 1024;
private static final int MAX_CONNECTIONS = 50;
- private final Logger logger = LogManager.getLogger(URLHttpClient.class);
+ private static final Logger logger = LogManager.getLogger(URLHttpClient.class);
private final CloseableHttpClient client;
private final URLHttpClientSettings httpClientSettings;
@@ -142,7 +142,7 @@ public void close() throws IOException {
};
}
- private void handleInvalidResponse(CloseableHttpResponse response) {
+ private static void handleInvalidResponse(CloseableHttpResponse response) {
int statusCode = response.getStatusLine().getStatusCode();
String errorBody = parseBodyAsString(response, MAX_ERROR_MESSAGE_BODY_SIZE);
throw new URLHttpClientException(statusCode, createErrorMessage(statusCode, errorBody));
@@ -156,7 +156,7 @@ static String createErrorMessage(int statusCode, String errorMessage) {
}
}
- private String parseBodyAsString(CloseableHttpResponse response, int maxSize) {
+ private static String parseBodyAsString(CloseableHttpResponse response, int maxSize) {
String errorMessage = "";
InputStream bodyContent = null;
try {
diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
index 6d89571e5af90..1bb2116cc680a 100644
--- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
+++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
@@ -12,14 +12,12 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
-import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Request;
-import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.rest.ESRestTestCase;
@@ -44,7 +42,6 @@
* This test ensures that we keep the search states of a CCS request correctly when the local and remote clusters
* have different but compatible versions. See SearchService#createAndPutReaderContext
*/
-@SuppressWarnings("removal")
public class SearchStatesIT extends ESRestTestCase {
private static final Logger LOGGER = LogManager.getLogger(SearchStatesIT.class);
@@ -90,7 +87,7 @@ static List<HttpHost> parseHosts(String props) {
public static void configureRemoteClusters(List<Node> remoteNodes) throws Exception {
assertThat(remoteNodes, hasSize(3));
final String remoteClusterSettingPrefix = "cluster.remote." + CLUSTER_ALIAS + ".";
- try (RestClient localClient = newLocalClient().getLowLevelClient()) {
+ try (RestClient localClient = newLocalClient()) {
final Settings remoteConnectionSettings;
if (randomBoolean()) {
final List<String> seeds = remoteNodes.stream()
@@ -124,28 +121,32 @@ public static void configureRemoteClusters(List<Node> remoteNodes) throws Exception {
}
}
- static RestHighLevelClient newLocalClient() {
+ static RestClient newLocalClient() {
final List<HttpHost> hosts = parseHosts("tests.rest.cluster");
final int index = random().nextInt(hosts.size());
LOGGER.info("Using client node {}", index);
- return new RestHighLevelClient(RestClient.builder(hosts.get(index)));
+ return RestClient.builder(hosts.get(index)).build();
}
- static RestHighLevelClient newRemoteClient() {
- return new RestHighLevelClient(RestClient.builder(randomFrom(parseHosts("tests.rest.remote_cluster"))));
+ static RestClient newRemoteClient() {
+ return RestClient.builder(randomFrom(parseHosts("tests.rest.remote_cluster"))).build();
}
- static int indexDocs(RestHighLevelClient client, String index, int numDocs) throws IOException {
+ static int indexDocs(RestClient client, String index, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
- client.index(new IndexRequest(index).id("id_" + i).source("f", i), RequestOptions.DEFAULT);
+ Request createDoc = new Request("POST", "/" + index + "/_doc/id_" + i);
+ createDoc.setJsonEntity(Strings.format("""
+ { "f": %s }
+ """, i));
+ assertOK(client.performRequest(createDoc));
}
- refresh(client.getLowLevelClient(), index);
+ refresh(client, index);
return numDocs;
}
void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int remoteNumDocs, Integer preFilterShardSize) {
- try (RestClient localClient = newLocalClient().getLowLevelClient()) {
+ try (RestClient localClient = newLocalClient()) {
Request request = new Request("POST", "/_search");
final int expectedDocs;
if (randomBoolean()) {
@@ -185,56 +186,40 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r
public void testBWCSearchStates() throws Exception {
String localIndex = "test_bwc_search_states_index";
String remoteIndex = "test_bwc_search_states_remote_index";
- try (RestHighLevelClient localClient = newLocalClient(); RestHighLevelClient remoteClient = newRemoteClient()) {
- createIndex(
- localClient.getLowLevelClient(),
- localIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build()
- );
+ try (RestClient localClient = newLocalClient(); RestClient remoteClient = newRemoteClient()) {
+ createIndex(localClient, localIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build());
int localNumDocs = indexDocs(localClient, localIndex, between(10, 100));
- createIndex(
- remoteClient.getLowLevelClient(),
- remoteIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build()
- );
+ createIndex(remoteClient, remoteIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build());
int remoteNumDocs = indexDocs(remoteClient, remoteIndex, between(10, 100));
- configureRemoteClusters(getNodes(remoteClient.getLowLevelClient()));
+ configureRemoteClusters(getNodes(remoteClient));
int iterations = between(1, 20);
for (int i = 0; i < iterations; i++) {
verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, null);
}
- deleteIndex(localClient.getLowLevelClient(), localIndex);
- deleteIndex(remoteClient.getLowLevelClient(), remoteIndex);
+ deleteIndex(localClient, localIndex);
+ deleteIndex(remoteClient, remoteIndex);
}
}
public void testCanMatch() throws Exception {
String localIndex = "test_can_match_local_index";
String remoteIndex = "test_can_match_remote_index";
- try (RestHighLevelClient localClient = newLocalClient(); RestHighLevelClient remoteClient = newRemoteClient()) {
- createIndex(
- localClient.getLowLevelClient(),
- localIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build()
- );
+ try (RestClient localClient = newLocalClient(); RestClient remoteClient = newRemoteClient()) {
+ createIndex(localClient, localIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build());
int localNumDocs = indexDocs(localClient, localIndex, between(10, 100));
- createIndex(
- remoteClient.getLowLevelClient(),
- remoteIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build()
- );
+ createIndex(remoteClient, remoteIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build());
int remoteNumDocs = indexDocs(remoteClient, remoteIndex, between(10, 100));
- configureRemoteClusters(getNodes(remoteClient.getLowLevelClient()));
+ configureRemoteClusters(getNodes(remoteClient));
int iterations = between(1, 10);
for (int i = 0; i < iterations; i++) {
verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, between(1, 10));
}
- deleteIndex(localClient.getLowLevelClient(), localIndex);
- deleteIndex(remoteClient.getLowLevelClient(), remoteIndex);
+ deleteIndex(localClient, localIndex);
+ deleteIndex(remoteClient, remoteIndex);
}
}
}
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
index d1fefd425ae7f..54f82b2366d14 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
@@ -310,7 +310,7 @@ public static void restartElasticsearch(Shell sh, Installation installation) thr
* when instantiated, and advancing that cursor when the {@code clear()}
* method is called.
*/
- public static class JournaldWrapper {
+ public static final class JournaldWrapper {
private Shell sh;
private String cursor;
@@ -318,7 +318,6 @@ public static class JournaldWrapper {
* Create a new wrapper for Elasticsearch JournalD logs.
* @param sh A shell with appropriate permissions.
*/
- @SuppressWarnings("this-escape")
public JournaldWrapper(Shell sh) {
this.sh = sh;
clear();
diff --git a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
index 824f4db5c4cf5..d9be4045c37e0 100644
--- a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
+++ b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
@@ -11,7 +11,6 @@
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -26,10 +25,8 @@
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.Collections;
import java.util.function.Consumer;
-@SuppressWarnings("removal")
public abstract class AbstractMultiClusterRemoteTestCase extends ESRestTestCase {
private static final String USER = "x_pack_rest_user";
@@ -40,8 +37,8 @@ protected boolean preserveClusterUponCompletion() {
return true;
}
- private static RestHighLevelClient cluster1Client;
- private static RestHighLevelClient cluster2Client;
+ private static RestClient cluster1Client;
+ private static RestClient cluster2Client;
private static boolean initialized = false;
@Override
@@ -62,8 +59,8 @@ public void initClientsAndConfigureClusters() throws Exception {
request.addParameter("wait_for_status", "yellow");
request.addParameter("wait_for_nodes", "1");
};
- ensureHealth(cluster1Client().getLowLevelClient(), waitForYellowRequest);
- ensureHealth(cluster2Client().getLowLevelClient(), waitForYellowRequest);
+ ensureHealth(cluster1Client, waitForYellowRequest);
+ ensureHealth(cluster2Client, waitForYellowRequest);
initialized = true;
}
@@ -86,28 +83,22 @@ public static void destroyClients() throws IOException {
}
}
- protected static RestHighLevelClient cluster1Client() {
+ protected static RestClient cluster1Client() {
return cluster1Client;
}
- protected static RestHighLevelClient cluster2Client() {
+ protected static RestClient cluster2Client() {
return cluster2Client;
}
- private static class HighLevelClient extends RestHighLevelClient {
- private HighLevelClient(RestClient restClient) {
- super(restClient, RestClient::close, Collections.emptyList());
- }
- }
-
- private RestHighLevelClient buildClient(final String url) throws IOException {
+ private RestClient buildClient(final String url) throws IOException {
int portSeparator = url.lastIndexOf(':');
HttpHost httpHost = new HttpHost(
url.substring(0, portSeparator),
Integer.parseInt(url.substring(portSeparator + 1)),
getProtocol()
);
- return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[] { httpHost }));
+ return buildClient(restAdminSettings(), new HttpHost[] { httpHost });
}
protected boolean isOss() {
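
The refactor above removes `RestHighLevelClient` entirely: everything these tests need can be expressed with the low-level `RestClient` plus `Request` and `ObjectPath`, as the hunks below show. A minimal sketch of that pattern, assuming a `RestClient client` such as the one `buildClient` now returns:

```java
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.rest.ObjectPath;

class LowLevelClientSketch {
    // Index a document with an immediate refresh, then read hits.total.value
    // from a search response, mirroring the test hunks below.
    static int totalHits(RestClient client) throws IOException {
        Request createDoc = new Request("POST", "/test1/_doc/id1?refresh=true");
        createDoc.setJsonEntity("""
            { "foo": "bar" }
            """);
        client.performRequest(createDoc);

        Request search = new Request("POST", "/test1/_search");
        ObjectPath response = ObjectPath.createFromResponse(client.performRequest(search));
        return response.evaluate("hits.total.value");
    }
}
```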
diff --git a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
index 9a0303ab60714..78ffb9cb7b7b6 100644
--- a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
+++ b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
@@ -7,13 +7,10 @@
*/
package org.elasticsearch.cluster.remote.test;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.ObjectPath;
import org.junit.After;
import org.junit.Before;
@@ -28,44 +25,53 @@ public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase {
@Before
public void setupIndices() throws IOException {
- RestClient cluster1Client = cluster1Client().getLowLevelClient();
- assertTrue(createIndex(cluster1Client, "test1", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
- cluster1Client().index(
- new IndexRequest("test1").id("id1")
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
- .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
-
- RestClient cluster2Client = cluster2Client().getLowLevelClient();
- assertTrue(createIndex(cluster2Client, "test2", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
- cluster2Client().index(
- new IndexRequest("test2").id("id1").source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
- cluster2Client().index(
- new IndexRequest("test2").id("id2")
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
- .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
- assertEquals(1L, cluster1Client().search(new SearchRequest("test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
- assertEquals(2L, cluster2Client().search(new SearchRequest("test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
+ assertTrue(createIndex(cluster1Client(), "test1", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
+ {
+ Request createDoc = new Request("POST", "/test1/_doc/id1?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster1Client().performRequest(createDoc));
+ }
+ {
+ Request searchRequest = new Request("POST", "/test1/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(1, (int) doc.evaluate("hits.total.value"));
+ }
+
+ assertTrue(createIndex(cluster2Client(), "test2", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
+ {
+ Request createDoc = new Request("POST", "/test2/_doc/id1?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster2Client().performRequest(createDoc));
+ }
+ {
+ Request createDoc = new Request("POST", "/test2/_doc/id2?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster2Client().performRequest(createDoc));
+ }
+ {
+ Request searchRequest = new Request("POST", "/test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster2Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
@After
public void clearIndices() throws IOException {
- RestClient cluster1Client = cluster1Client().getLowLevelClient();
- assertTrue(deleteIndex(cluster1Client, "*").isAcknowledged());
- RestClient cluster2Client = cluster2Client().getLowLevelClient();
- assertTrue(deleteIndex(cluster2Client, "*").isAcknowledged());
+ assertTrue(deleteIndex(cluster1Client(), "*").isAcknowledged());
+ assertTrue(deleteIndex(cluster2Client(), "*").isAcknowledged());
}
@After
public void clearRemoteClusterSettings() throws IOException {
Settings setting = Settings.builder().putNull("cluster.remote.*").build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), setting);
- updateClusterSettings(cluster2Client().getLowLevelClient(), setting);
+ updateClusterSettings(cluster1Client(), setting);
+ updateClusterSettings(cluster2Client(), setting);
}
public void testProxyModeConnectionWorks() throws IOException {
@@ -76,14 +82,15 @@ public void testProxyModeConnectionWorks() throws IOException {
.put("cluster.remote.cluster2.proxy_address", cluster2RemoteClusterSeed)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("cluster2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/cluster2:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testSniffModeConnectionFails() throws IOException {
@@ -93,9 +100,9 @@ public void testSniffModeConnectionFails() throws IOException {
.put("cluster.remote.cluster2alt.mode", "sniff")
.put("cluster.remote.cluster2alt.seeds", cluster2RemoteClusterSeed)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertFalse(isConnected(cluster1Client().getLowLevelClient()));
+ assertFalse(isConnected(cluster1Client()));
}
public void testHAProxyModeConnectionWorks() throws IOException {
@@ -105,14 +112,15 @@ public void testHAProxyModeConnectionWorks() throws IOException {
.put("cluster.remote.haproxynosn.mode", "proxy")
.put("cluster.remote.haproxynosn.proxy_address", proxyAddress)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxynosn:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testHAProxyModeConnectionWithSNIToCluster1Works() throws IOException {
@@ -123,14 +131,15 @@ public void testHAProxyModeConnectionWithSNIToCluster1Works() throws IOException
.put("cluster.remote.haproxysni1.proxy_address", "haproxy:9600")
.put("cluster.remote.haproxysni1.server_name", "application1.example.com")
.build();
- updateClusterSettings(cluster2Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster2Client(), settings);
- assertTrue(isConnected(cluster2Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster2Client()));
- assertEquals(
- 1L,
- cluster2Client().search(new SearchRequest("haproxysni1:test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxysni1:test1/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster2Client().performRequest(searchRequest));
+ assertEquals(1, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testHAProxyModeConnectionWithSNIToCluster2Works() throws IOException {
@@ -141,14 +150,15 @@ public void testHAProxyModeConnectionWithSNIToCluster2Works() throws IOException
.put("cluster.remote.haproxysni2.proxy_address", "haproxy:9600")
.put("cluster.remote.haproxysni2.server_name", "application2.example.com")
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("haproxysni2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxysni2:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
@SuppressWarnings("unchecked")
diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
index 0c814fd0f9692..f8b1de5155527 100644
--- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
+++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
@@ -148,3 +148,41 @@ setup:
_internal.get_desired_balance: { }
- is_true: 'cluster_info'
+
+---
+"Test undesired_shard_allocation_count":
+
+ - skip:
+ version: " - 8.11.99"
+      reason: "undesired_shard_allocation_count added in 8.12.0"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ index: test
+ wait_for_status: green
+
+ - do:
+ cluster.state: {}
+ - set: { nodes._arbitrary_key_ : node_id }
+ - set: { nodes.$node_id.name : node_name }
+
+ - do:
+ _internal.get_desired_balance: { }
+
+ - gte: { 'cluster_balance_stats.shard_count' : 0 }
+ - gte: { 'cluster_balance_stats.undesired_shard_allocation_count' : 0 }
+ - gte: { 'cluster_balance_stats.nodes.$node_name.undesired_shard_allocation_count' : 0 }
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.total'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.min'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
index 4f943abf1106a..8e1d3431069cf 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
@@ -183,3 +183,41 @@ setup:
- do:
_internal.delete_desired_balance: { }
+
+---
+"Test undesired_shard_allocation_count":
+
+ - skip:
+ version: " - 8.11.99"
+      reason: "undesired_shard_allocation_count added in 8.12.0"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ index: test
+ wait_for_status: green
+
+ - do:
+ cluster.state: {}
+ - set: { nodes._arbitrary_key_ : node_id }
+ - set: { nodes.$node_id.name : node_name }
+
+ - do:
+ _internal.get_desired_balance: { }
+
+ - gte: { 'cluster_balance_stats.shard_count' : 0 }
+ - gte: { 'cluster_balance_stats.undesired_shard_allocation_count' : 0 }
+ - gte: { 'cluster_balance_stats.nodes.$node_name.undesired_shard_allocation_count' : 0 }
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.total'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.min'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
index 51c12892c4859..890162787f04a 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
@@ -492,3 +492,52 @@
index: test-generic
- match: { test-generic.mappings.properties.field.type: "keyword" }
- match: { test-generic.mappings.properties.field.ignore_above: 1024 }
+---
+"Using deprecated component templates and pipelines in index template":
+ - skip:
+ version: ' - 8.11.99'
+      reason: 'The deprecated flags were introduced in 8.12.0'
+ features: allowed_warnings
+
+ - do:
+ cluster.put_component_template:
+ name: mapping
+ body:
+ template:
+ mappings:
+ properties:
+ field:
+ type: long
+ deprecated: true
+
+ - do:
+ ingest.put_pipeline:
+ id: "my_deprecated_pipeline"
+ body:
+ deprecated: true
+ processors: []
+ - match: { acknowledged: true }
+
+ - do:
+ cluster.put_component_template:
+ name: setting
+ body:
+ template:
+ settings:
+ index:
+ default_pipeline: my_deprecated_pipeline
+
+ - do:
+ allowed_warnings:
+ - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation"
+ - "index template [test-composable-template] uses deprecated component template [mapping]"
+ - "index template [test-composable-template] uses deprecated ingest pipeline [my_deprecated_pipeline]"
+ indices.put_index_template:
+ name: test-composable-template
+ body:
+ index_patterns:
+ - test-*
+ composed_of:
+ - mapping
+ - setting
+ - is_true: acknowledged
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
index 1a03896f6d087..4607ae758b91f 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
@@ -93,3 +93,50 @@ teardown:
- match: {hits.hits.0.highlight.text.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown."}
+---
+"Test hybrid search with knn where automatically disables weighted mode":
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'kNN was not correctly skipped until 8.12'
+
+ - do:
+ indices.create:
+ index: test-highlighting-knn
+ body:
+ mappings:
+ "properties":
+ "vectors":
+ "type": "dense_vector"
+ "dims": 2
+ "index": true
+ "similarity": "l2_norm"
+ "text":
+ "type": "text"
+ "fields":
+ "fvh":
+ "type": "text"
+ "term_vector": "with_positions_offsets"
+ "postings":
+ "type": "text"
+ "index_options": "offsets"
+ - do:
+ index:
+ index: test-highlighting-knn
+ id: "1"
+ body:
+ "text" : "The quick brown fox is brown."
+ "vectors": [1, 2]
+ - do:
+ indices.refresh: {}
+
+ - do:
+ search:
+ index: test-highlighting-knn
+ body: {
+ "query": { "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] } },
+ "highlight": { "type": "unified", "fields": { "*": { } } },
+ "knn": { "field": "vectors", "query_vector": [1, 2], "k": 10, "num_candidates": 10 } }
+
+ - match: { hits.hits.0.highlight.text.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown." }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
new file mode 100644
index 0000000000000..849df86a30568
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
@@ -0,0 +1,274 @@
+# test how knn query interacts with filters
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 1
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+ aliases:
+ my_alias:
+ filter:
+ term:
+ my_name: v2
+ my_alias1:
+ filter:
+ term:
+ my_name: v1
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+
+---
+"Simple knn query":
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "1" }
+ - match: { hits.hits.0.fields.my_name.0: v1 }
+ - match: { hits.hits.1._id: "2" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "3" }
+ - match: { hits.hits.2.fields.my_name.0: v1 }
+---
+"PRE_FILTER: knn query with alias filter as pre-filter":
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+ # alias prefilter is combined with internal filter
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v1
+
+ # both alias filter and internal filter are applied as pre-filter resulting in 0 hits for knn search
+ - match: { hits.total.value: 0 }
+ - length: { hits.hits: 0 }
+
+ # alias prefilter is applied when knn is a part of another query
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*"
+ - knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 }
+ - length: { hits.hits: 3 }
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+---
+"PRE_FILTER: pre-filter across multiple internal filters":
+- do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ - term:
+ my_name: v1
+ - term:
+ my_name: v2
+- match: { hits.total.value: 0 }
+- length: { hits.hits: 0 }
+
+---
+"PRE_FILTER: pre-filter across multiple aliases":
+ - do:
+ search:
+ index: my_alias,my_alias1
+ body:
+ size: 6
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 100
+
+ - match: { hits.total.value: 10 } # 5 docs from each alias
+ - length: {hits.hits: 6}
+ - match: { hits.hits.0._id: "1" }
+ - match: { hits.hits.0.fields.my_name.0: v1 }
+ - match: { hits.hits.1._id: "2" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "3" }
+ - match: { hits.hits.2.fields.my_name.0: v1 }
+ - match: { hits.hits.3._id: "4" }
+ - match: { hits.hits.3.fields.my_name.0: v2 }
+ - match: { hits.hits.4._id: "5" }
+ - match: { hits.hits.4.fields.my_name.0: v1 }
+ - match: { hits.hits.5._id: "6" }
+ - match: { hits.hits.5.fields.my_name.0: v2 }
+
+---
+"PRE_FILTER: knn query with internal filter as pre-filter":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v2
+
+ - match: { hits.total.value: 5 }
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+---
+"POST_FILTER: knn query with filter from a parent bool query as post-filter":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ must:
+ - term:
+ my_name: v2
+ - knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 2 }
+ - length: {hits.hits: 2} # knn query returns top 5 docs, but they are post-filtered to 2 docs
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ must:
+ - term:
+ my_name: v2
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v1
+
+ - match: { hits.total.value: 0}
+ - length: { hits.hits: 0 } # knn query returns top 5 docs, but they are post-filtered to 0 docs
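
The distinction this file exercises is worth stating once: a `filter` placed inside the `knn` query (and any alias filter) is applied before the vector search, so the top `num_candidates` are drawn only from matching documents, whereas a filter in an enclosing `bool` is applied after the top documents have already been selected. A hedged sketch of both request shapes through the low-level client, reusing the index and field names from the tests above:

```java
import org.elasticsearch.client.Request;

class KnnFilterPlacementSketch {
    // Pre-filter: the term clause restricts which docs can become knn candidates.
    static Request preFiltered() {
        Request r = new Request("POST", "/my_index/_search");
        r.setJsonEntity("""
            { "query": { "knn": { "field": "my_vector", "query_vector": [1, 1, 1, 1],
                                  "num_candidates": 5,
                                  "filter": { "term": { "my_name": "v2" } } } } }
            """);
        return r;
    }

    // Post-filter: the knn top-5 is computed first, then reduced by the term clause.
    static Request postFiltered() {
        Request r = new Request("POST", "/my_index/_search");
        r.setJsonEntity("""
            { "query": { "bool": { "must": [
                { "term": { "my_name": "v2" } },
                { "knn": { "field": "my_vector", "query_vector": [1, 1, 1, 1],
                           "num_candidates": 5 } } ] } } }
            """);
        return r;
    }
}
```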
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml
new file mode 100644
index 0000000000000..b1c0fd948481b
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml
@@ -0,0 +1,216 @@
+# test how knn query scores combine across multiple shards and with other queries
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ features: close_to
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 2
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+ - '{"index": {"_id": "11"}}'
+ - '{"my_vector": [1, 1, 1, 11], "my_name": "v1"}'
+ - '{"index": {"_id": "12"}}'
+ - '{"my_vector": [1, 1, 1, 12], "my_name": "v2"}'
+
+
+---
+"Search for 2 knn queries combines scores from them":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 6
+ query:
+ bool:
+ should:
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 100
+ boost: 1.1
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 12 ]
+ num_candidates: 100
+
+ - length: {hits.hits: 6}
+ - match: {hits.total.value: 12}
+ - match: {hits.hits.0._id: '1'}
+ - match: {hits.hits.1._id: '12'}
+ - match: {hits.hits.2._id: '2'}
+ - match: { hits.hits.3._id: '11' }
+ - match: { hits.hits.4._id: '3' }
+ - match: { hits.hits.5._id: '10' }
+
+
+---
+"Hybrid search combines scores from knn and other queries":
+ - do:
+ search:
+ include_named_queries_score: true
+ index: my_index
+ body:
+ size: 3
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*" # produces scores 1.0
+ _name: "bm25_query"
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3
+ _name: "knn_query"
+
+ - length: {hits.hits: 3}
+ - match: {hits.total.value: 12}
+ - match: {hits.hits.0._id: '1'}
+ - match: {hits.hits.1._id: '2'}
+ - match: {hits.hits.2._id: '3'}
+
+ - close_to: {hits.hits.0._score: { value: 2.0, error: 0.00001 } }
+ - close_to: {hits.hits.0.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: {hits.hits.0.matched_queries.knn_query: { value: 1.0, error: 0.00001 } }
+
+ - close_to: {hits.hits.1._score: { value: 1.5, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.knn_query: { value: 0.5, error: 0.00001 } }
+
+ - close_to: {hits.hits.2._score: { value: 1.2, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.knn_query: { value: 0.2, error: 0.00001 } }
+
+ # the same query with boosts
+ - do:
+ search:
+ include_named_queries_score: true
+ index: my_index
+ body:
+ size: 3
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*" # produces scores 1.0
+ boost: 100
+ _name: "bm25_query"
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3
+ boost: 100
+ _name: "knn_query"
+
+ - length: { hits.hits: 3 }
+ - match: { hits.total.value: 12 }
+ - match: { hits.hits.0._id: '1' }
+ - match: { hits.hits.1._id: '2' }
+ - match: { hits.hits.2._id: '3' }
+
+ - close_to: { hits.hits.0._score: { value: 200.0, error: 0.00001 } }
+ - close_to: { hits.hits.0.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.0.matched_queries.knn_query: { value: 100.0, error: 0.00001 } }
+
+ - close_to: { hits.hits.1._score: { value: 150.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.knn_query: { value: 50.0, error: 0.00001 } }
+
+ - close_to: { hits.hits.2._score: { value: 120, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.knn_query: { value: 20.0, error: 0.00001 } }
+
+---
+"Aggregations with collected number of docs depends on num_candidates":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 2
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 100 # collect up to 100 candidates from each shard
+ aggs:
+ my_agg:
+ terms:
+ field: my_name
+ order:
+ _key: asc
+
+ - length: {hits.hits: 2}
+ - match: {hits.total.value: 12}
+ - match: {aggregations.my_agg.buckets.0.key: 'v1'}
+ - match: {aggregations.my_agg.buckets.1.key: 'v2'}
+ - match: {aggregations.my_agg.buckets.0.doc_count: 6}
+ - match: {aggregations.my_agg.buckets.1.doc_count: 6}
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 2
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3 # collect 3 candidates from each shard
+ aggs:
+ my_agg2:
+ terms:
+ field: my_name
+ order:
+ _key: asc
+ my_sum_buckets:
+ sum_bucket:
+ buckets_path: "my_agg2>_count"
+
+ - length: { hits.hits: 2 }
+ - match: { hits.total.value: 6 }
+ - match: { aggregations.my_agg2.buckets.0.key: 'v1' }
+ - match: { aggregations.my_agg2.buckets.1.key: 'v2' }
+ - match: { aggregations.my_sum_buckets.value: 6.0 }
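
The exact `close_to` values above follow from the documented `l2_norm` scoring, `score = 1 / (1 + distance²)`: every indexed vector has the form `[1, 1, 1, k]`, so its distance to the query vector `[1, 1, 1, 1]` is simply `k - 1`. A small self-contained sketch verifying the expected 1.0 / 0.5 / 0.2 sequence:

```java
// Assumes the documented l2_norm similarity formula; not code from this patch.
final class L2NormScoreSketch {
    static double score(float[] query, float[] doc) {
        double squaredDistance = 0;
        for (int i = 0; i < query.length; i++) {
            double d = query[i] - doc[i];
            squaredDistance += d * d;
        }
        return 1.0 / (1.0 + squaredDistance);
    }

    public static void main(String[] args) {
        float[] query = { 1, 1, 1, 1 };
        System.out.println(score(query, new float[] { 1, 1, 1, 1 })); // 1.0 -> doc "1"
        System.out.println(score(query, new float[] { 1, 1, 1, 2 })); // 0.5 -> doc "2"
        System.out.println(score(query, new float[] { 1, 1, 1, 3 })); // 0.2 -> doc "3"
    }
}
```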
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml
new file mode 100644
index 0000000000000..435291b454d08
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml
@@ -0,0 +1,213 @@
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ index:
+ number_of_shards: 1
+ mappings:
+ properties:
+ name:
+ type: keyword
+ nested:
+ type: nested
+ properties:
+ paragraph_id:
+ type: keyword
+ vector:
+ type: dense_vector
+ dims: 5
+ index: true
+ similarity: l2_norm
+ aliases:
+ my_alias:
+ filter:
+ term:
+ name: "rabbit.jpg"
+
+ - do:
+ index:
+ index: test
+ id: "1"
+ body:
+ name: cow.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [230.0, 300.33, -34.8988, 15.555, -200.0]
+ - paragraph_id: 1
+ vector: [240.0, 300, -3, 1, -20]
+
+ - do:
+ index:
+ index: test
+ id: "2"
+ body:
+ name: moose.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [-0.5, 100.0, -13, 14.8, -156.0]
+ - paragraph_id: 2
+ vector: [0, 100.0, 0, 14.8, -156.0]
+ - paragraph_id: 3
+ vector: [0, 1.0, 0, 1.8, -15.0]
+
+ - do:
+ index:
+ index: test
+ id: "3"
+ body:
+ name: rabbit.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [0.5, 111.3, -13.0, 14.8, -156.0]
+
+ - do:
+ indices.refresh: {}
+
+---
+"nested kNN search that returns diverse parents docs":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [-0.5, 90.0, -10, 14.8, -156.0]
+ num_candidates: 3
+
+ - match: {hits.hits.0._id: "2"}
+ - match: {hits.hits.0.fields.name.0: "moose.jpg"}
+
+ - match: {hits.hits.1._id: "3"}
+ - match: {hits.hits.1.fields.name.0: "rabbit.jpg"}
+
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 3
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 3}
+
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.name.0: "moose.jpg" }
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+ - match: { hits.hits.1._id: "3" }
+ - match: { hits.hits.1.fields.name.0: "rabbit.jpg" }
+ - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+ - match: { hits.hits.2._id: "1" }
+ - match: { hits.hits.2.fields.name.0: "cow.jpg" }
+ - match: { hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+---
+"nested kNN search pre-filtered on alias with filter on top level fields":
+ - do:
+ search:
+ index: my_alias # filter on name: "rabbit.jpg"
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 1
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 1} # as alias is passed as pre-filter, we get a single result
+ - match: {hits.hits.0._id: "3"}
+ - match: {hits.hits.0.fields.name.0: "rabbit.jpg"}
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+---
+"nested kNN search post-filtered on top level fields":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ bool:
+ must:
+ - term:
+ name: "rabbit.jpg"
+ - nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 1
+ - match: { hits.total.value: 0 } # no hits because returned single vector did not pass post-filter
+
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ bool:
+ must:
+ - term:
+ name: "rabbit.jpg"
+ - nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 3
+ inner_hits: { size: 1, fields: [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 1}
+ - match: {hits.hits.0._id: "3"}
+ - match: {hits.hits.0.fields.name.0: "rabbit.jpg"}
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+---
+"nested kNN search post-filtered on nested fields DOES NOT work":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ bool:
+ must:
+ - term:
+ nested.paragraph_id: 3
+ - knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 6
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+  # no hits because, regardless of num_candidates, knn returns the top 3 child vectors
+  # from distinct parents and they don't pass the post-filter
+ # TODO: fix it on Lucene level so nested knn respects num_candidates
+ # or do pre-filtering
+ - match: {hits.total.value: 0}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml
new file mode 100644
index 0000000000000..8f52a72cce01e
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml
@@ -0,0 +1,127 @@
+# test how knn query interacts with other queries
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ features: close_to
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 1
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+ aliases:
+ my_alias:
+ filter:
+ term:
+ my_name: v2
+ my_alias1:
+ filter:
+ term:
+ my_name: v1
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+
+---
+"Function score query with knn query":
+ # find top 5 knn docs, then boost docs with name v1 by 10 and docs with name v2 by 100
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ function_score:
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ functions:
+ - filter: { match: { my_name: v1 } }
+ weight: 10
+ - filter: { match: { my_name: v2 } }
+ weight: 100
+ boost_mode: multiply
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: { hits.hits: 3 }
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - close_to: { hits.hits.0._score: { value: 50.0, error: 0.001 } }
+ - match: { hits.hits.1._id: "1" }
+ - match: { hits.hits.1.fields.my_name.0: v1 }
+ - close_to: { hits.hits.1._score: { value: 10.0, error: 0.001 } }
+ - match: { hits.hits.2._id: "4" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+ - close_to: { hits.hits.2._score: { value: 10.0, error: 0.001 } }
+
+---
+"dis_max query with knn query":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 10
+ fields: [ my_name ]
+ query:
+ dis_max:
+ queries:
+ - knn: { field: my_vector, query_vector: [ 1, 1, 1, 1 ], num_candidates: 5 }
+ - match: { my_name: v2 }
+ tie_breaker: 0.8
+
+ - match: { hits.total.value: 8 } # 5 knn results + extra results from match query
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "1" }
+ - match: { hits.hits.1.fields.my_name.0: v1 }
+ - match: { hits.hits.2._id: "4" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+ - match: { hits.hits.3._id: "6" }
+ - match: { hits.hits.3.fields.my_name.0: v2 }
+ - match: { hits.hits.4._id: "8" }
+ - match: { hits.hits.4.fields.my_name.0: v2 }
+ - match: { hits.hits.5._id: "10" }
+ - match: { hits.hits.5.fields.my_name.0: v2 }
+ - match: { hits.hits.6._id: "3" }
+ - match: { hits.hits.6.fields.my_name.0: v1 }
+ - match: { hits.hits.7._id: "5" }
+ - match: { hits.hits.7.fields.my_name.0: v1 }
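
The ordering asserted in the `dis_max` test follows from how that query combines subquery scores: the best-scoring subquery wins outright, and every other matching subquery contributes `tie_breaker` times its score. A sketch of that combination (the standard Lucene formula, not code from this patch):

```java
final class DisMaxSketch {
    // score = max(subscores) + tieBreaker * sum(of the remaining subscores)
    static double disMaxScore(double tieBreaker, double... subscores) {
        double max = Double.NEGATIVE_INFINITY;
        double sum = 0;
        for (double s : subscores) {
            max = Math.max(max, s);
            sum += s;
        }
        return max + tieBreaker * (sum - max);
    }
}
```

For example, doc "2" above matches both the knn subquery and the match subquery, so its score is the larger of the two plus 0.8 times the other, which is why it outranks the knn-only doc "1".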
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
index 340cd8f8d0f70..57f8603f1e06e 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
@@ -294,23 +294,6 @@ setup:
- match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" }
---
-"Direct kNN queries are disallowed":
- - skip:
- version: ' - 8.3.99'
- reason: 'error message changed in 8.4'
- - do:
- catch: bad_request
- search:
- index: test-index
- body:
- query:
- knn:
- field: vector
- query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
- num_candidates: 1
- - match: { error.root_cause.0.type: "illegal_argument_exception" }
- - match: { error.root_cause.0.reason: "[knn] queries cannot be provided directly, use the [knn] body parameter instead" }
----
"KNN Vector similarity search only":
- skip:
version: ' - 8.7.99'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
index 873b6d87cac66..ea21bb69a77b8 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
@@ -163,20 +163,6 @@ setup:
- match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" }
---
-"Direct kNN queries are disallowed":
- - do:
- catch: bad_request
- search:
- index: test
- body:
- query:
- knn:
- field: vector
- query_vector: [ -1, 0, 1, 2, 3 ]
- num_candidates: 1
- - match: { error.root_cause.0.type: "illegal_argument_exception" }
- - match: { error.root_cause.0.reason: "[knn] queries cannot be provided directly, use the [knn] body parameter instead" }
----
"Vector similarity search only":
- skip:
version: ' - 8.7.99'
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index e3ea54f382c0a..4b395ec6856e5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -13,7 +13,6 @@
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -41,6 +40,7 @@
import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.equalTo;
@@ -270,12 +270,14 @@ public void onFailure(Exception e) {
// we only really assert that we never reuse segments of old indices or anything like this here and that nothing fails with
// crazy exceptions
- SearchResponse expected = prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen())
- .setQuery(new RangeQueryBuilder("index_version").from(indexVersion.get(), true))
- .get();
- SearchResponse all = prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get();
- assertEquals(expected + " vs. " + all, expected.getHits().getTotalHits().value, all.getHits().getTotalHits().value);
- logger.info("total: {}", expected.getHits().getTotalHits().value);
+ assertNoFailuresAndResponse(
+ prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .setQuery(new RangeQueryBuilder("index_version").from(indexVersion.get(), true)),
+ expected -> assertNoFailuresAndResponse(prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()), all -> {
+ assertEquals(expected + " vs. " + all, expected.getHits().getTotalHits().value, all.getHits().getTotalHits().value);
+ logger.info("total: {}", expected.getHits().getTotalHits().value);
+ })
+ );
}
public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {
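
This hunk sets the pattern for the assertion migration running through the rest of the test changes: instead of holding the `SearchResponse` returned by `get()`, the request builder is handed to `assertNoFailuresAndResponse`, which checks for shard failures and scopes the response to a consumer. A rough sketch of what such a helper does; this is an assumed shape, not the actual `ElasticsearchAssertions` source:

```java
import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

final class ResponseAssertionSketch {
    // Runs the search, asserts no shard failures, hands the response to the
    // consumer, and releases the ref-counted response so tests cannot leak it.
    static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();
        try {
            assertNoFailures(response);
            consumer.accept(response);
        } finally {
            response.decRef();
        }
    }
}
```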
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
index d010c8b0cd74f..f55ac7172266d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
@@ -38,7 +38,7 @@
@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 2)
public class AwarenessAllocationIT extends ESIntegTestCase {
- private final Logger logger = LogManager.getLogger(AwarenessAllocationIT.class);
+ private static final Logger logger = LogManager.getLogger(AwarenessAllocationIT.class);
@Override
protected int numberOfReplicas() {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index bd3a545d7ed77..6175395803e88 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -9,8 +9,6 @@
package org.elasticsearch.cluster.allocation;
import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
@@ -66,7 +64,6 @@
@LuceneTestCase.SuppressFileSystems(value = "WindowsFS")
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class ClusterRerouteIT extends ESIntegTestCase {
- private final Logger logger = LogManager.getLogger(ClusterRerouteIT.class);
public void testRerouteWithCommands_disableAllocationSettings() throws Exception {
Settings commonSettings = Settings.builder()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
index 3a2c6b5ebd0f7..80bba57270aa5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
@@ -193,7 +193,7 @@ public void testDeleteCreateInOneBulk() throws Exception {
refresh();
disruption.startDisrupting();
logger.info("--> delete index");
- executeAndCancelCommittedPublication(indicesAdmin().prepareDelete("test").setTimeout("0s")).get(10, TimeUnit.SECONDS);
+ executeAndCancelCommittedPublication(indicesAdmin().prepareDelete("test").setTimeout("0s")).get(30, TimeUnit.SECONDS);
logger.info("--> and recreate it");
executeAndCancelCommittedPublication(
prepareCreate("test").setSettings(
@@ -201,7 +201,7 @@ public void testDeleteCreateInOneBulk() throws Exception {
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")
).setTimeout("0s")
- ).get(10, TimeUnit.SECONDS);
+ ).get(30, TimeUnit.SECONDS);
logger.info("--> letting cluster proceed");
@@ -295,7 +295,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception {
// Now make sure the indexing request finishes successfully
disruption.stopDisrupting();
- assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
+ assertTrue(putMappingResponse.get(30, TimeUnit.SECONDS).isAcknowledged());
assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
assertEquals(1, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal());
}
@@ -408,11 +408,11 @@ public void testDelayedMappingPropagationOnReplica() throws Exception {
// Now make sure the indexing request finishes successfully
disruption.stopDisrupting();
- assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
+ assertTrue(putMappingResponse.get(30, TimeUnit.SECONDS).isAcknowledged());
assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); // both shards should have succeeded
- assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
+ assertThat(dynamicMappingsFut.get(30, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
}
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
index e995d815af0f1..d92664f55416a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
@@ -8,8 +8,6 @@
package org.elasticsearch.gateway;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
@@ -69,8 +67,6 @@
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class GatewayIndexStateIT extends ESIntegTestCase {
- private final Logger logger = LogManager.getLogger(GatewayIndexStateIT.class);
-
@Override
protected boolean addMockInternalEngine() {
// testRecoverBrokenIndexMetadata replies on the flushing on shutdown behavior which can be randomly disabled in MockInternalEngine.
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
index 9b763ea581187..dd22f50ab420b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java
@@ -8,8 +8,6 @@
package org.elasticsearch.indices.state;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -27,8 +25,6 @@
@ESIntegTestCase.ClusterScope(minNumDataNodes = 2)
public class SimpleIndexStateIT extends ESIntegTestCase {
- private final Logger logger = LogManager.getLogger(SimpleIndexStateIT.class);
-
public void testSimpleOpenClose() {
logger.info("--> creating test index");
createIndex("test");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
index bfd16adaa405b..fceeb2013b7c5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
@@ -8,8 +8,6 @@
package org.elasticsearch.recovery;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
@@ -47,7 +45,6 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
- private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class);
public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
index 05fa9dc66928c..345504582305a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
@@ -8,6 +8,7 @@
package org.elasticsearch.search;
+import org.apache.lucene.tests.util.LuceneTestCase;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.search.MultiSearchAction;
@@ -50,6 +51,7 @@
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101739")
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
public class SearchCancellationIT extends AbstractSearchCancellationTestCase {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
index 8b7f566750042..ba20e86237530 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@@ -31,6 +30,7 @@
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.hamcrest.Matchers.closeTo;
@ESIntegTestCase.SuiteScopeTestCase
@@ -54,161 +54,186 @@ protected void setupSuiteScopeCluster() throws Exception {
}
public void testUnmappedTerms() {
- SearchResponse response = prepareSearch("idx").addAggregation(terms("my_terms").field("non_existing_field").missing("bar")).get();
- assertNoFailures(response);
- Terms terms = response.getAggregations().get("my_terms");
- assertEquals(1, terms.getBuckets().size());
- assertEquals(2, terms.getBucketByKey("bar").getDocCount());
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(terms("my_terms").field("non_existing_field").missing("bar")),
+ response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(1, terms.getBuckets().size());
+ assertEquals(2, terms.getBucketByKey("bar").getDocCount());
+ }
+ );
}
public void testStringTerms() {
for (ExecutionMode mode : ExecutionMode.values()) {
- SearchResponse response = prepareSearch("idx").addAggregation(
- terms("my_terms").field("str").executionHint(mode.toString()).missing("bar")
- ).get();
- assertNoFailures(response);
- Terms terms = response.getAggregations().get("my_terms");
- assertEquals(2, terms.getBuckets().size());
- assertEquals(1, terms.getBucketByKey("foo").getDocCount());
- assertEquals(1, terms.getBucketByKey("bar").getDocCount());
-
- response = prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("foo")).get();
- assertNoFailures(response);
- terms = response.getAggregations().get("my_terms");
- assertEquals(1, terms.getBuckets().size());
- assertEquals(2, terms.getBucketByKey("foo").getDocCount());
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(terms("my_terms").field("str").executionHint(mode.toString()).missing("bar")),
+ response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(2, terms.getBuckets().size());
+ assertEquals(1, terms.getBucketByKey("foo").getDocCount());
+ assertEquals(1, terms.getBucketByKey("bar").getDocCount());
+ }
+ );
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("foo")), response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(1, terms.getBuckets().size());
+ assertEquals(2, terms.getBucketByKey("foo").getDocCount());
+ });
}
}
public void testLongTerms() {
- SearchResponse response = prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(4)).get();
- assertNoFailures(response);
- Terms terms = response.getAggregations().get("my_terms");
- assertEquals(2, terms.getBuckets().size());
- assertEquals(1, terms.getBucketByKey("3").getDocCount());
- assertEquals(1, terms.getBucketByKey("4").getDocCount());
-
- response = prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(3)).get();
- assertNoFailures(response);
- terms = response.getAggregations().get("my_terms");
- assertEquals(1, terms.getBuckets().size());
- assertEquals(2, terms.getBucketByKey("3").getDocCount());
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(4)), response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(2, terms.getBuckets().size());
+ assertEquals(1, terms.getBucketByKey("3").getDocCount());
+ assertEquals(1, terms.getBucketByKey("4").getDocCount());
+ });
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(3)), response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(1, terms.getBuckets().size());
+ assertEquals(2, terms.getBucketByKey("3").getDocCount());
+ });
}
public void testDoubleTerms() {
- SearchResponse response = prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(4.5)).get();
- assertNoFailures(response);
- Terms terms = response.getAggregations().get("my_terms");
- assertEquals(2, terms.getBuckets().size());
- assertEquals(1, terms.getBucketByKey("4.5").getDocCount());
- assertEquals(1, terms.getBucketByKey("5.5").getDocCount());
-
- response = prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(5.5)).get();
- assertNoFailures(response);
- terms = response.getAggregations().get("my_terms");
- assertEquals(1, terms.getBuckets().size());
- assertEquals(2, terms.getBucketByKey("5.5").getDocCount());
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(4.5)), response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(2, terms.getBuckets().size());
+ assertEquals(1, terms.getBucketByKey("4.5").getDocCount());
+ assertEquals(1, terms.getBucketByKey("5.5").getDocCount());
+ });
+
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(5.5)), response -> {
+ Terms terms = response.getAggregations().get("my_terms");
+ assertEquals(1, terms.getBuckets().size());
+ assertEquals(2, terms.getBucketByKey("5.5").getDocCount());
+ });
}
public void testUnmappedHistogram() {
- SearchResponse response = prepareSearch("idx").addAggregation(
- histogram("my_histogram").field("non-existing_field").interval(5).missing(12)
- ).get();
- assertNoFailures(response);
- Histogram histogram = response.getAggregations().get("my_histogram");
- assertEquals(1, histogram.getBuckets().size());
- assertEquals(10d, histogram.getBuckets().get(0).getKey());
- assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(histogram("my_histogram").field("non-existing_field").interval(5).missing(12)),
+ response -> {
+ Histogram histogram = response.getAggregations().get("my_histogram");
+ assertEquals(1, histogram.getBuckets().size());
+ assertEquals(10d, histogram.getBuckets().get(0).getKey());
+ assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ }
+ );
}
public void testHistogram() {
- SearchResponse response = prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)).get();
- assertNoFailures(response);
- Histogram histogram = response.getAggregations().get("my_histogram");
- assertEquals(2, histogram.getBuckets().size());
- assertEquals(0d, histogram.getBuckets().get(0).getKey());
- assertEquals(1, histogram.getBuckets().get(0).getDocCount());
- assertEquals(5d, histogram.getBuckets().get(1).getKey());
- assertEquals(1, histogram.getBuckets().get(1).getDocCount());
-
- response = prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get();
- assertNoFailures(response);
- histogram = response.getAggregations().get("my_histogram");
- assertEquals(1, histogram.getBuckets().size());
- assertEquals(0d, histogram.getBuckets().get(0).getKey());
- assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)),
+ response -> {
+ Histogram histogram = response.getAggregations().get("my_histogram");
+ assertEquals(2, histogram.getBuckets().size());
+ assertEquals(0d, histogram.getBuckets().get(0).getKey());
+ assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+ assertEquals(5d, histogram.getBuckets().get(1).getKey());
+ assertEquals(1, histogram.getBuckets().get(1).getDocCount());
+ }
+ );
+
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)),
+ response -> {
+ Histogram histogram = response.getAggregations().get("my_histogram");
+ assertEquals(1, histogram.getBuckets().size());
+ assertEquals(0d, histogram.getBuckets().get(0).getKey());
+ assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ }
+ );
}
public void testDateHistogram() {
- SearchResponse response = prepareSearch("idx").addAggregation(
- dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07")
- ).get();
- assertNoFailures(response);
- Histogram histogram = response.getAggregations().get("my_histogram");
- assertEquals(2, histogram.getBuckets().size());
- assertEquals("2014-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
- assertEquals(1, histogram.getBuckets().get(0).getDocCount());
- assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(1).getKeyAsString());
- assertEquals(1, histogram.getBuckets().get(1).getDocCount());
-
- response = prepareSearch("idx").addAggregation(
- dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07")
- ).get();
- assertNoFailures(response);
- histogram = response.getAggregations().get("my_histogram");
- assertEquals(1, histogram.getBuckets().size());
- assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
- assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(
+ dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07")
+ ),
+ response -> {
+ Histogram histogram = response.getAggregations().get("my_histogram");
+ assertEquals(2, histogram.getBuckets().size());
+ assertEquals("2014-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
+ assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+ assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(1).getKeyAsString());
+ assertEquals(1, histogram.getBuckets().get(1).getDocCount());
+ }
+ );
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(
+ dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07")
+ ),
+ response -> {
+ Histogram histogram = response.getAggregations().get("my_histogram");
+ assertEquals(1, histogram.getBuckets().size());
+ assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
+ assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+ }
+ );
}
public void testCardinality() {
- SearchResponse response = prepareSearch("idx").addAggregation(cardinality("card").field("long").missing(2)).get();
- assertNoFailures(response);
- Cardinality cardinality = response.getAggregations().get("card");
- assertEquals(2, cardinality.getValue());
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(cardinality("card").field("long").missing(2)), response -> {
+ Cardinality cardinality = response.getAggregations().get("card");
+ assertEquals(2, cardinality.getValue());
+ });
}
public void testPercentiles() {
- SearchResponse response = prepareSearch("idx").addAggregation(percentiles("percentiles").field("long").missing(1000)).get();
- assertNoFailures(response);
- Percentiles percentiles = response.getAggregations().get("percentiles");
- assertEquals(1000, percentiles.percentile(100), 0);
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(percentiles("percentiles").field("long").missing(1000)),
+ response -> {
+ Percentiles percentiles = response.getAggregations().get("percentiles");
+ assertEquals(1000, percentiles.percentile(100), 0);
+ }
+ );
}
public void testStats() {
- SearchResponse response = prepareSearch("idx").addAggregation(stats("stats").field("long").missing(5)).get();
- assertNoFailures(response);
- Stats stats = response.getAggregations().get("stats");
- assertEquals(2, stats.getCount());
- assertEquals(4, stats.getAvg(), 0);
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(stats("stats").field("long").missing(5)), response -> {
+ Stats stats = response.getAggregations().get("stats");
+ assertEquals(2, stats.getCount());
+ assertEquals(4, stats.getAvg(), 0);
+ });
}
public void testUnmappedGeoBounds() {
- SearchResponse response = prepareSearch("idx").addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")).get();
- assertNoFailures(response);
- GeoBounds bounds = response.getAggregations().get("bounds");
- assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5));
- assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5));
- assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
- assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")),
+ response -> {
+ GeoBounds bounds = response.getAggregations().get("bounds");
+ assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5));
+ assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5));
+ assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
+ assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
+ }
+ );
}
public void testGeoBounds() {
- SearchResponse response = prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")).get();
- assertNoFailures(response);
- GeoBounds bounds = response.getAggregations().get("bounds");
- assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5));
- assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5));
- assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
- assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
+ assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")), response -> {
+ GeoBounds bounds = response.getAggregations().get("bounds");
+ assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5));
+ assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5));
+ assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
+ assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
+ });
}
public void testGeoCentroid() {
- SearchResponse response = prepareSearch("idx").addAggregation(geoCentroid("centroid").field("location").missing("2,1")).get();
- assertNoFailures(response);
- GeoCentroid centroid = response.getAggregations().get("centroid");
- GeoPoint point = new GeoPoint(1.5, 1.5);
- assertThat(point.getY(), closeTo(centroid.centroid().getY(), 1E-5));
- assertThat(point.getX(), closeTo(centroid.centroid().getX(), 1E-5));
+ assertNoFailuresAndResponse(
+ prepareSearch("idx").addAggregation(geoCentroid("centroid").field("location").missing("2,1")),
+ response -> {
+ GeoCentroid centroid = response.getAggregations().get("centroid");
+ GeoPoint point = new GeoPoint(1.5, 1.5);
+ assertThat(point.getY(), closeTo(centroid.centroid().getY(), 1E-5));
+ assertThat(point.getX(), closeTo(centroid.centroid().getX(), 1E-5));
+ }
+ );
}
}
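Note on the refactoring above: every test follows one mechanical pattern. Instead of materializing a SearchResponse with get() and asserting on it, the request builder and the assertions are handed to ElasticsearchAssertions.assertNoFailuresAndResponse, which runs the search, asserts there are no shard failures, invokes the consumer, and handles releasing the response afterwards, so no response reference escapes the test. A minimal sketch inside an ESIntegTestCase subclass (index and field names are placeholders):

```java
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;

import org.elasticsearch.search.aggregations.bucket.terms.Terms;

public void testMissingValueSketch() {
    assertNoFailuresAndResponse(
        prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("bar")),
        response -> {
            // All response access happens inside the consumer; the helper
            // already asserted that no shard failures occurred.
            Terms terms = response.getAggregations().get("my_terms");
            assertEquals(2, terms.getBuckets().size());
        }
    );
}
```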
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java
index 0e430c9618bc8..db06eb1b5de0b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java
@@ -9,7 +9,6 @@
package org.elasticsearch.search.sort;
import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoPoint;
@@ -41,6 +40,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsString;
@@ -177,40 +177,43 @@ public void testSimpleSorts() throws Exception {
refresh();
// STRING script
- int size = 1 + random.nextInt(10);
-
Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", Collections.emptyMap());
- SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
- .setSize(size)
- .addSort(new ScriptSortBuilder(script, ScriptSortType.STRING))
- .get();
-
- assertHitCount(searchResponse, 10);
- assertThat(searchResponse.getHits().getHits().length, equalTo(size));
- for (int i = 0; i < size; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat(searchHit.getId(), equalTo(Integer.toString(i)));
-
- String expected = new String(new char[] { (char) (97 + i), (char) (97 + i) });
- assertThat(searchHit.getSortValues()[0].toString(), equalTo(expected));
- }
-
- size = 1 + random.nextInt(10);
- searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).get();
-
- assertHitCount(searchResponse, 10);
- assertThat(searchResponse.getHits().getHits().length, equalTo(size));
- for (int i = 0; i < size; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat(searchHit.getId(), equalTo(Integer.toString(9 - i)));
+ final int sizeFirstRequest = 1 + random.nextInt(10);
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .setSize(sizeFirstRequest)
+ .addSort(new ScriptSortBuilder(script, ScriptSortType.STRING)),
+ searchResponse -> {
+ assertHitCount(searchResponse, 10);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(sizeFirstRequest));
+ for (int i = 0; i < sizeFirstRequest; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat(searchHit.getId(), equalTo(Integer.toString(i)));
+
+ String expected = new String(new char[] { (char) (97 + i), (char) (97 + i) });
+ assertThat(searchHit.getSortValues()[0].toString(), equalTo(expected));
+ }
+ }
+ );
- String expected = new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) });
- assertThat(searchHit.getSortValues()[0].toString(), equalTo(expected));
- }
+ final int sizeSecondRequest = 1 + random.nextInt(10);
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery()).setSize(sizeSecondRequest).addSort("str_value", SortOrder.DESC),
+ searchResponse -> {
+ assertHitCount(searchResponse, 10);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(sizeSecondRequest));
+ for (int i = 0; i < sizeSecondRequest; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat(searchHit.getId(), equalTo(Integer.toString(9 - i)));
+
+ String expected = new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) });
+ assertThat(searchHit.getSortValues()[0].toString(), equalTo(expected));
+ }
- assertThat(searchResponse.toString(), not(containsString("error")));
- assertNoFailures(searchResponse);
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ }
+ );
}
public void testSortMinValueScript() throws IOException {
@@ -260,64 +263,71 @@ public void testSortMinValueScript() throws IOException {
indicesAdmin().prepareRefresh("test").get();
// test the long values
- SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min long", Collections.emptyMap()))
- .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
- .setSize(10)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertHitCount(searchResponse, 20L);
- for (int i = 0; i < 10; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo((long) i));
- }
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min long", Collections.emptyMap()))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
+ .setSize(10),
+ searchResponse -> {
+ assertHitCount(searchResponse, 20L);
+ for (int i = 0; i < 10; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo((long) i));
+ }
+ }
+ );
// test the double values
- searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min double", Collections.emptyMap()))
- .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
- .setSize(10)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertHitCount(searchResponse, 20L);
- for (int i = 0; i < 10; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo((double) i));
- }
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min double", Collections.emptyMap()))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
+ .setSize(10),
+ searchResponse -> {
+ assertHitCount(searchResponse, 20L);
+ for (int i = 0; i < 10; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo((double) i));
+ }
+ }
+ );
// test the string values
- searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min string", Collections.emptyMap()))
- .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
- .setSize(10)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertHitCount(searchResponse, 20L);
- for (int i = 0; i < 10; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo(i));
- }
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min string", Collections.emptyMap()))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
+ .setSize(10),
+ searchResponse -> {
+ assertHitCount(searchResponse, 20L);
+ for (int i = 0; i < 10; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), equalTo(i));
+ }
+ }
+ );
// test the geopoint values
- searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min geopoint lon", Collections.emptyMap()))
- .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
- .setSize(10)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertHitCount(searchResponse, 20L);
- for (int i = 0; i < 10; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat("res: " + i + " id: " + searchHit.getId(), searchHit.field("min").getValue(), closeTo(i, GeoUtils.TOLERANCE));
- }
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .addScriptField(
+ "min",
+ new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min geopoint lon", Collections.emptyMap())
+ )
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long"))
+ .setSize(10),
+ searchResponse -> {
+ assertHitCount(searchResponse, 20L);
+ for (int i = 0; i < 10; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ assertThat(
+ "res: " + i + " id: " + searchHit.getId(),
+ searchHit.field("min").getValue(),
+ closeTo(i, GeoUtils.TOLERANCE)
+ );
+ }
+ }
+ );
}
public void testDocumentsWithNullValue() throws Exception {
@@ -350,61 +360,60 @@ public void testDocumentsWithNullValue() throws Exception {
Script scripField = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['id'].value", Collections.emptyMap());
- SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("id", scripField)
- .addSort("svalue", SortOrder.ASC)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
- assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("1"));
- assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("3"));
- assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
-
- searchResponse = prepareSearch().setQuery(matchAllQuery())
- .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['id'][0]", Collections.emptyMap()))
- .addSort("svalue", SortOrder.ASC)
- .get();
-
- assertNoFailures(searchResponse);
-
- assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
- assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("1"));
- assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("3"));
- assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
-
- searchResponse = prepareSearch().setQuery(matchAllQuery()).addScriptField("id", scripField).addSort("svalue", SortOrder.DESC).get();
-
- if (searchResponse.getFailedShards() > 0) {
- logger.warn("Failed shards:");
- for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
- logger.warn("-> {}", shardSearchFailure);
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery()).addScriptField("id", scripField).addSort("svalue", SortOrder.ASC),
+ searchResponse -> {
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
+ assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
}
- }
- assertThat(searchResponse.getFailedShards(), equalTo(0));
+ );
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['id'][0]", Collections.emptyMap()))
+ .addSort("svalue", SortOrder.ASC),
+ searchResponse -> {
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
+ assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
+ }
+ );
- assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
- assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("3"));
- assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("1"));
- assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(matchAllQuery()).addScriptField("id", scripField).addSort("svalue", SortOrder.DESC),
+ searchResponse -> {
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
- // a query with docs just with null values
- searchResponse = prepareSearch().setQuery(termQuery("id", "2"))
- .addScriptField("id", scripField)
- .addSort("svalue", SortOrder.DESC)
- .get();
-
- if (searchResponse.getFailedShards() > 0) {
- logger.warn("Failed shards:");
- for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
- logger.warn("-> {}", shardSearchFailure);
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
+ assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).field("id").getValue(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).field("id").getValue(), equalTo("2"));
}
- }
- assertThat(searchResponse.getFailedShards(), equalTo(0));
+ );
+ // a query with docs just with null values
+ assertNoFailuresAndResponse(
+ prepareSearch().setQuery(termQuery("id", "2")).addScriptField("id", scripField).addSort("svalue", SortOrder.DESC),
+ searchResponse -> {
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
- assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
- assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("2"));
+ assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+ assertThat(searchResponse.getHits().getAt(0).field("id").getValue(), equalTo("2"));
+ }
+ );
}
public void test2920() throws IOException {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
index e3bd85440c535..bd14f913b10ef 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
@@ -78,7 +78,7 @@ public void run() {
final var shardStatuses = snapshotShardsService.currentSnapshotShards(snapshot);
assertEquals(1, shardStatuses.size());
- final var shardStatus = shardStatuses.get(new ShardId(index, 0)).asCopy();
+ final var shardStatus = shardStatuses.get(new ShardId(index, 0));
logger.info("--> {}", shardStatus);
if (i == 0) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
index d68301a310722..ca522064e3d04 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
@@ -1066,7 +1066,6 @@ public void testEquivalentDeletesAreDeduplicated() throws Exception {
}
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99355")
public void testMasterFailoverOnFinalizationLoop() throws Exception {
internalCluster().startMasterOnlyNodes(3);
final String dataNode = internalCluster().startDataOnlyNode();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
index ee955da01f4af..b2494c5bd2b91 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
@@ -71,7 +71,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception {
List<IndexShardSnapshotStatus.Stage> stages = snapshotShardsService.currentSnapshotShards(snapshot)
.values()
.stream()
- .map(status -> status.asCopy().getStage())
+ .map(IndexShardSnapshotStatus.Copy::getStage)
.toList();
assertThat(stages, hasSize(shards));
assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE)));
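The lambda and the method-reference forms are equivalent here; the rewrite works because currentSnapshotShards now returns immutable IndexShardSnapshotStatus.Copy values directly, so no per-element asCopy() call is needed. A sketch, where `statuses` stands in for the map returned by the service:

```java
// Before: values were live IndexShardSnapshotStatus objects, copied per element:
//   statuses.values().stream().map(status -> status.asCopy().getStage()).toList();

// After: values are already point-in-time copies.
List<IndexShardSnapshotStatus.Stage> stages =
    statuses.values().stream().map(IndexShardSnapshotStatus.Copy::getStage).toList();
```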
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index 8e636a93e4f0b..65792ebcccc64 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -406,7 +406,8 @@
provides org.elasticsearch.features.FeatureSpecification
with
org.elasticsearch.features.FeaturesSupportedSpecification,
- org.elasticsearch.health.HealthFeature;
+ org.elasticsearch.health.HealthFeature,
+ org.elasticsearch.rest.RestFeatures;
uses org.elasticsearch.plugins.internal.SettingsExtension;
uses RestExtension;
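The new `provides` entry wires an additional FeatureSpecification into the module system. As a rough sketch of the assumed shape (the class body and feature id below are hypothetical, not the real RestFeatures source), such a specification simply enumerates the NodeFeatures the node advertises:

```java
import java.util.Set;

import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

public class RestFeaturesSketch implements FeatureSpecification {
    @Override
    public Set<NodeFeature> getFeatures() {
        // Hypothetical feature id; the real RestFeatures class defines its own.
        return Set.of(new NodeFeature("rest.example_capability"));
    }
}
```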
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index d09be93772e78..0c7145730e447 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -157,6 +157,10 @@ static TransportVersion def(int id) {
public static final TransportVersion CLUSTER_FEATURES_ADDED = def(8_526_00_0);
public static final TransportVersion DSL_ERROR_STORE_INFORMATION_ENHANCED = def(8_527_00_0);
public static final TransportVersion INVALID_BUCKET_PATH_EXCEPTION_INTRODUCED = def(8_528_00_0);
+ public static final TransportVersion KNN_AS_QUERY_ADDED = def(8_529_00_0);
+ public static final TransportVersion UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED = def(8_530_00_0);
+ public static final TransportVersion ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED = def(8_531_00_0);
+ public static final TransportVersion DEPRECATED_COMPONENT_TEMPLATES_ADDED = def(8_532_00_0);
/*
* STOP! READ THIS FIRST! No, really,
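Each constant above reserves a wire-format id. A hedged sketch of the usual gating pattern these ids enable, mirroring the onOrAfter checks used elsewhere in this patch (the field name is illustrative, not taken from these four changes):

```java
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    // Only nodes that understand the new field receive it on the wire.
    if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
        out.writeVLong(undesiredShardAllocations); // illustrative field
    }
}
```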
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 6ac451d5bc93b..a855b6b8ee7e3 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -282,6 +282,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards;
import org.elasticsearch.health.GetHealthAction;
import org.elasticsearch.health.RestGetHealthAction;
@@ -857,7 +858,7 @@ private static ActionFilters setupActionFilters(List<ActionPlugin> actionPlugins
return new ActionFilters(Set.copyOf(finalFilters));
}
- public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
+ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster, Predicate<NodeFeature> clusterSupportsFeature) {
List<AbstractCatAction> catActions = new ArrayList<>();
Predicate<AbstractCatAction> catActionsFilter = restExtension.getCatActionsFilter();
Predicate<String> restFilter = restExtension.getActionsFilter();
@@ -889,7 +890,7 @@ public void initRestHandlers(Supplier nodesInCluster) {
registerHandler.accept(new RestClusterStateAction(settingsFilter, threadPool));
registerHandler.accept(new RestClusterHealthAction());
registerHandler.accept(new RestClusterUpdateSettingsAction());
- registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, nodesInCluster));
+ registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, clusterSupportsFeature));
registerHandler.accept(new RestClusterRerouteAction(settingsFilter));
registerHandler.accept(new RestClusterSearchShardsAction());
registerHandler.accept(new RestPendingClusterTasksAction());
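The registration path now threads through a Predicate<NodeFeature> rather than the node list, so a REST handler can ask whether every node in the cluster supports a named feature. A minimal sketch of how a handler might consume it (the feature id is hypothetical):

```java
private final Predicate<NodeFeature> clusterSupportsFeature;

// Illustrative guard inside a handler:
private void ensureClusterSupport() {
    if (clusterSupportsFeature.test(new NodeFeature("example.feature")) == false) {
        throw new IllegalArgumentException("the whole cluster does not support [example.feature]");
    }
}
```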
diff --git a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
index bb4eb6c202b76..e018cf48fcefc 100644
--- a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
+++ b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
@@ -16,7 +16,7 @@
import java.io.IOException;
import java.io.PrintWriter;
-public class NoShardAvailableActionException extends ElasticsearchException {
+public final class NoShardAvailableActionException extends ElasticsearchException {
private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0];
@@ -28,22 +28,18 @@ public static NoShardAvailableActionException forOnShardFailureWrapper(String ms
return new NoShardAvailableActionException(null, msg, null, true);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId) {
this(shardId, null, null, false);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId, String msg) {
this(shardId, msg, null, false);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) {
this(shardId, msg, cause, false);
}
- @SuppressWarnings("this-escape")
private NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause, boolean onShardFailureWrapper) {
super(msg, cause);
setShard(shardId);
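The pattern in this and the following files is the same: javac's this-escape lint warns when a constructor calls a method that a subclass could override, since the override would observe a partially constructed object. Marking the class final removes that possibility, so the @SuppressWarnings("this-escape") annotations can be dropped. A self-contained sketch with hypothetical exception types:

```java
class BaseException extends RuntimeException {
    private String shard;

    BaseException(String msg) {
        super(msg);
    }

    void setShard(String shard) { // overridable in a non-final subclass
        this.shard = shard;
    }
}

final class ShardException extends BaseException {
    ShardException(String shard, String msg) {
        super(msg);
        setShard(shard); // no this-escape warning: no subclass can intercept this call
    }
}
```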
diff --git a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
index a90bc14f9ac8d..0999e7154b058 100644
--- a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
+++ b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
@@ -18,11 +18,10 @@
import java.io.IOException;
import java.util.Objects;
-public class RoutingMissingException extends ElasticsearchException {
+public final class RoutingMissingException extends ElasticsearchException {
private final String id;
- @SuppressWarnings("this-escape")
public RoutingMissingException(String index, String id) {
super("routing is required for [" + index + "]/[" + id + "]");
Objects.requireNonNull(index, "index must not be null");
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
index b585e891a5903..fc11790079521 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
@@ -95,7 +95,7 @@ protected void masterOperation(
listener.onResponse(
new DesiredBalanceResponse(
desiredBalanceShardsAllocator.getStats(),
- ClusterBalanceStats.createFrom(state, clusterInfo, writeLoadForecaster),
+ ClusterBalanceStats.createFrom(state, latestDesiredBalance, clusterInfo, writeLoadForecaster),
createRoutingTable(state, latestDesiredBalance),
clusterInfo
)
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
index 0cf0baa75a8de..ebf01feaaa891 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
@@ -20,7 +20,7 @@
/**
* A request to get node (cluster) level information.
*/
-public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
+public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
private final NodesInfoMetrics nodesInfoMetrics;
@@ -39,7 +39,6 @@ public NodesInfoRequest(StreamInput in) throws IOException {
* Get information from nodes based on the nodes ids specified. If none are passed, information
* for all nodes will be returned.
*/
- @SuppressWarnings("this-escape")
public NodesInfoRequest(String... nodesIds) {
super(nodesIds);
nodesInfoMetrics = new NodesInfoMetrics();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
index 39205715dca8f..6f6253491c580 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -20,7 +20,9 @@
import java.io.IOException;
import java.util.Objects;
-public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSearchShardsRequest> implements IndicesRequest.Replaceable {
+public final class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSearchShardsRequest>
+ implements
+ IndicesRequest.Replaceable {
private String[] indices = Strings.EMPTY_ARRAY;
@Nullable
@@ -31,7 +33,6 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSearchShardsRequest>
- Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
+ final var shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
if (shardsStatus == null) {
continue;
}
Map<ShardId, SnapshotIndexShardStatus> shardMapBuilder = new HashMap<>();
- for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : shardsStatus.entrySet()) {
+ for (final var shardEntry : shardsStatus.entrySet()) {
final ShardId shardId = shardEntry.getKey();
- final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue().asCopy();
+ final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue();
final IndexShardSnapshotStatus.Stage stage = lastSnapshotStatus.getStage();
String shardNodeId = null;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
index 2a6f0325be1d2..f8b9a9571ddd2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
@@ -243,7 +243,6 @@ private void buildResponse(
entry.indices().get(shardId.getIndexName()),
shardId
)
- .asCopy()
);
} else {
shardStatus = new SnapshotIndexShardStatus(entry.shardId(shardEntry.getKey()), stage);
@@ -322,7 +321,7 @@ private void loadRepositoryData(
repositoriesService.repository(repositoryName)
.getSnapshotInfo(new GetSnapshotInfoContext(snapshotIdsToLoad, true, task::isCancelled, (context, snapshotInfo) -> {
List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
- final Map<ShardId, IndexShardSnapshotStatus> shardStatuses;
+ final Map<ShardId, IndexShardSnapshotStatus.Copy> shardStatuses;
try {
shardStatuses = snapshotShards(repositoryName, repositoryData, task, snapshotInfo);
} catch (Exception e) {
@@ -330,8 +329,8 @@ private void loadRepositoryData(
context.onFailure(e);
return;
}
- for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) {
- IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy();
+ for (final var shardStatus : shardStatuses.entrySet()) {
+ IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue();
shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus));
}
final SnapshotsInProgress.State state = switch (snapshotInfo.state()) {
@@ -374,14 +373,14 @@ private void loadRepositoryData(
* @param snapshotInfo snapshot info
* @return map of shard id to snapshot status
*/
- private Map<ShardId, IndexShardSnapshotStatus> snapshotShards(
+ private Map<ShardId, IndexShardSnapshotStatus.Copy> snapshotShards(
final String repositoryName,
final RepositoryData repositoryData,
final CancellableTask task,
final SnapshotInfo snapshotInfo
) throws IOException {
final Repository repository = repositoriesService.repository(repositoryName);
- final Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>();
+ final Map<ShardId, IndexShardSnapshotStatus.Copy> shardStatus = new HashMap<>();
for (String index : snapshotInfo.indices()) {
IndexId indexId = repositoryData.resolveIndexId(index);
task.ensureNotCancelled();
@@ -394,7 +393,7 @@ private Map<ShardId, IndexShardSnapshotStatus> snapshotShards(
if (shardFailure != null) {
shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason()));
} else {
- final IndexShardSnapshotStatus shardSnapshotStatus;
+ final IndexShardSnapshotStatus.Copy shardSnapshotStatus;
if (snapshotInfo.state() == SnapshotState.FAILED) {
// If the snapshot failed, but the shard's snapshot does
// not have an exception, it means that partial snapshots
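The thread running through this file: the shard-status maps now carry immutable IndexShardSnapshotStatus.Copy values, so every call site drops its own asCopy() snapshotting. In sketch form, using the signature from the hunk above:

```java
// Before: live, mutable status objects that each caller had to snapshot:
//   Map<ShardId, IndexShardSnapshotStatus> statuses = ...;
//   IndexShardSnapshotStatus.Stage stage = statuses.get(shardId).asCopy().getStage();

// After: the map already contains point-in-time copies.
Map<ShardId, IndexShardSnapshotStatus.Copy> statuses =
    snapshotShards(repositoryName, repositoryData, task, snapshotInfo);
IndexShardSnapshotStatus.Stage stage = statuses.get(shardId).getStage();
```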
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
index 531dc6dc5eff3..8a674292b3cc5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -59,7 +59,7 @@ public static class Fields {
* A request to analyze a text associated with a specific index. Allow to provide
* the actual analyzer name to perform the analysis with.
*/
- public static class Request extends SingleShardRequest<Request> {
+ public static final class Request extends SingleShardRequest<Request> {
private String[] text;
private String analyzer;
@@ -91,7 +91,6 @@ public Request() {}
*
* @param index The index to scope the analysis to
*/
- @SuppressWarnings("this-escape")
public Request(String index) {
this.index(index);
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
index d2df8e20f99ea..c8ecbf273c93c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
@@ -43,7 +43,7 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA
public static final String NAME = CloseIndexAction.NAME + "[s]";
public static final ActionType<ReplicationResponse> TYPE = new ActionType<>(NAME, ReplicationResponse::new);
- protected Logger logger = LogManager.getLogger(getClass());
+ private static final Logger logger = LogManager.getLogger(TransportVerifyShardBeforeCloseAction.class);
@Inject
public TransportVerifyShardBeforeCloseAction(
@@ -163,7 +163,7 @@ public void markShardCopyAsStaleIfNeeded(
}
}
- public static class ShardRequest extends ReplicationRequest<ShardRequest> {
+ public static final class ShardRequest extends ReplicationRequest<ShardRequest> {
private final ClusterBlock clusterBlock;
@@ -175,7 +175,6 @@ public static class ShardRequest extends ReplicationRequest<ShardRequest> {
phase1 = in.readBoolean();
}
- @SuppressWarnings("this-escape")
public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) {
super(shardId);
this.clusterBlock = Objects.requireNonNull(clusterBlock);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
index aec5718b31a84..7daf04f41a9fb 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
@@ -7,8 +7,6 @@
*/
package org.elasticsearch.action.admin.indices.readonly;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActionFilters;
@@ -48,7 +46,6 @@ public class TransportVerifyShardIndexBlockAction extends TransportReplicationAc
public static final String NAME = AddIndexBlockAction.NAME + "[s]";
public static final ActionType<ReplicationResponse> TYPE = new ActionType<>(NAME, ReplicationResponse::new);
- protected Logger logger = LogManager.getLogger(getClass());
@Inject
public TransportVerifyShardIndexBlockAction(
@@ -157,7 +154,7 @@ public void markShardCopyAsStaleIfNeeded(
}
}
- public static class ShardRequest extends ReplicationRequest<ShardRequest> {
+ public static final class ShardRequest extends ReplicationRequest<ShardRequest> {
private final ClusterBlock clusterBlock;
@@ -166,7 +163,6 @@ public static class ShardRequest extends ReplicationRequest<ShardRequest> {
clusterBlock = new ClusterBlock(in);
}
- @SuppressWarnings("this-escape")
public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final TaskId parentTaskId) {
super(shardId);
this.clusterBlock = Objects.requireNonNull(clusterBlock);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
index 30197d102dc47..5686deb6b804a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
@@ -8,8 +8,6 @@
package org.elasticsearch.action.admin.indices.shrink;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
@@ -44,7 +42,6 @@
* Main class to initiate resizing (shrink / split) an index into a new index
*/
public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, CreateIndexResponse> {
- private static final Logger logger = LogManager.getLogger(TransportResizeAction.class);
private final MetadataCreateIndexService createIndexService;
private final Client client;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
index bccc7a8f7e243..391ac532a0c3a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -24,7 +24,7 @@
* The SHARD_LEVEL flags are for stat fields that can be calculated at the shard level and then may be later aggregated at the index level
* along with index-level flag stat fields (e.g., Mappings).
*/
-public class CommonStatsFlags implements Writeable, Cloneable {
+public final class CommonStatsFlags implements Writeable, Cloneable {
public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
public static final CommonStatsFlags SHARD_LEVEL = new CommonStatsFlags().all().set(Flag.Mappings, false);
@@ -40,7 +40,6 @@ public class CommonStatsFlags implements Writeable, Cloneable {
/**
* @param flags flags to set. If no flags are supplied, default flags will be set.
*/
- @SuppressWarnings("this-escape")
public CommonStatsFlags(Flag... flags) {
if (flags.length > 0) {
clear();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
index 4e1776a49d21c..335c0781fb884 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
@@ -78,7 +78,12 @@ public static ComponentTemplate normalizeComponentTemplate(
Settings settings = builder.build();
indexScopedSettings.validate(settings, true);
template = new Template(settings, template.mappings(), template.aliases(), template.lifecycle());
- componentTemplate = new ComponentTemplate(template, componentTemplate.version(), componentTemplate.metadata());
+ componentTemplate = new ComponentTemplate(
+ template,
+ componentTemplate.version(),
+ componentTemplate.metadata(),
+ componentTemplate.deprecated()
+ );
}
return componentTemplate;
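The extra constructor argument keeps the template's deprecation flag intact through settings normalization; previously the rebuilt ComponentTemplate was constructed without it. Usage, as in the hunk above:

```java
ComponentTemplate normalized = new ComponentTemplate(
    template,
    componentTemplate.version(),
    componentTemplate.metadata(),
    componentTemplate.deprecated() // now preserved through normalization
);
```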
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
index 64505d76e26b8..0505f41b27599 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -29,7 +29,7 @@
*
* The request requires the query to be set using {@link #query(QueryBuilder)}
*/
-public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> implements ToXContentObject {
+public final class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> implements ToXContentObject {
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false);
@@ -65,7 +65,6 @@ public ValidateQueryRequest(StreamInput in) throws IOException {
* Constructs a new validate request against the provided indices. No indices provided means it will
* run against all indices.
*/
- @SuppressWarnings("this-escape")
public ValidateQueryRequest(String... indices) {
super(indices);
indicesOptions(DEFAULT_INDICES_OPTIONS);
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
index f071ffb22fd5d..402b41761e3d1 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
@@ -21,7 +21,7 @@
* Implements the low-level details of bulk request handling
*/
public final class BulkRequestHandler {
- private final Logger logger;
+ private static final Logger logger = LogManager.getLogger(BulkRequestHandler.class);
private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
private final BulkProcessor.Listener listener;
private final Semaphore semaphore;
@@ -36,7 +36,6 @@ public final class BulkRequestHandler {
int concurrentRequests
) {
assert concurrentRequests >= 0;
- this.logger = LogManager.getLogger(getClass());
this.consumer = consumer;
this.listener = listener;
this.concurrentRequests = concurrentRequests;
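This is the standard static-logger cleanup: a per-instance Logger field initialized from getClass() allocates a field per instance and can resolve to a subclass's name; a static final logger bound to the class itself is cheaper and unambiguous. A minimal sketch with a hypothetical class name:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

final class HandlerSketch {
    // One shared logger, named after this exact class.
    private static final Logger logger = LogManager.getLogger(HandlerSketch.class);
}
```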
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index f3473f274bf38..bd929b9a2204e 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -24,7 +24,10 @@
import java.io.IOException;
import java.util.Set;
-public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> implements Accountable, RawIndexingDataTransportRequest {
+public final class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest>
+ implements
+ Accountable,
+ RawIndexingDataTransportRequest {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
@@ -35,7 +38,6 @@ public BulkShardRequest(StreamInput in) throws IOException {
items = in.readArray(i -> i.readOptionalWriteable(inpt -> new BulkItemRequest(shardId, inpt)), BulkItemRequest[]::new);
}
- @SuppressWarnings("this-escape")
public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
super(shardId);
this.items = items;
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
index 68a4e0d0b04c9..4ecb092f34d4b 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
@@ -7,7 +7,6 @@
*/
package org.elasticsearch.action.datastreams;
-import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.IndicesRequest;
@@ -66,20 +65,14 @@ public ActionRequestValidationException validate() {
public Request(StreamInput in) throws IOException {
super(in);
this.name = in.readString();
- if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
- this.startTime = in.readVLong();
- } else {
- this.startTime = System.currentTimeMillis();
- }
+ this.startTime = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
- out.writeVLong(startTime);
- }
+ out.writeVLong(startTime);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
index 5d015283aa7cd..2dde9e53f5e7f 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
@@ -46,7 +46,8 @@ static void executeDocument(
pipeline.getDescription(),
pipeline.getVersion(),
pipeline.getMetadata(),
- verbosePipelineProcessor
+ verbosePipelineProcessor,
+ pipeline.getDeprecated()
);
ingestDocument.executePipeline(verbosePipeline, (result, e) -> {
handler.accept(new SimulateDocumentVerboseResult(processorResultList), e);
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 487a5c5653998..b6a9179b1e956 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -455,7 +455,7 @@ public String toString() {
* and how many of them were skipped and further details in a Map of Cluster objects
* (when doing a cross-cluster search).
*/
- public static class Clusters implements ToXContentFragment, Writeable {
+ public static final class Clusters implements ToXContentFragment, Writeable {
public static final Clusters EMPTY = new Clusters(0, 0, 0);
@@ -538,7 +538,6 @@ public Clusters(int total, int successful, int skipped) {
this.clusterInfo = Collections.emptyMap(); // will never be used if created from this constructor
}
- @SuppressWarnings("this-escape")
public Clusters(StreamInput in) throws IOException {
this.total = in.readVInt();
int successfulTemp = in.readVInt();
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
index 9cfe0a1f1b992..b1594bf5ba935 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
@@ -20,7 +20,7 @@
*
*
*/
-public class BroadcastShardOperationFailedException extends ElasticsearchException implements ElasticsearchWrapperException {
+public final class BroadcastShardOperationFailedException extends ElasticsearchException implements ElasticsearchWrapperException {
public BroadcastShardOperationFailedException(ShardId shardId, String msg) {
this(shardId, msg, null);
@@ -30,7 +30,6 @@ public BroadcastShardOperationFailedException(ShardId shardId, Throwable cause)
this(shardId, "", cause);
}
- @SuppressWarnings("this-escape")
public BroadcastShardOperationFailedException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 1f347ec2b8cac..1604ff81603ab 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -661,13 +661,11 @@ public interface ReplicaResponse {
}
- public static class RetryOnPrimaryException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
+ public static final class RetryOnPrimaryException extends ElasticsearchException {
public RetryOnPrimaryException(ShardId shardId, String msg) {
this(shardId, msg, null);
}
- @SuppressWarnings("this-escape")
RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 411f23a0fc0ad..0abe7ad678dc5 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -605,9 +605,8 @@ protected Releasable checkReplicaLimits(final ReplicaRequest request) {
return () -> {};
}
- public static class RetryOnReplicaException extends ElasticsearchException {
+ public static final class RetryOnReplicaException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public RetryOnReplicaException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index 56edc5117a28b..650b9db7f3d69 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -50,7 +50,7 @@
*/
// It's not possible to suppress the warning at #realtime(boolean) at the method level.
@SuppressWarnings("unchecked")
-public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest {
+public final class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest {
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TermVectorsRequest.class);
private static final ParseField INDEX = new ParseField("_index");
@@ -79,7 +79,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
private long version = Versions.MATCH_ANY;
- protected String preference;
+ private String preference;
private static final AtomicInteger randomInt = new AtomicInteger(0);
@@ -204,7 +204,6 @@ public TermVectorsRequest(TermVectorsRequest other) {
this.filterSettings = other.filterSettings();
}
- @SuppressWarnings("this-escape")
public TermVectorsRequest(MultiGetRequest.Item item) {
super(item.index());
this.id = item.id();
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 8bcb6a28fb50a..0f84ecab5f8b2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -841,9 +841,8 @@ public String toString() {
}
}
- public static class NoLongerPrimaryShardException extends ElasticsearchException {
+ public static final class NoLongerPrimaryShardException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public NoLongerPrimaryShardException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
index 86e5d6739fcb7..feb0543aad625 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
@@ -61,7 +61,7 @@
* considering a follower to be faulty, to allow for a brief network partition or a long GC cycle to occur without triggering the removal of
* a node and the consequent shard reallocation.
*/
-public class FollowersChecker {
+public final class FollowersChecker {
private static final Logger logger = LogManager.getLogger(FollowersChecker.class);
@@ -105,7 +105,6 @@ public class FollowersChecker {
private final NodeHealthService nodeHealthService;
private volatile FastResponseState fastResponseState;
- @SuppressWarnings("this-escape")
public FollowersChecker(
Settings settings,
TransportService transportService,
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PreVoteCollector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PreVoteCollector.java
index a308a8e0e6c75..b17eb731cfc6a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/PreVoteCollector.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PreVoteCollector.java
@@ -21,7 +21,7 @@
import java.util.function.LongConsumer;
public abstract class PreVoteCollector {
- private final Logger logger = LogManager.getLogger(PreVoteCollector.class);
+ private static final Logger logger = LogManager.getLogger(PreVoteCollector.class);
// Tuple for simple atomic updates. null until the first call to `update()`.
protected volatile Tuple<DiscoveryNode, PreVoteResponse> state; // DiscoveryNode component is null if there is currently no known
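The logger change here (repeated later for `FeatureFlag`) swaps a per-instance field for a per-class constant. `LogManager.getLogger` caches by name, so an instance field only re-runs a lookup per construction while wrongly suggesting per-object state. A small sketch of the preferred form:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class Worker {
    // Resolved once at class initialization; every instance shares it. An
    // instance field would repeat the (cached) lookup on each construction
    // and cost one reference per object for no benefit.
    private static final Logger logger = LogManager.getLogger(Worker.class);

    public void run() {
        logger.debug("worker started");
    }
}
```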
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
index 090f647ee349a..dac5005e0e043 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java
@@ -8,6 +8,7 @@
package org.elasticsearch.cluster.metadata;
+import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.SimpleDiffable;
@@ -35,18 +36,20 @@ public class ComponentTemplate implements SimpleDiffable<ComponentTemplate>, ToX
private static final ParseField TEMPLATE = new ParseField("template");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField METADATA = new ParseField("_meta");
+ private static final ParseField DEPRECATED = new ParseField("deprecated");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<ComponentTemplate, Void> PARSER = new ConstructingObjectParser<>(
"component_template",
false,
- a -> new ComponentTemplate((Template) a[0], (Long) a[1], (Map<String, Object>) a[2])
+ a -> new ComponentTemplate((Template) a[0], (Long) a[1], (Map<String, Object>) a[2], (Boolean) a[3])
);
static {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), Template.PARSER, TEMPLATE);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), VERSION);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), METADATA);
+ PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), DEPRECATED);
}
private final Template template;
@@ -54,6 +57,8 @@ public class ComponentTemplate implements SimpleDiffable<ComponentTemplate>, ToX
private final Long version;
@Nullable
private final Map<String, Object> metadata;
+ @Nullable
+ private final Boolean deprecated;
static Diff<ComponentTemplate> readComponentTemplateDiffFrom(StreamInput in) throws IOException {
return SimpleDiffable.readDiffFrom(ComponentTemplate::new, in);
@@ -64,9 +69,19 @@ public static ComponentTemplate parse(XContentParser parser) {
}
public ComponentTemplate(Template template, @Nullable Long version, @Nullable Map<String, Object> metadata) {
+ this(template, version, metadata, null);
+ }
+
+ public ComponentTemplate(
+ Template template,
+ @Nullable Long version,
+ @Nullable Map<String, Object> metadata,
+ @Nullable Boolean deprecated
+ ) {
this.template = template;
this.version = version;
this.metadata = metadata;
+ this.deprecated = deprecated;
}
public ComponentTemplate(StreamInput in) throws IOException {
@@ -77,6 +92,11 @@ public ComponentTemplate(StreamInput in) throws IOException {
} else {
this.metadata = null;
}
+ if (in.getTransportVersion().onOrAfter(TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED)) {
+ this.deprecated = in.readOptionalBoolean();
+ } else {
+ deprecated = null;
+ }
}
public Template template() {
@@ -93,6 +113,14 @@ public Map<String, Object> metadata() {
return metadata;
}
+ public Boolean deprecated() {
+ return deprecated;
+ }
+
+ public boolean isDeprecated() {
+ return Boolean.TRUE.equals(deprecated);
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
this.template.writeTo(out);
@@ -103,11 +131,14 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(true);
out.writeGenericMap(this.metadata);
}
+ if (out.getTransportVersion().onOrAfter(TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED)) {
+ out.writeOptionalBoolean(this.deprecated);
+ }
}
@Override
public int hashCode() {
- return Objects.hash(template, version, metadata);
+ return Objects.hash(template, version, metadata, deprecated);
}
@Override
@@ -121,7 +152,8 @@ public boolean equals(Object obj) {
ComponentTemplate other = (ComponentTemplate) obj;
return Objects.equals(template, other.template)
&& Objects.equals(version, other.version)
- && Objects.equals(metadata, other.metadata);
+ && Objects.equals(metadata, other.metadata)
+ && Objects.equals(deprecated, other.deprecated);
}
@Override
@@ -148,6 +180,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla
if (this.metadata != null) {
builder.field(METADATA.getPreferredName(), this.metadata);
}
+ if (this.deprecated != null) {
+ builder.field(DEPRECATED.getPreferredName(), this.deprecated);
+ }
builder.endObject();
return builder;
}
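The new `deprecated` flag rides the wire only when both endpoints are on or after `TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED`; older peers never see the extra bytes, and readers substitute `null`. A hedged round-trip sketch of that contract in the style of the project's serialization tests (`BytesStreamOutput` and the version constants are real; the surrounding setup is illustrative):

```java
// Simulate sending a template to a node that predates the new field.
BytesStreamOutput out = new BytesStreamOutput();
out.setTransportVersion(TransportVersions.V_8_7_0); // pre-deprecation peer
template.writeTo(out); // the optional boolean is skipped entirely

StreamInput in = out.bytes().streamInput();
in.setTransportVersion(TransportVersions.V_8_7_0); // must match the writer
ComponentTemplate roundTripped = new ComponentTemplate(in);

// Against an old peer the flag degrades to "unset" rather than breaking the
// stream; isDeprecated() then reports false via Boolean.TRUE.equals(null).
assert roundTripped.deprecated() == null;
assert roundTripped.isDeprecated() == false;
```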
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
index bd745e7ff4ea6..47ab1d099c037 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
@@ -46,6 +47,7 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTem
+ private static final ParseField DEPRECATED = new ParseField("deprecated");
public static final ConstructingObjectParser<ComposableIndexTemplate, Void> PARSER = new ConstructingObjectParser<>(
@@ -60,7 +61,8 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTem
(Map<String, Object>) a[5],
(DataStreamTemplate) a[6],
(Boolean) a[7],
- (List<String>) a[8]
+ (List<String>) a[8],
+ (Boolean) a[9]
)
);
@@ -74,6 +76,7 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTem
+ PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), DEPRECATED);
private final List<String> indexPatterns;
@@ -93,6 +96,8 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTem
private final List<String> ignoreMissingComponentTemplates;
+ @Nullable
+ private final Boolean deprecated;
static Diff<ComposableIndexTemplate> readITV2DiffFrom(StreamInput in) throws IOException {
return SimpleDiffable.readDiffFrom(ComposableIndexTemplate::new, in);
@@ -135,10 +140,10 @@ public ComposableIndexTemplate(
@Nullable DataStreamTemplate dataStreamTemplate,
@Nullable Boolean allowAutoCreate
) {
- this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null);
+ this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null);
}
- public ComposableIndexTemplate(
+ ComposableIndexTemplate(
List<String> indexPatterns,
@Nullable Template template,
@Nullable List<String> componentTemplates,
@@ -148,6 +153,32 @@ public ComposableIndexTemplate(
@Nullable DataStreamTemplate dataStreamTemplate,
@Nullable Boolean allowAutoCreate,
@Nullable List<String> ignoreMissingComponentTemplates
+ ) {
+ this(
+ indexPatterns,
+ template,
+ componentTemplates,
+ priority,
+ version,
+ metadata,
+ dataStreamTemplate,
+ allowAutoCreate,
+ ignoreMissingComponentTemplates,
+ null
+ );
+ }
+
+ public ComposableIndexTemplate(
+ List<String> indexPatterns,
+ @Nullable Template template,
+ @Nullable List<String> componentTemplates,
+ @Nullable Long priority,
+ @Nullable Long version,
+ @Nullable Map<String, Object> metadata,
+ @Nullable DataStreamTemplate dataStreamTemplate,
+ @Nullable Boolean allowAutoCreate,
+ @Nullable List<String> ignoreMissingComponentTemplates,
+ @Nullable Boolean deprecated
) {
this.indexPatterns = indexPatterns;
this.template = template;
@@ -158,6 +189,7 @@ public ComposableIndexTemplate(
this.dataStreamTemplate = dataStreamTemplate;
this.allowAutoCreate = allowAutoCreate;
this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates;
+ this.deprecated = deprecated;
}
public ComposableIndexTemplate(StreamInput in) throws IOException {
@@ -178,6 +210,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException {
} else {
this.ignoreMissingComponentTemplates = null;
}
+ if (in.getTransportVersion().onOrAfter(TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED)) {
+ this.deprecated = in.readOptionalBoolean();
+ } else {
+ this.deprecated = null;
+ }
}
public List<String> indexPatterns() {
@@ -250,6 +287,14 @@ public List<String> getIgnoreMissingComponentTemplates() {
return ignoreMissingComponentTemplates;
}
+ public Boolean deprecated() {
+ return deprecated;
+ }
+
+ public boolean isDeprecated() {
+ return Boolean.TRUE.equals(deprecated);
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(this.indexPatterns);
@@ -268,6 +313,9 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
out.writeOptionalStringCollection(ignoreMissingComponentTemplates);
}
+ if (out.getTransportVersion().onOrAfter(TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED)) {
+ out.writeOptionalBoolean(deprecated);
+ }
}
@Override
@@ -307,6 +355,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla
if (this.ignoreMissingComponentTemplates != null) {
builder.stringListField(IGNORE_MISSING_COMPONENT_TEMPLATES.getPreferredName(), ignoreMissingComponentTemplates);
}
+ if (this.deprecated != null) {
+ builder.field(DEPRECATED.getPreferredName(), deprecated);
+ }
builder.endObject();
return builder;
}
@@ -322,7 +373,8 @@ public int hashCode() {
this.metadata,
this.dataStreamTemplate,
this.allowAutoCreate,
- this.ignoreMissingComponentTemplates
+ this.ignoreMissingComponentTemplates,
+ this.deprecated
);
}
@@ -343,7 +395,8 @@ && componentTemplatesEquals(this.componentTemplates, other.componentTemplates)
&& Objects.equals(this.metadata, other.metadata)
&& Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate)
&& Objects.equals(this.allowAutoCreate, other.allowAutoCreate)
- && Objects.equals(this.ignoreMissingComponentTemplates, other.ignoreMissingComponentTemplates);
+ && Objects.equals(this.ignoreMissingComponentTemplates, other.ignoreMissingComponentTemplates)
+ && Objects.equals(deprecated, other.deprecated);
}
static boolean componentTemplatesEquals(List<String> c1, List<String> c2) {
@@ -480,6 +533,7 @@ public static class Builder {
private DataStreamTemplate dataStreamTemplate;
private Boolean allowAutoCreate;
private List<String> ignoreMissingComponentTemplates;
+ private Boolean deprecated;
public Builder() {}
@@ -528,6 +582,11 @@ public Builder ignoreMissingComponentTemplates(List<String> ignoreMissingCompone
return this;
}
+ public Builder deprecated(@Nullable Boolean deprecated) {
+ this.deprecated = deprecated;
+ return this;
+ }
+
public ComposableIndexTemplate build() {
return new ComposableIndexTemplate(
this.indexPatterns,
@@ -538,7 +597,8 @@ public ComposableIndexTemplate build() {
this.metadata,
this.dataStreamTemplate,
this.allowAutoCreate,
- this.ignoreMissingComponentTemplates
+ this.ignoreMissingComponentTemplates,
+ this.deprecated
);
}
}
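The nine-argument constructor is narrowed to package-private, so external callers migrate either to the new ten-argument form or, more readably, to the builder and its new `deprecated` setter. A hypothetical usage sketch (pattern and priority values invented):

```java
// Mark a legacy template as deprecated so validation and UI surfaces can flag
// it; templates composing it still resolve normally.
ComposableIndexTemplate template = new ComposableIndexTemplate.Builder()
    .indexPatterns(List.of("logs-legacy-*"))
    .priority(100L)
    .deprecated(true) // new setter added by this patch
    .build();

assert template.isDeprecated();        // null-safe convenience accessor
assert template.deprecated() != null;  // raw tri-state accessor
```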
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
index 7a40d7fd774d1..35b7d957bf076 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
@@ -224,7 +224,7 @@ public String toString() {
}
}
- public static class Builder {
+ public static final class Builder {
private static final Set<String> VALID_FIELDS = Set.of("order", "mappings", "settings", "index_patterns", "aliases", "version");
@@ -248,7 +248,6 @@ public Builder(String name) {
aliases = new HashMap<>();
}
- @SuppressWarnings("this-escape")
public Builder(IndexTemplateMetadata indexTemplateMetadata) {
this.name = indexTemplateMetadata.name();
order(indexTemplateMetadata.order());
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
index cf63602729bb4..e0dc1728eab6a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -28,6 +28,8 @@
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.DeprecationCategory;
+import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.HeaderWarning;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
@@ -50,6 +52,8 @@
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexTemplateException;
import org.elasticsearch.indices.SystemIndices;
+import org.elasticsearch.ingest.IngestMetadata;
+import org.elasticsearch.ingest.PipelineConfiguration;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import java.io.IOException;
@@ -122,6 +126,7 @@ public class MetadataIndexTemplateService {
}
private static final Logger logger = LogManager.getLogger(MetadataIndexTemplateService.class);
+ private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MetadataIndexTemplateService.class);
private final ClusterService clusterService;
private final MasterServiceTaskQueue taskQueue;
@@ -304,7 +309,12 @@ public ClusterState addComponentTemplate(
template.template().aliases(),
template.template().lifecycle()
);
- final ComponentTemplate finalComponentTemplate = new ComponentTemplate(finalTemplate, template.version(), template.metadata());
+ final ComponentTemplate finalComponentTemplate = new ComponentTemplate(
+ finalTemplate,
+ template.version(),
+ template.metadata(),
+ template.deprecated()
+ );
if (finalComponentTemplate.equals(existing)) {
return currentState;
@@ -614,7 +624,8 @@ public ClusterState addIndexTemplateV2(
template.metadata(),
template.getDataStreamTemplate(),
template.getAllowAutoCreate(),
- template.getIgnoreMissingComponentTemplates()
+ template.getIgnoreMissingComponentTemplates(),
+ template.deprecated()
);
}
@@ -716,13 +727,21 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT
indexTemplate.metadata(),
indexTemplate.getDataStreamTemplate(),
indexTemplate.getAllowAutoCreate(),
- indexTemplate.getIgnoreMissingComponentTemplates()
+ indexTemplate.getIgnoreMissingComponentTemplates(),
+ indexTemplate.deprecated()
);
validate(name, templateToValidate);
validateDataStreamsStillReferenced(currentState, name, templateToValidate);
validateLifecycleIsOnlyAppliedOnDataStreams(currentState.metadata(), name, templateToValidate);
+ if (templateToValidate.isDeprecated() == false) {
+ validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates());
+ validateUseOfDeprecatedIngestPipelines(name, currentState.metadata().custom(IngestMetadata.TYPE), combinedSettings);
+ // TODO come up with a plan how to validate usage of deprecated ILM policies
+ // we don't have access to the core/main plugin here so we can't use the IndexLifecycleMetadata type
+ }
+
// Finally, right before adding the template, we need to ensure that the composite settings,
// mappings, and aliases are valid after it's been composed with the component templates
try {
@@ -739,6 +758,50 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT
}
}
+ private void validateUseOfDeprecatedComponentTemplates(
+ String name,
+ ComposableIndexTemplate template,
+ Map<String, ComponentTemplate> componentTemplates
+ ) {
+ template.composedOf()
+ .stream()
+ .map(ct -> Tuple.tuple(ct, componentTemplates.get(ct)))
+ .filter(ct -> Objects.nonNull(ct.v2()))
+ .filter(ct -> ct.v2().isDeprecated())
+ .forEach(
+ ct -> deprecationLogger.warn(
+ DeprecationCategory.TEMPLATES,
+ "use_of_deprecated_component_template",
+ "index template [{}] uses deprecated component template [{}]",
+ name,
+ ct.v1()
+ )
+ );
+ }
+
+ private void validateUseOfDeprecatedIngestPipelines(String name, IngestMetadata ingestMetadata, Settings combinedSettings) {
+ Map<String, PipelineConfiguration> pipelines = Optional.ofNullable(ingestMetadata)
+ .map(IngestMetadata::getPipelines)
+ .orElse(Map.of());
+ emitWarningIfPipelineIsDeprecated(name, pipelines, combinedSettings.get("index.default_pipeline"));
+ emitWarningIfPipelineIsDeprecated(name, pipelines, combinedSettings.get("index.final_pipeline"));
+ }
+
+ private void emitWarningIfPipelineIsDeprecated(String name, Map<String, PipelineConfiguration> pipelines, String pipelineName) {
+ Optional.ofNullable(pipelineName)
+ .map(pipelines::get)
+ .filter(p -> Boolean.TRUE.equals(p.getConfigAsMap().get("deprecated")))
+ .ifPresent(
+ p -> deprecationLogger.warn(
+ DeprecationCategory.TEMPLATES,
+ "use_of_deprecated_ingest_pipeline",
+ "index template [{}] uses deprecated ingest pipeline [{}]",
+ name,
+ p.getId()
+ )
+ );
+ }
+
private static void validateLifecycleIsOnlyAppliedOnDataStreams(
Metadata metadata,
String indexTemplateName,
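The Tuple-based stream in `validateUseOfDeprecatedComponentTemplates` packs lookup, null-filtering, and the deprecation check into one pipeline; unrolled into plain control flow it is equivalent to the following (illustrative only, using the same names as the hunk):

```java
// Equivalent imperative form of the stream pipeline above: warn once per
// composed component template that exists and is marked deprecated.
for (String ctName : template.composedOf()) {
    ComponentTemplate ct = componentTemplates.get(ctName);
    if (ct != null && ct.isDeprecated()) {
        deprecationLogger.warn(
            DeprecationCategory.TEMPLATES,
            "use_of_deprecated_component_template",
            "index template [{}] uses deprecated component template [{}]",
            name,
            ctName
        );
    }
}
```

Missing component templates are skipped deliberately: `ignoreMissingComponentTemplates` already permits them, so only templates that exist and are deprecated produce a warning.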
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java
index 0c5f547d1cb10..d17f3a297e805 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java
@@ -60,6 +60,7 @@
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toMap;
@@ -119,12 +120,38 @@ public String name() {
return NAME;
}
+ /**
+ * Creates a new {@link ShardAllocationStatus} that will be used to track
+ * primary and replica availability, providing the color, diagnosis, and
+ * messages about the available or unavailable shards in the cluster.
+ * @param metadata Metadata for the cluster
+ * @return A new ShardAllocationStatus that has not yet been filled.
+ */
+ ShardAllocationStatus createNewStatus(Metadata metadata) {
+ return new ShardAllocationStatus(metadata);
+ }
+
@Override
public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
var state = clusterService.state();
var shutdown = state.getMetadata().custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY);
- var status = new ShardAllocationStatus(state.getMetadata());
+ var status = createNewStatus(state.getMetadata());
+ updateShardAllocationStatus(status, state, shutdown, verbose);
+ return createIndicator(
+ status.getStatus(),
+ status.getSymptom(),
+ status.getDetails(verbose),
+ status.getImpacts(),
+ status.getDiagnosis(verbose, maxAffectedResourcesCount)
+ );
+ }
+ static void updateShardAllocationStatus(
+ ShardAllocationStatus status,
+ ClusterState state,
+ NodesShutdownMetadata shutdown,
+ boolean verbose
+ ) {
for (IndexRoutingTable indexShardRouting : state.routingTable()) {
for (int i = 0; i < indexShardRouting.size(); i++) {
IndexShardRoutingTable shardRouting = indexShardRouting.shard(i);
@@ -136,13 +163,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
}
status.updateSearchableSnapshotsOfAvailableIndices();
- return createIndicator(
- status.getStatus(),
- status.getSymptom(),
- status.getDetails(verbose),
- status.getImpacts(),
- status.getDiagnosis(verbose, maxAffectedResourcesCount)
- );
}
// Impact IDs
@@ -395,22 +415,27 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
)
);
- private class ShardAllocationCounts {
- private int unassigned = 0;
- private int unassigned_new = 0;
- private int unassigned_restarting = 0;
- private int initializing = 0;
- private int started = 0;
- private int relocating = 0;
- private final Set<String> indicesWithUnavailableShards = new HashSet<>();
+ class ShardAllocationCounts {
+ int unassigned = 0;
+ int unassigned_new = 0;
+ int unassigned_restarting = 0;
+ int initializing = 0;
+ int started = 0;
+ int relocating = 0;
+ final Set<String> indicesWithUnavailableShards = new HashSet<>();
+ final Set<String> indicesWithAllShardsUnavailable = new HashSet<>();
// We keep the searchable snapshots separately as long as the original index is still available
// This is checked during the post-processing
- private final SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState();
- private final Map<Diagnosis.Definition, Set<String>> diagnosisDefinitions = new HashMap<>();
+ SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState();
+ final Map<Diagnosis.Definition, Set<String>> diagnosisDefinitions = new HashMap<>();
public void increment(ShardRouting routing, ClusterState state, NodesShutdownMetadata shutdowns, boolean verbose) {
boolean isNew = isUnassignedDueToNewInitialization(routing, state);
boolean isRestarting = isUnassignedDueToTimelyRestart(routing, shutdowns);
+ boolean allUnavailable = areAllShardsOfThisTypeUnavailable(routing, state);
+ if (allUnavailable) {
+ indicesWithAllShardsUnavailable.add(routing.getIndexName());
+ }
if ((routing.active() || isRestarting || isNew) == false) {
String indexName = routing.getIndexName();
Settings indexSettings = state.getMetadata().index(indexName).getSettings();
@@ -451,11 +476,31 @@ public boolean areAllAvailable() {
return indicesWithUnavailableShards.isEmpty();
}
+ public boolean doAnyIndicesHaveAllUnavailable() {
+ return indicesWithAllShardsUnavailable.isEmpty() == false;
+ }
+
private void addDefinition(Diagnosis.Definition diagnosisDefinition, String indexName) {
diagnosisDefinitions.computeIfAbsent(diagnosisDefinition, (k) -> new HashSet<>()).add(indexName);
}
}
+ /**
+ * Returns true if all the shards of the same type (primary or replica) are unassigned. For
+ * example: if a replica is passed then this will return true if ALL replicas are unassigned,
+ * but if at least one is assigned, it will return false.
+ */
+ private boolean areAllShardsOfThisTypeUnavailable(ShardRouting routing, ClusterState state) {
+ return StreamSupport.stream(
+ state.routingTable().allActiveShardsGrouped(new String[] { routing.getIndexName() }, true).spliterator(),
+ false
+ )
+ .flatMap(shardIter -> shardIter.getShardRoutings().stream())
+ .filter(sr -> sr.shardId().equals(routing.shardId()))
+ .filter(sr -> sr.primary() == routing.primary())
+ .allMatch(ShardRouting::unassigned);
+ }
+
private static boolean isUnassignedDueToTimelyRestart(ShardRouting routing, NodesShutdownMetadata shutdowns) {
var info = routing.unassignedInfo();
if (info == null || info.getReason() != UnassignedInfo.Reason.NODE_RESTARTING) {
@@ -805,9 +850,9 @@ private static Optional<Diagnosis.Definition> checkNotEnoughNodesInDataTier(
}
class ShardAllocationStatus {
- private final ShardAllocationCounts primaries = new ShardAllocationCounts();
- private final ShardAllocationCounts replicas = new ShardAllocationCounts();
- private final Metadata clusterMetadata;
+ final ShardAllocationCounts primaries = new ShardAllocationCounts();
+ final ShardAllocationCounts replicas = new ShardAllocationCounts();
+ final Metadata clusterMetadata;
ShardAllocationStatus(Metadata clusterMetadata) {
this.clusterMetadata = clusterMetadata;
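`createNewStatus` looks redundant, but together with the relaxed field visibility it gives tests a seam: a subclass can hand back an instrumented `ShardAllocationStatus`, and the now-static `updateShardAllocationStatus` can be driven directly. A generic sketch of the pattern with hypothetical names:

```java
// Factory-method seam, reduced to its essentials. Production code never calls
// 'new' directly, so a test subclass can substitute an observable instance.
class Indicator {
    Status createNewStatus() {          // the seam: package-private, overridable
        return new Status();
    }

    String calculate() {
        Status status = createNewStatus();
        status.update();                // the logic under test mutates 'status'
        return status.summary();
    }
}

class Status {
    private int updates;

    void update() { updates++; }

    String summary() { return "updates=" + updates; }
}

// In a test: override createNewStatus() to capture the Status instance, run
// calculate(), then assert on the captured object instead of parsing output.
```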
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
index 853a26263fe9f..5df5de43cffdd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
@@ -31,15 +31,18 @@
import java.util.Map;
import java.util.function.ToDoubleFunction;
-public record ClusterBalanceStats(Map<String, TierBalanceStats> tiers, Map<String, NodeBalanceStats> nodes)
- implements
- Writeable,
- ToXContentObject {
+public record ClusterBalanceStats(
+ int shards,
+ int undesiredShardAllocations,
+ Map<String, TierBalanceStats> tiers,
+ Map<String, NodeBalanceStats> nodes
+) implements Writeable, ToXContentObject {
- public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(Map.of(), Map.of());
+ public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(0, 0, Map.of(), Map.of());
public static ClusterBalanceStats createFrom(
ClusterState clusterState,
+ DesiredBalance desiredBalance,
ClusterInfo clusterInfo,
WriteLoadForecaster writeLoadForecaster
) {
@@ -50,32 +53,60 @@ public static ClusterBalanceStats createFrom(
if (dataRoles.isEmpty()) {
continue;
}
- var nodeStats = NodeBalanceStats.createFrom(routingNode, clusterState.metadata(), clusterInfo, writeLoadForecaster);
+ var nodeStats = NodeBalanceStats.createFrom(
+ routingNode,
+ clusterState.metadata(),
+ desiredBalance,
+ clusterInfo,
+ writeLoadForecaster
+ );
nodes.put(routingNode.node().getName(), nodeStats);
for (DiscoveryNodeRole role : dataRoles) {
tierToNodeStats.computeIfAbsent(role.roleName(), ignored -> new ArrayList<>()).add(nodeStats);
}
}
- return new ClusterBalanceStats(Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom), nodes);
+ return new ClusterBalanceStats(
+ nodes.values().stream().mapToInt(NodeBalanceStats::shards).sum(),
+ nodes.values().stream().mapToInt(NodeBalanceStats::undesiredShardAllocations).sum(),
+ Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom),
+ nodes
+ );
}
public static ClusterBalanceStats readFrom(StreamInput in) throws IOException {
- return new ClusterBalanceStats(in.readImmutableMap(TierBalanceStats::readFrom), in.readImmutableMap(NodeBalanceStats::readFrom));
+ return new ClusterBalanceStats(
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
+ in.readImmutableMap(TierBalanceStats::readFrom),
+ in.readImmutableMap(NodeBalanceStats::readFrom)
+ );
}
@Override
public void writeTo(StreamOutput out) throws IOException {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(shards);
+ }
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(undesiredShardAllocations);
+ }
out.writeMap(tiers, StreamOutput::writeWriteable);
out.writeMap(nodes, StreamOutput::writeWriteable);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- return builder.startObject().field("tiers").map(tiers).field("nodes").map(nodes).endObject();
+ return builder.startObject()
+ .field("shard_count", shards)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
+ .field("tiers", tiers)
+ .field("nodes", nodes)
+ .endObject();
}
public record TierBalanceStats(
MetricStats shardCount,
+ MetricStats undesiredShardAllocations,
MetricStats forecastWriteLoad,
MetricStats forecastShardSize,
MetricStats actualShardSize
@@ -84,6 +115,7 @@ public record TierBalanceStats(
private static TierBalanceStats createFrom(List<NodeBalanceStats> nodes) {
return new TierBalanceStats(
MetricStats.createFrom(nodes, it -> it.shards),
+ MetricStats.createFrom(nodes, it -> it.undesiredShardAllocations),
MetricStats.createFrom(nodes, it -> it.forecastWriteLoad),
MetricStats.createFrom(nodes, it -> it.forecastShardSize),
MetricStats.createFrom(nodes, it -> it.actualShardSize)
@@ -93,6 +125,9 @@ private static TierBalanceStats createFrom(List<NodeBalanceStats> nodes) {
public static TierBalanceStats readFrom(StreamInput in) throws IOException {
return new TierBalanceStats(
MetricStats.readFrom(in),
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)
+ ? MetricStats.readFrom(in)
+ : new MetricStats(0.0, 0.0, 0.0, 0.0, 0.0),
MetricStats.readFrom(in),
MetricStats.readFrom(in),
MetricStats.readFrom(in)
@@ -102,6 +137,9 @@ public static TierBalanceStats readFrom(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
shardCount.writeTo(out);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ undesiredShardAllocations.writeTo(out);
+ }
forecastWriteLoad.writeTo(out);
forecastShardSize.writeTo(out);
actualShardSize.writeTo(out);
@@ -111,6 +149,7 @@ public void writeTo(StreamOutput out) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject()
.field("shard_count", shardCount)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
.field("forecast_write_load", forecastWriteLoad)
.field("forecast_disk_usage", forecastShardSize)
.field("actual_disk_usage", actualShardSize)
@@ -172,6 +211,7 @@ public record NodeBalanceStats(
String nodeId,
List<String> roles,
int shards,
+ int undesiredShardAllocations,
double forecastWriteLoad,
long forecastShardSize,
long actualShardSize
@@ -182,9 +222,11 @@ public record NodeBalanceStats(
private static NodeBalanceStats createFrom(
RoutingNode routingNode,
Metadata metadata,
+ DesiredBalance desiredBalance,
ClusterInfo clusterInfo,
WriteLoadForecaster writeLoadForecaster
) {
+ int undesired = 0;
double forecastWriteLoad = 0.0;
long forecastShardSize = 0L;
long actualShardSize = 0L;
@@ -196,23 +238,37 @@ private static NodeBalanceStats createFrom(
forecastWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0);
forecastShardSize += indexMetadata.getForecastedShardSizeInBytes().orElse(shardSize);
actualShardSize += shardSize;
+ if (isDesiredShardAllocation(shardRouting, desiredBalance) == false) {
+ undesired++;
+ }
}
return new NodeBalanceStats(
routingNode.nodeId(),
routingNode.node().getRoles().stream().map(DiscoveryNodeRole::roleName).toList(),
routingNode.size(),
+ undesired,
forecastWriteLoad,
forecastShardSize,
actualShardSize
);
}
+ private static boolean isDesiredShardAllocation(ShardRouting shardRouting, DesiredBalance desiredBalance) {
+ if (shardRouting.relocating()) {
+ // relocating out shards are temporarily accepted
+ return true;
+ }
+ var assignment = desiredBalance.getAssignment(shardRouting.shardId());
+ return assignment != null && assignment.nodeIds().contains(shardRouting.currentNodeId());
+ }
+
public static NodeBalanceStats readFrom(StreamInput in) throws IOException {
return new NodeBalanceStats(
in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readString() : UNKNOWN_NODE_ID,
in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readStringCollectionAsList() : List.of(),
in.readInt(),
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
in.readDouble(),
in.readLong(),
in.readLong()
@@ -228,6 +284,9 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(roles);
}
out.writeInt(shards);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(undesiredShardAllocations);
+ }
out.writeDouble(forecastWriteLoad);
out.writeLong(forecastShardSize);
out.writeLong(actualShardSize);
@@ -241,6 +300,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
return builder.field("roles", roles)
.field("shard_count", shards)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
.field("forecast_write_load", forecastWriteLoad)
.humanReadableField("forecast_disk_usage_bytes", "forecast_disk_usage", ByteSizeValue.ofBytes(forecastShardSize))
.humanReadableField("actual_disk_usage_bytes", "actual_disk_usage", ByteSizeValue.ofBytes(actualShardSize))
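When these stats arrive from a node that predates `UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED`, the counts read back as `-1` and the tier-level `MetricStats` as all zeros; consumers must treat those as "unknown" rather than as a perfectly balanced cluster. A hypothetical consumer sketch:

```java
// Defensive read of the new counters in a mixed-version cluster.
ClusterBalanceStats stats = ClusterBalanceStats.readFrom(in);
if (stats.undesiredShardAllocations() >= 0 && stats.shards() > 0) {
    double undesiredRatio = (double) stats.undesiredShardAllocations() / stats.shards();
    System.out.printf("undesired allocation ratio: %.3f%n", undesiredRatio);
} else {
    // Sentinel -1: the reporting node is too old to know. Do not fold this
    // into averages as if it were zero.
    System.out.println("undesired allocation count unavailable");
}
```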
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
index e6ed24dc7220a..74de0b2e03e60 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -25,10 +25,10 @@
import java.io.IOException;
import java.util.Locale;
-public class GeoPoint implements SpatialPoint, ToXContentFragment {
+public final class GeoPoint implements SpatialPoint, ToXContentFragment {
- protected double lat;
- protected double lon;
+ private double lat;
+ private double lon;
public GeoPoint() {}
@@ -38,7 +38,6 @@ public GeoPoint() {}
*
* @param value String to create the point from
*/
- @SuppressWarnings("this-escape")
public GeoPoint(String value) {
this.resetFromString(value);
}
diff --git a/server/src/main/java/org/elasticsearch/common/inject/CreationException.java b/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
index f09248de947e9..78f89e95e5ff7 100644
--- a/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
+++ b/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
@@ -27,13 +27,12 @@
*
* @author crazybob@google.com (Bob Lee)
*/
-public class CreationException extends RuntimeException {
+public final class CreationException extends RuntimeException {
private final Collection<Message> messages;
/**
* Creates a CreationException containing {@code messages}.
*/
- @SuppressWarnings("this-escape")
public CreationException(Collection<Message> messages) {
this.messages = messages;
if (this.messages.isEmpty()) {
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
index c7e9a4abf2c57..478ae231e16ff 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
@@ -17,18 +17,16 @@
* Resettable {@link StreamInput} that wraps a byte array. It is heavily inspired by Lucene's
* {@link org.apache.lucene.store.ByteArrayDataInput}.
*/
-public class ByteArrayStreamInput extends StreamInput {
+public final class ByteArrayStreamInput extends StreamInput {
private byte[] bytes;
private int pos;
private int limit;
- @SuppressWarnings("this-escape")
public ByteArrayStreamInput() {
reset(BytesRef.EMPTY_BYTES);
}
- @SuppressWarnings("this-escape")
public ByteArrayStreamInput(byte[] bytes) {
reset(bytes);
}
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
index 6a02bedcdf086..42fb7f4a6afe4 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
@@ -17,9 +17,8 @@
* This {@link StreamOutput} writes nowhere. It can be used to check whether serialization
* to a specific version would succeed.
*/
-public class VersionCheckingStreamOutput extends StreamOutput {
+public final class VersionCheckingStreamOutput extends StreamOutput {
- @SuppressWarnings("this-escape")
public VersionCheckingStreamOutput(TransportVersion version) {
setTransportVersion(version);
}
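A hedged usage sketch of the class this hunk touches: it discards every byte and only tracks the target transport version, so callers can prove a message is expressible to an older node before actually sending it. The exception type shown is an assumption, and the surrounding names are illustrative:

```java
// Dry-run serialization against an older transport version.
VersionCheckingStreamOutput out = new VersionCheckingStreamOutput(olderVersion);
try {
    response.writeTo(out); // nothing is buffered; only compatibility is checked
    // Safe: every component of 'response' is expressible at 'olderVersion'.
} catch (IllegalArgumentException e) {
    // Some nested writeable requires a newer version; degrade or reject here.
}
```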
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
index 54b5749b797f7..93ca7a9615be6 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
@@ -32,14 +32,13 @@ public static ECSJsonLayout.Builder newBuilder() {
return new ECSJsonLayout.Builder().asBuilder();
}
- public static class Builder extends AbstractStringLayout.Builder<ECSJsonLayout.Builder>
+ public static final class Builder extends AbstractStringLayout.Builder<ECSJsonLayout.Builder>
implements
org.apache.logging.log4j.core.util.Builder<ECSJsonLayout> {
@PluginAttribute("dataset")
String dataset;
- @SuppressWarnings("this-escape")
public Builder() {
setCharset(StandardCharsets.UTF_8);
}
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
index fb7475e3cba53..a5272b8074d79 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
@@ -147,7 +147,7 @@ PatternLayout getPatternLayout() {
return patternLayout;
}
- public static class Builder<B extends Builder<B>> extends AbstractStringLayout.Builder<B>
+ public static final class Builder<B extends Builder<B>> extends AbstractStringLayout.Builder<B>
implements
org.apache.logging.log4j.core.util.Builder<ESJsonLayout> {
@@ -163,7 +163,6 @@ public static class Builder<B extends Builder<B>> extends AbstractS
@PluginConfiguration
private Configuration config;
- @SuppressWarnings("this-escape")
public Builder() {
setCharset(StandardCharsets.UTF_8);
}
diff --git a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
index 665ed371955c6..9606fc7687595 100644
--- a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
+++ b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
@@ -28,11 +28,10 @@
* that will not have conflicts, which means that no counter will have a label which is a substring of the label of another counter.
* For example, the counters `foo: 1` and `foo.bar: 3` cannot co-exist in a nested map.
*/
-public class Counters implements Writeable {
+public final class Counters implements Writeable {
private final ConcurrentMap<String, CounterMetric> counters = new ConcurrentHashMap<>();
- @SuppressWarnings("this-escape")
public Counters(StreamInput in) throws IOException {
int numCounters = in.readVInt();
for (int i = 0; i < numCounters; i++) {
diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
index 1ac3db3827eb4..b3639079cc920 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
@@ -65,7 +65,7 @@
* }
* }
*/
-public class LocallyMountedSecrets implements SecureSettings {
+public final class LocallyMountedSecrets implements SecureSettings {
public static final String SECRETS_FILE_NAME = "secrets.json";
public static final String SECRETS_DIRECTORY = "secrets";
@@ -116,7 +116,6 @@ public class LocallyMountedSecrets implements SecureSettings {
/**
* Direct constructor to be used by the CLI
*/
- @SuppressWarnings("this-escape")
public LocallyMountedSecrets(Environment environment) {
var secretsDirPath = resolveSecretsDir(environment);
var secretsFilePath = resolveSecretsFile(environment);
diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
index 91dbfc30123fe..c78db448380b3 100644
--- a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
+++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
@@ -22,7 +22,7 @@
/**
* Compact serializable container for ByteRefs
*/
-public class BytesRefArray implements Accountable, Releasable, Writeable {
+public final class BytesRefArray implements Accountable, Releasable, Writeable {
// base size of the bytes ref array
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArray.class);
@@ -32,7 +32,6 @@ public class BytesRefArray implements Accountable, Releasable, Writeable {
private ByteArray bytes;
private long size;
- @SuppressWarnings("this-escape")
public BytesRefArray(long capacity, BigArrays bigArrays) {
this.bigArrays = bigArrays;
boolean success = false;
@@ -49,7 +48,6 @@ public BytesRefArray(long capacity, BigArrays bigArrays) {
size = 0;
}
- @SuppressWarnings("this-escape")
public BytesRefArray(StreamInput in, BigArrays bigArrays) throws IOException {
this.bigArrays = bigArrays;
// we allocate big arrays so we have to `close` if we fail here or we'll leak them.
diff --git a/server/src/main/java/org/elasticsearch/common/util/FeatureFlag.java b/server/src/main/java/org/elasticsearch/common/util/FeatureFlag.java
index 5bbf7aff906c0..1be14026c20c8 100644
--- a/server/src/main/java/org/elasticsearch/common/util/FeatureFlag.java
+++ b/server/src/main/java/org/elasticsearch/common/util/FeatureFlag.java
@@ -36,7 +36,7 @@
*/
public class FeatureFlag {
- private final Logger logger = LogManager.getLogger(FeatureFlag.class);
+ private static final Logger logger = LogManager.getLogger(FeatureFlag.class);
private final String name;
private final boolean enabled;
diff --git a/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java b/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
index f54500a806cca..860060ca5a346 100644
--- a/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
+++ b/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
@@ -17,7 +17,7 @@
* A hash table from native longs to objects. This implementation resolves collisions
* using open-addressing and does not support null values. This class is not thread-safe.
*/
-public class LongObjectPagedHashMap extends AbstractPagedHashMap implements Iterable