- * <p>The default implementation returns the result of calling
- * {@link #visitChildren} on {@code ctx}.</p>
- */
- @Override public T visitMatchQuery(EsqlBaseParser.MatchQueryContext ctx) { return visitChildren(ctx); }
}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
index 0c39b3ea83fa9..4348c641d9f69 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java
@@ -913,24 +913,4 @@ public interface EsqlBaseParserListener extends ParseTreeListener {
* @param ctx the parse tree
*/
void exitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx);
- /**
- * Enter a parse tree produced by {@link EsqlBaseParser#matchCommand}.
- * @param ctx the parse tree
- */
- void enterMatchCommand(EsqlBaseParser.MatchCommandContext ctx);
- /**
- * Exit a parse tree produced by {@link EsqlBaseParser#matchCommand}.
- * @param ctx the parse tree
- */
- void exitMatchCommand(EsqlBaseParser.MatchCommandContext ctx);
- /**
- * Enter a parse tree produced by {@link EsqlBaseParser#matchQuery}.
- * @param ctx the parse tree
- */
- void enterMatchQuery(EsqlBaseParser.MatchQueryContext ctx);
- /**
- * Exit a parse tree produced by {@link EsqlBaseParser#matchQuery}.
- * @param ctx the parse tree
- */
- void exitMatchQuery(EsqlBaseParser.MatchQueryContext ctx);
}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
index 31c9371b9f806..c334526abfe39 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java
@@ -550,16 +550,4 @@ public interface EsqlBaseParserVisitor<T> extends ParseTreeVisitor<T> {
* @return the visitor result
*/
T visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx);
- /**
- * Visit a parse tree produced by {@link EsqlBaseParser#matchCommand}.
- * @param ctx the parse tree
- * @return the visitor result
- */
- T visitMatchCommand(EsqlBaseParser.MatchCommandContext ctx);
- /**
- * Visit a parse tree produced by {@link EsqlBaseParser#matchQuery}.
- * @param ctx the parse tree
- * @return the visitor result
- */
- T visitMatchQuery(EsqlBaseParser.MatchQueryContext ctx);
}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
index cc6273d4de292..8dc07e2e1017f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java
@@ -27,7 +27,6 @@
import org.elasticsearch.xpack.esql.core.expression.NamedExpression;
import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute;
import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar;
-import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate;
import org.elasticsearch.xpack.esql.core.tree.Source;
import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.core.util.Holder;
@@ -354,23 +353,6 @@ public PlanFactory visitWhereCommand(EsqlBaseParser.WhereCommandContext ctx) {
return input -> new Filter(source(ctx), input, expression);
}
- @Override
- public PlanFactory visitMatchCommand(EsqlBaseParser.MatchCommandContext ctx) {
- if (Build.current().isSnapshot() == false) {
- throw new ParsingException(source(ctx), "MATCH command currently requires a snapshot build");
- }
-
- StringQueryPredicate stringQueryPredicate = visitMatchQuery(ctx.matchQuery());
- return input -> new Filter(source(ctx), input, stringQueryPredicate);
- }
-
- @Override
- public StringQueryPredicate visitMatchQuery(EsqlBaseParser.MatchQueryContext ctx) {
- Source source = source(ctx);
- String queryString = unquote(ctx.QUOTED_STRING().getText());
- return new StringQueryPredicate(source, queryString, null);
- }
-
@Override
public PlanFactory visitLimitCommand(EsqlBaseParser.LimitCommandContext ctx) {
Source source = source(ctx);
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
index 3e8d1e4e71562..a0719286a4009 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
@@ -247,10 +247,6 @@ public final void test() throws Throwable {
"multiple indices aren't supported",
testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.UNION_TYPES.capabilityName())
);
- assumeFalse(
- "can't use match command in csv tests",
- testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.MATCH_COMMAND.capabilityName())
- );
assumeFalse(
"can't use QSTR function in csv tests",
testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.QSTR_FUNCTION.capabilityName())
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
index 0b83b76992546..2012e319510af 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
@@ -18,7 +18,6 @@
import org.elasticsearch.xpack.esql.index.EsIndex;
import org.elasticsearch.xpack.esql.index.IndexResolution;
import org.elasticsearch.xpack.esql.parser.EsqlParser;
-import org.elasticsearch.xpack.esql.parser.ParsingException;
import org.elasticsearch.xpack.esql.parser.QueryParam;
import org.elasticsearch.xpack.esql.parser.QueryParams;
@@ -1077,36 +1076,6 @@ public void testMatchFilter() throws Exception {
);
}
- public void testMatchCommand() {
- assertMatchCommand("1:24:", "LIMIT", "from test | limit 10 | match \"Anna\"");
- assertMatchCommand("1:13:", "SHOW", "show info | match \"8.16.0\"");
- assertMatchCommand("1:17:", "ROW", "row a= \"Anna\" | match \"Anna\"");
- assertMatchCommand("1:26:", "EVAL", "from test | eval z = 2 | match \"Anna\"");
- assertMatchCommand("1:43:", "DISSECT", "from test | dissect first_name \"%{foo}\" | match \"Connection\"");
- assertMatchCommand("1:27:", "DROP", "from test | drop emp_no | match \"Anna\"");
- assertMatchCommand("1:35:", "EVAL", "from test | eval n = emp_no * 3 | match \"Anna\"");
- assertMatchCommand("1:44:", "GROK", "from test | grok last_name \"%{WORD:foo}\" | match \"Anna\"");
- assertMatchCommand("1:27:", "KEEP", "from test | keep emp_no | match \"Anna\"");
-
- // TODO Keep adding tests for all unsupported commands
- }
-
- private void assertMatchCommand(String lineAndColumn, String command, String query) {
- String message;
- Class<? extends Exception> exception;
- var isSnapshot = Build.current().isSnapshot();
- if (isSnapshot) {
- message = " MATCH cannot be used after ";
- exception = VerificationException.class;
- } else {
- message = " mismatched input 'match' expecting ";
- exception = ParsingException.class;
- }
-
- var expectedErrorMessage = lineAndColumn + message + (isSnapshot ? command : "");
- assertThat(error(query, defaultAnalyzer, exception), containsString(expectedErrorMessage));
- }
-
public void testQueryStringFunctionsNotAllowedAfterCommands() throws Exception {
assumeTrue("skipping because QSTR is not enabled", EsqlCapabilities.Cap.QSTR_FUNCTION.isEnabled());
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
index 2ed0093945837..c2779b7dbc46d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
@@ -609,101 +609,6 @@ public void testQueryStringFunctionMultipleQstrClauses() {
assertThat(query.query().toString(), is(expected.toString()));
}
- /**
- * Expecting
- * LimitExec[1000[INTEGER]]
- * \_ExchangeExec[[],false]
- * \_ProjectExec[[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, last_na
- * me{f}#6, long_noidx{f}#11, salary{f}#7]]
- * \_FieldExtractExec[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3]
- * \_EsQueryExec[test], indexMode[standard], query[{"query_string":{"query":"\"last_name: Smith\""
- */
- public void testMatchCommand() {
- assumeTrue("skipping because MATCH_COMMAND is not enabled", EsqlCapabilities.Cap.MATCH_COMMAND.isEnabled());
- var plan = plannerOptimizer.plan("""
- from test
- | match "last_name: Smith"
- """, IS_SV_STATS);
-
- var limit = as(plan, LimitExec.class);
- var exchange = as(limit.child(), ExchangeExec.class);
- var project = as(exchange.child(), ProjectExec.class);
- var field = as(project.child(), FieldExtractExec.class);
- var query = as(field.child(), EsQueryExec.class);
- assertThat(query.limit().fold(), is(1000));
- var expected = QueryBuilders.queryStringQuery("last_name: Smith");
- assertThat(query.query().toString(), is(expected.toString()));
- }
-
- /**
- * LimitExec[1000[INTEGER]]
- * \_ExchangeExec[[],false]
- * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n
- * ame{f}#7, long_noidx{f}#12, salary{f}#8]]
- * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen]
- * \_EsQueryExec[test], indexMode[standard],
- * query[{"bool":{ "must":[{
- * "esql_single_value":{"field":"emp_no","next":{"range":{"emp_no":{"gt":10010,"boost":1.0}}}}},
- * {"query_string":{"query":"last_name: Smith","fields":[]}}],"boost":1.0}
- * }]
- */
- public void testMatchCommandWithWhereClause() {
- assumeTrue("skipping because MATCH_COMMAND is not enabled", EsqlCapabilities.Cap.MATCH_COMMAND.isEnabled());
- String queryText = """
- from test
- | where emp_no > 10010
- | match "last_name: Smith"
- """;
- var plan = plannerOptimizer.plan(queryText, IS_SV_STATS);
-
- var limit = as(plan, LimitExec.class);
- var exchange = as(limit.child(), ExchangeExec.class);
- var project = as(exchange.child(), ProjectExec.class);
- var field = as(project.child(), FieldExtractExec.class);
- var query = as(field.child(), EsQueryExec.class);
- assertThat(query.limit().fold(), is(1000));
-
- Source source = new Source(2, 8, "emp_no > 10010");
- var range = wrapWithSingleQuery(queryText, QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no", source);
- var queryString = QueryBuilders.queryStringQuery("last_name: Smith");
- var expected = QueryBuilders.boolQuery().must(range).must(queryString);
- assertThat(query.query().toString(), is(expected.toString()));
- }
-
- /**
- * TopNExec[[Order[emp_no{f}#3,ASC,LAST]],1000[INTEGER],0]
- * \_ExchangeExec[[],false]
- * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n
- * ame{f}#7, long_noidx{f}#12, salary{f}#8]]
- * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen]
- * \_EsQueryExec[test],
- * query[{"bool":{"must":[{"query_string":{"query":"last_name: Smith","fields":[]}},
- * {"query_string":{"query":"John","fields":[]}}],"boost":1.0}}]
- * sort[[FieldSort[field=emp_no{f}#3, direction=ASC, nulls=LAST]]]
- */
- public void testMatchCommandWithMultipleMatches() {
- assumeTrue("skipping because MATCH_COMMAND is not enabled", EsqlCapabilities.Cap.MATCH_COMMAND.isEnabled());
- var plan = plannerOptimizer.plan("""
- from test
- | match "last_name: Smith"
- | sort emp_no
- | MATCH "John"
- """, IS_SV_STATS);
-
- var limit = as(plan, TopNExec.class);
- var exchange = as(limit.child(), ExchangeExec.class);
- var project = as(exchange.child(), ProjectExec.class);
- var field = as(project.child(), FieldExtractExec.class);
- var query = as(field.child(), EsQueryExec.class);
- assertThat(query.limit().fold(), is(1000));
-
- Source source = new Source(2, 8, "emp_no > 10010");
- var queryString1 = QueryBuilders.queryStringQuery("last_name: Smith");
- var queryString2 = QueryBuilders.queryStringQuery("John");
- var expected = QueryBuilders.boolQuery().must(queryString1).must(queryString2);
- assertThat(query.query().toString(), is(expected.toString()));
- }
-
// optimizer doesn't know yet how to break down different multi count
public void testCountFieldsAndAllWithFilter() {
var plan = plannerOptimizer.plan("""
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
index 3ee7509ea1530..c5a5bfac023c1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java
@@ -19,7 +19,6 @@
import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute;
import org.elasticsearch.xpack.esql.core.expression.NamedExpression;
import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute;
-import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate;
import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not;
import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison;
import org.elasticsearch.xpack.esql.core.type.DataType;
@@ -51,7 +50,6 @@
import org.elasticsearch.xpack.esql.plan.logical.Row;
import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation;
-import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
@@ -993,17 +991,6 @@ public void testInputParams() {
assertThat(alias.child().fold(), is(11));
}
- public void testMatchCommand() throws IOException {
- assumeTrue("Match command available just for snapshots", Build.current().isSnapshot());
- String queryString = "field: value";
- assertEquals(
- new Filter(EMPTY, PROCESSING_CMD_INPUT, new StringQueryPredicate(EMPTY, queryString, null)),
- processingCommand("match \"" + queryString + "\"")
- );
-
- expectError("from a | match an unquoted string", "mismatched input 'an' expecting QUOTED_STRING");
- }
-
public void testMissingInputParams() {
expectError("row x = ?, y = ?", List.of(new QueryParam(null, 1, INTEGER)), "Not enough actual parameters 1");
}
From 0638d3977a826f421325b2fdef9c637bdc59d4df Mon Sep 17 00:00:00 2001
From: Smriti <152067238+smriti0321@users.noreply.github.com>
Date: Wed, 25 Sep 2024 13:55:17 +0200
Subject: [PATCH 16/30] Update index-templates.asciidoc (#113461)
Adding `security_solution-*-*` to the list of index names to avoid pattern collisions.
---
docs/reference/indices/index-templates.asciidoc | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc
index 66911716ffee2..5b152ecf177ec 100644
--- a/docs/reference/indices/index-templates.asciidoc
+++ b/docs/reference/indices/index-templates.asciidoc
@@ -44,6 +44,7 @@ following index patterns:
- `metrics-*-*`
- `synthetics-*-*`
- `profiling-*`
+- `security_solution-*-*`
// end::built-in-index-template-patterns[]
{fleet-guide}/fleet-overview.html[{agent}] uses these templates to create
From 19b3d5f1545e6c666f67561b50a3247fc9d13b25 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 25 Sep 2024 22:17:34 +1000
Subject: [PATCH 17/30] Mute org.elasticsearch.xpack.ml.integration.MlJobIT
testCreateJobsWithIndexNameOption #113528
---
muted-tests.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 13c5edc1440bc..0b64a4f55d5b8 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -287,6 +287,9 @@ tests:
- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)}
issue: https://github.com/elastic/elasticsearch/issues/113502
+- class: org.elasticsearch.xpack.ml.integration.MlJobIT
+ method: testCreateJobsWithIndexNameOption
+ issue: https://github.com/elastic/elasticsearch/issues/113528
# Examples:
#
From ab09759f384102fa5eb0c7e7ebcfbee4ff48d5db Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 25 Sep 2024 14:25:48 +0200
Subject: [PATCH 18/30] Turn SortFieldAndFormat into a record (#113398)
It's in the title; a prerequisite for https://github.com/elastic/elasticsearch/pull/113164
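For context, the record conversion replaces the two public final fields with
record accessors, so call sites move from `sf.field` / `sf.format` to
`sf.field()` / `sf.format()`. A minimal compile-and-run sketch of the same
before/after shape, using plain Strings as stand-ins for Lucene's SortField
and the DocValueFormat (all names below are illustrative, not the real
classes):

    import java.util.Objects;

    // After: a record instead of a final class with public final fields.
    // The compact constructor keeps the null checks the old class performed.
    record SortFieldAndFormatSketch(String field, String format) {
        SortFieldAndFormatSketch {
            Objects.requireNonNull(field);
            Objects.requireNonNull(format);
        }
    }

    class RecordSketchDemo {
        public static void main(String[] args) {
            var sf = new SortFieldAndFormatSketch("timestamp", "epoch_millis");
            // Field reads become accessor calls: sf.field -> sf.field()
            System.out.println(sf.field() + " / " + sf.format());
        }
    }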
---
.../search/sort/SortBuilder.java | 4 +-
.../search/sort/SortFieldAndFormat.java | 5 +-
.../search/nested/NestedSortingTests.java | 2 +-
.../search/sort/AbstractSortTestCase.java | 2 +-
.../search/sort/FieldSortBuilderTests.java | 52 +++++++++----------
.../sort/GeoDistanceSortBuilderTests.java | 38 +++++++-------
.../search/sort/ScriptSortBuilderTests.java | 18 +++----
.../mapper/WildcardFieldMapperTests.java | 4 +-
8 files changed, 61 insertions(+), 64 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
index 5a82211918356..9f943e63ef1e6 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
@@ -159,8 +159,8 @@ public static Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sortBuilde
List<DocValueFormat> sortFormats = new ArrayList<>(sortBuilders.size());
for (SortBuilder<?> builder : sortBuilders) {
SortFieldAndFormat sf = builder.build(context);
- sortFields.add(sf.field);
- sortFormats.add(sf.format);
+ sortFields.add(sf.field());
+ sortFormats.add(sf.format());
}
if (sortFields.isEmpty() == false) {
// optimize if we just sort on score non reversed, we don't really
diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortFieldAndFormat.java b/server/src/main/java/org/elasticsearch/search/sort/SortFieldAndFormat.java
index 695895950e8fc..8c152ad1de2f3 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/SortFieldAndFormat.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/SortFieldAndFormat.java
@@ -13,10 +13,7 @@
import java.util.Objects;
-public final class SortFieldAndFormat {
-
- public final SortField field;
- public final DocValueFormat format;
+public record SortFieldAndFormat(SortField field, DocValueFormat format) {
public SortFieldAndFormat(SortField field, DocValueFormat format) {
this.field = Objects.requireNonNull(field);
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
index 1b4a093dc70e4..e088e8569bf8a 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
@@ -820,7 +820,7 @@ private static TopFieldDocs search(
Query query = new BooleanQuery.Builder().add(queryBuilder.toQuery(searchExecutionContext), Occur.MUST)
.add(Queries.newNonNestedFilter(searchExecutionContext.indexVersionCreated()), Occur.FILTER)
.build();
- Sort sort = new Sort(sortBuilder.build(searchExecutionContext).field);
+ Sort sort = new Sort(sortBuilder.build(searchExecutionContext).field());
return searcher.search(query, 10, sort);
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
index 583cdf302ad65..dc1f12d6cf657 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
@@ -152,7 +152,7 @@ public void testBuildSortField() throws IOException {
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
T sortBuilder = createTestItem();
SortFieldAndFormat sortField = Rewriteable.rewrite(sortBuilder, mockShardContext).build(mockShardContext);
- sortFieldAssertions(sortBuilder, sortField.field, sortField.format);
+ sortFieldAssertions(sortBuilder, sortField.field(), sortField.format());
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
index 433a1d8eaf2f4..5f08a3f1143e0 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
@@ -150,26 +150,26 @@ protected void sortFieldAssertions(FieldSortBuilder builder, SortField sortField
public void testBuildSortFieldMissingValue() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
FieldSortBuilder fieldSortBuilder = new FieldSortBuilder("value").missing("_first");
- SortField sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ SortField sortField = fieldSortBuilder.build(searchExecutionContext).field();
SortedNumericSortField expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE);
expectedSortField.setMissingValue(Double.NEGATIVE_INFINITY);
assertEquals(expectedSortField, sortField);
fieldSortBuilder = new FieldSortBuilder("value").missing("_last");
- sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ sortField = fieldSortBuilder.build(searchExecutionContext).field();
expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE);
expectedSortField.setMissingValue(Double.POSITIVE_INFINITY);
assertEquals(expectedSortField, sortField);
Double randomDouble = randomDouble();
fieldSortBuilder = new FieldSortBuilder("value").missing(randomDouble);
- sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ sortField = fieldSortBuilder.build(searchExecutionContext).field();
expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE);
expectedSortField.setMissingValue(randomDouble);
assertEquals(expectedSortField, sortField);
fieldSortBuilder = new FieldSortBuilder("value").missing(randomDouble.toString());
- sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ sortField = fieldSortBuilder.build(searchExecutionContext).field();
expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE);
expectedSortField.setMissingValue(randomDouble);
assertEquals(expectedSortField, sortField);
@@ -181,19 +181,19 @@ public void testBuildSortFieldMissingValue() throws IOException {
public void testBuildSortFieldOrder() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
FieldSortBuilder fieldSortBuilder = new FieldSortBuilder("value");
- SortField sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ SortField sortField = fieldSortBuilder.build(searchExecutionContext).field();
SortedNumericSortField expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE, false);
expectedSortField.setMissingValue(Double.POSITIVE_INFINITY);
assertEquals(expectedSortField, sortField);
fieldSortBuilder = new FieldSortBuilder("value").order(SortOrder.ASC);
- sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ sortField = fieldSortBuilder.build(searchExecutionContext).field();
expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE, false);
expectedSortField.setMissingValue(Double.POSITIVE_INFINITY);
assertEquals(expectedSortField, sortField);
fieldSortBuilder = new FieldSortBuilder("value").order(SortOrder.DESC);
- sortField = fieldSortBuilder.build(searchExecutionContext).field;
+ sortField = fieldSortBuilder.build(searchExecutionContext).field();
expectedSortField = new SortedNumericSortField("value", SortField.Type.DOUBLE, true, SortedNumericSelector.Type.MAX);
expectedSortField.setMissingValue(Double.NEGATIVE_INFINITY);
assertEquals(expectedSortField, sortField);
@@ -206,44 +206,44 @@ public void testMultiValueMode() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
FieldSortBuilder sortBuilder = new FieldSortBuilder("value").sortMode(SortMode.MIN);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedNumericSortField.class));
SortedNumericSortField numericSortField = (SortedNumericSortField) sortField;
assertEquals(SortedNumericSelector.Type.MIN, numericSortField.getSelector());
sortBuilder = new FieldSortBuilder("value").sortMode(SortMode.MAX);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedNumericSortField.class));
numericSortField = (SortedNumericSortField) sortField;
assertEquals(SortedNumericSelector.Type.MAX, numericSortField.getSelector());
sortBuilder = new FieldSortBuilder("value").sortMode(SortMode.SUM);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.SUM, comparatorSource.sortMode());
sortBuilder = new FieldSortBuilder("value").sortMode(SortMode.AVG);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.AVG, comparatorSource.sortMode());
sortBuilder = new FieldSortBuilder("value").sortMode(SortMode.MEDIAN);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MEDIAN, comparatorSource.sortMode());
// sort mode should also be set by build() implicitly to MIN or MAX if not set explicitly on builder
sortBuilder = new FieldSortBuilder("value");
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedNumericSortField.class));
numericSortField = (SortedNumericSortField) sortField;
assertEquals(SortedNumericSelector.Type.MIN, numericSortField.getSelector());
sortBuilder = new FieldSortBuilder("value").order(SortOrder.DESC);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedNumericSortField.class));
numericSortField = (SortedNumericSortField) sortField;
assertEquals(SortedNumericSelector.Type.MAX, numericSortField.getSelector());
@@ -258,7 +258,7 @@ public void testBuildNested() throws IOException {
FieldSortBuilder sortBuilder = new FieldSortBuilder("fieldName").setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value"))
);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
Nested nested = comparatorSource.nested();
@@ -267,7 +267,7 @@ public void testBuildNested() throws IOException {
NestedSortBuilder nestedSort = new NestedSortBuilder("path");
sortBuilder = new FieldSortBuilder("fieldName").setNestedSort(nestedSort);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
@@ -276,7 +276,7 @@ public void testBuildNested() throws IOException {
nestedSort.setFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value"));
sortBuilder = new FieldSortBuilder("fieldName").setNestedSort(nestedSort);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
@@ -308,27 +308,27 @@ public void testShardDocSort() throws IOException {
reverse ? SortOrder.DESC : SortOrder.ASC
);
SortFieldAndFormat sortAndFormat = sortBuilder.build(searchExecutionContext);
- assertThat(sortAndFormat.field.getClass(), equalTo(ShardDocSortField.class));
- ShardDocSortField sortField = (ShardDocSortField) sortAndFormat.field;
+ assertThat(sortAndFormat.field().getClass(), equalTo(ShardDocSortField.class));
+ ShardDocSortField sortField = (ShardDocSortField) sortAndFormat.field();
assertThat(sortField.getShardRequestIndex(), equalTo(searchExecutionContext.getShardRequestIndex()));
assertThat(sortField.getReverse(), equalTo(reverse));
- assertThat(sortAndFormat.format, equalTo(DocValueFormat.RAW));
+ assertThat(sortAndFormat.format(), equalTo(DocValueFormat.RAW));
}
public void testFormatDateTime() throws Exception {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
SortFieldAndFormat sortAndFormat = SortBuilders.fieldSort("custom-date").build(searchExecutionContext);
- assertThat(sortAndFormat.format.formatSortValue(1615580798601L), equalTo(1615580798601L));
+ assertThat(sortAndFormat.format().formatSortValue(1615580798601L), equalTo(1615580798601L));
sortAndFormat = SortBuilders.fieldSort("custom-date").setFormat("yyyy-MM-dd").build(searchExecutionContext);
- assertThat(sortAndFormat.format.formatSortValue(1615580798601L), equalTo("2021-03-12"));
+ assertThat(sortAndFormat.format().formatSortValue(1615580798601L), equalTo("2021-03-12"));
sortAndFormat = SortBuilders.fieldSort("custom-date").setFormat("epoch_millis").build(searchExecutionContext);
- assertThat(sortAndFormat.format.formatSortValue(1615580798601L), equalTo("1615580798601"));
+ assertThat(sortAndFormat.format().formatSortValue(1615580798601L), equalTo("1615580798601"));
sortAndFormat = SortBuilders.fieldSort("custom-date").setFormat("yyyy/MM/dd HH:mm:ss").build(searchExecutionContext);
- assertThat(sortAndFormat.format.formatSortValue(1615580798601L), equalTo("2021/03/12 20:26:38"));
+ assertThat(sortAndFormat.format().formatSortValue(1615580798601L), equalTo("2021/03/12 20:26:38"));
}
public void testInvalidFormat() {
@@ -371,12 +371,12 @@ public void testModeNonNumericField() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
FieldSortBuilder sortBuilder = new FieldSortBuilder(MAPPED_STRING_FIELDNAME).sortMode(SortMode.MIN);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedSetSortField.class));
assertEquals(SortedSetSelector.Type.MIN, ((SortedSetSortField) sortField).getSelector());
sortBuilder = new FieldSortBuilder(MAPPED_STRING_FIELDNAME).sortMode(SortMode.MAX);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField, instanceOf(SortedSetSortField.class));
assertEquals(SortedSetSelector.Type.MAX, ((SortedSetSortField) sortField).getSelector());
diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
index 18f63821e721b..17a9fb5974176 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
@@ -367,32 +367,32 @@ public void testCommonCaseIsOptimized() throws IOException {
// The common case should use LatLonDocValuesField.newDistanceSort
GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("", new GeoPoint(3.5, 2.1));
SortFieldAndFormat sort = builder.build(context);
- assertEquals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass(), sort.field.getClass());
+ assertEquals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass(), sort.field().getClass());
// however this might be disabled by fancy options
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1), new GeoPoint(3.0, 4));
sort = builder.build(context);
- assertEquals(SortField.class, sort.field.getClass()); // 2 points -> plain SortField with a custom comparator
+ assertEquals(SortField.class, sort.field().getClass()); // 2 points -> plain SortField with a custom comparator
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.unit(DistanceUnit.KILOMETERS);
sort = builder.build(context);
- assertEquals(SortField.class, sort.field.getClass()); // km rather than m -> plain SortField with a custom comparator
+ assertEquals(SortField.class, sort.field().getClass()); // km rather than m -> plain SortField with a custom comparator
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.order(SortOrder.DESC);
sort = builder.build(context);
- assertEquals(SortField.class, sort.field.getClass()); // descending means the max value should be considered rather than min
+ assertEquals(SortField.class, sort.field().getClass()); // descending means the max value should be considered rather than min
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.setNestedSort(new NestedSortBuilder("path"));
sort = builder.build(context);
- assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with nested fields
+ assertEquals(SortField.class, sort.field().getClass()); // can't use LatLon optimized sorting with nested fields
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.order(SortOrder.DESC);
sort = builder.build(context);
- assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with DESC sorting
+ assertEquals(SortField.class, sort.field().getClass()); // can't use LatLon optimized sorting with DESC sorting
}
/**
@@ -401,13 +401,13 @@ public void testCommonCaseIsOptimized() throws IOException {
public void testBuildSortFieldOrder() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
- assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field.getReverse());
+ assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
geoDistanceSortBuilder.order(SortOrder.ASC);
- assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field.getReverse());
+ assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
geoDistanceSortBuilder.order(SortOrder.DESC);
- assertEquals(true, geoDistanceSortBuilder.build(searchExecutionContext).field.getReverse());
+ assertEquals(true, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
}
/**
@@ -417,7 +417,7 @@ public void testMultiValueMode() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
geoDistanceSortBuilder.sortMode(SortMode.MAX);
- SortField sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ SortField sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MAX, comparatorSource.sortMode());
@@ -425,7 +425,7 @@ public void testMultiValueMode() throws IOException {
// also use MultiValueMode.Max if no Mode set but order is DESC
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
geoDistanceSortBuilder.order(SortOrder.DESC);
- sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MAX, comparatorSource.sortMode());
@@ -434,7 +434,7 @@ public void testMultiValueMode() throws IOException {
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
// need to use distance unit other than Meters to not get back a LatLonPointSortField
geoDistanceSortBuilder.order(SortOrder.ASC).unit(DistanceUnit.INCH);
- sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MIN, comparatorSource.sortMode());
@@ -442,19 +442,19 @@ public void testMultiValueMode() throws IOException {
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
// need to use distance unit other than Meters to not get back a LatLonPointSortField
geoDistanceSortBuilder.sortMode(SortMode.MIN).unit(DistanceUnit.INCH);
- sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MIN, comparatorSource.sortMode());
geoDistanceSortBuilder.sortMode(SortMode.AVG);
- sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.AVG, comparatorSource.sortMode());
geoDistanceSortBuilder.sortMode(SortMode.MEDIAN);
- sortField = geoDistanceSortBuilder.build(searchExecutionContext).field;
+ sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MEDIAN, comparatorSource.sortMode());
@@ -469,7 +469,7 @@ public void testBuildNested() throws IOException {
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
Nested nested = comparatorSource.nested();
@@ -477,7 +477,7 @@ public void testBuildNested() throws IOException {
assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery());
sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(new NestedSortBuilder("path"));
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
@@ -487,7 +487,7 @@ public void testBuildNested() throws IOException {
sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
@@ -504,7 +504,7 @@ public void testBuildCoerce() throws IOException {
sortBuilder.validation(GeoValidationMethod.COERCE);
assertEquals(-180.0, sortBuilder.points()[0].getLat(), 0.0);
assertEquals(-360.0, sortBuilder.points()[0].getLon(), 0.0);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertEquals(LatLonDocValuesField.newDistanceSort("fieldName", 0.0, 180.0), sortField);
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index 98345d0cb4edd..872775e18c7d1 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -273,7 +273,7 @@ public void testMultiValueMode() throws IOException {
for (SortMode mode : SortMode.values()) {
ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER);
sortBuilder.sortMode(mode);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.fromString(mode.toString()), comparatorSource.sortMode());
@@ -282,14 +282,14 @@ public void testMultiValueMode() throws IOException {
// check that without mode set, order ASC sets mode to MIN, DESC to MAX
ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER);
sortBuilder.order(SortOrder.ASC);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MIN, comparatorSource.sortMode());
sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER);
sortBuilder.order(SortOrder.DESC);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MAX, comparatorSource.sortMode());
@@ -300,15 +300,15 @@ public void testMultiValueMode() throws IOException {
*/
public void testBuildCorrectComparatorType() throws IOException {
ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.STRING);
- SortField sortField = sortBuilder.build(createMockSearchExecutionContext()).field;
+ SortField sortField = sortBuilder.build(createMockSearchExecutionContext()).field();
assertThat(sortField.getComparatorSource(), instanceOf(BytesRefFieldComparatorSource.class));
sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER);
- sortField = sortBuilder.build(createMockSearchExecutionContext()).field;
+ sortField = sortBuilder.build(createMockSearchExecutionContext()).field();
assertThat(sortField.getComparatorSource(), instanceOf(DoubleValuesComparatorSource.class));
sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.VERSION);
- sortField = sortBuilder.build(createMockSearchExecutionContext()).field;
+ sortField = sortBuilder.build(createMockSearchExecutionContext()).field();
assertThat(sortField.getComparatorSource(), instanceOf(BytesRefFieldComparatorSource.class));
}
@@ -321,7 +321,7 @@ public void testBuildNested() throws IOException {
ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
- SortField sortField = sortBuilder.build(searchExecutionContext).field;
+ SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
Nested nested = comparatorSource.nested();
@@ -331,7 +331,7 @@ public void testBuildNested() throws IOException {
sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedSort(
new NestedSortBuilder("path")
);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
@@ -341,7 +341,7 @@ public void testBuildNested() throws IOException {
sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
- sortField = sortBuilder.build(searchExecutionContext).field;
+ sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
index 7abce10a82f3c..4b9ccff6f526c 100644
--- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
+++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
@@ -408,11 +408,11 @@ public void testSearchResultsVersusKeywordField() throws IOException {
SearchExecutionContext searchExecutionContext = createMockContext();
FieldSortBuilder wildcardSortBuilder = new FieldSortBuilder(WILDCARD_FIELD_NAME);
- SortField wildcardSortField = wildcardSortBuilder.build(searchExecutionContext).field;
+ SortField wildcardSortField = wildcardSortBuilder.build(searchExecutionContext).field();
ScoreDoc[] wildcardHits = searcher.search(new MatchAllDocsQuery(), numDocs, new Sort(wildcardSortField)).scoreDocs;
FieldSortBuilder keywordSortBuilder = new FieldSortBuilder(KEYWORD_FIELD_NAME);
- SortField keywordSortField = keywordSortBuilder.build(searchExecutionContext).field;
+ SortField keywordSortField = keywordSortBuilder.build(searchExecutionContext).field();
ScoreDoc[] keywordHits = searcher.search(new MatchAllDocsQuery(), numDocs, new Sort(keywordSortField)).scoreDocs;
assertThat(wildcardHits.length, equalTo(keywordHits.length));
From 328f463f82df1c78a1145b2af05264ffade169de Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 25 Sep 2024 22:26:14 +1000
Subject: [PATCH 19/30] Mute
org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT test
{p0=dot_prefix/10_basic/Deprecated index template with a dot prefix index
pattern} #113529
---
muted-tests.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 0b64a4f55d5b8..e85b8c5ad5a45 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -290,6 +290,9 @@ tests:
- class: org.elasticsearch.xpack.ml.integration.MlJobIT
method: testCreateJobsWithIndexNameOption
issue: https://github.com/elastic/elasticsearch/issues/113528
+- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT
+ method: test {p0=dot_prefix/10_basic/Deprecated index template with a dot prefix index pattern}
+ issue: https://github.com/elastic/elasticsearch/issues/113529
# Examples:
#
From a34e9fc32300d6921c3cc0e7af49e2b18a4ad7b2 Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Wed, 25 Sep 2024 13:27:06 +0100
Subject: [PATCH 20/30] [ML] Remove the cluster state listener when the
adaptive allocations service stops (#113524)
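The fix is twofold: stop() now removes the service from the cluster service's
listener list (mirroring the addListener call in start()), and
clusterChanged() returns early for events whose metadata did not change. A
compile-only sketch of the lifecycle pattern this enforces; the class below
is hypothetical, while the listener API is the real one:

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.ClusterStateListener;
    import org.elasticsearch.cluster.service.ClusterService;

    class ListenerLifecycleSketch implements ClusterStateListener {
        private final ClusterService clusterService;

        ListenerLifecycleSketch(ClusterService clusterService) {
            this.clusterService = clusterService;
        }

        synchronized void start() {
            clusterService.addListener(this); // registered on start ...
        }

        synchronized void stop() {
            // ... so stop must unregister, or the stopped service keeps
            // receiving (and reacting to) cluster state updates.
            clusterService.removeListener(this);
        }

        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.metadataChanged() == false) {
                return; // assignments live in metadata; skip unrelated updates
            }
            // ... recompute autoscaling decisions from event.state() ...
        }
    }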
---
.../AdaptiveAllocationsScalerService.java | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java
index 6c59add730052..bbe90f769818b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java
@@ -259,12 +259,17 @@ public synchronized void start() {
}
public synchronized void stop() {
+ clusterService.removeListener(this);
stopScheduling();
metrics.close();
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
+ if (event.metadataChanged() == false) {
+ return;
+ }
+
updateAutoscalers(event.state());
if (scalers.isEmpty() == false) {
startScheduling();
From 2380778bc175857c385422d2c6026e7f41a357c4 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Wed, 25 Sep 2024 13:54:39 +0100
Subject: [PATCH 21/30] Fix up comment in `PersistentTasksNodeService`
(#113526)
The formatting of this comment was destroyed by spotless. This commit
restores it.
---
.../PersistentTasksNodeService.java | 54 ++++++++++---------
1 file changed, 29 insertions(+), 25 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
index 8bad8b5003bce..b86292be8e9ee 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java
@@ -75,31 +75,35 @@ public void clusterChanged(ClusterChangedEvent event) {
PersistentTasksCustomMetadata tasks = event.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE);
PersistentTasksCustomMetadata previousTasks = event.previousState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE);
- // Cluster State Local State Local Action
- // STARTED NULL Create as STARTED, Start
- // STARTED STARTED Noop - running
- // STARTED COMPLETED Noop - waiting for notification ack
- // STARTED LOCAL_ABORTED Noop - waiting for notification ack
-
- // NULL NULL Noop - nothing to do
- // NULL STARTED Remove locally, Mark as PENDING_CANCEL, Cancel
- // NULL COMPLETED Remove locally
- // NULL LOCAL_ABORTED Remove locally
-
- // Master states:
- // NULL - doesn't exist in the cluster state
- // STARTED - exist in the cluster state
-
- // Local state:
- // NULL - we don't have task registered locally in runningTasks
- // STARTED - registered in TaskManager, requires master notification when finishes
- // PENDING_CANCEL - registered in TaskManager, doesn't require master notification when finishes
- // COMPLETED - not registered in TaskManager, notified, waiting for master to remove it from CS so we can remove locally
- // LOCAL_ABORTED - not registered in TaskManager, notified, waiting for master to adjust it in CS so we can remove locally
-
- // When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered,
- // If the task was STARTED, the master notification is also triggered (this is handled by unregisterTask() method, which is
- // triggered by PersistentTaskListener
+ /*
+ * Master states:
+ * NULL - doesn't exist in the cluster state
+ * STARTED - exist in the cluster state
+ *
+ * Local states (see org.elasticsearch.persistent.AllocatedPersistentTask.State)
+ * NULL - we don't have task registered locally in runningTasks
+ * STARTED - registered in TaskManager, requires master notification when finishes
+ * PENDING_CANCEL - registered in TaskManager, doesn't require master notification when finishes
+ * COMPLETED - not registered in TaskManager, notified, waiting for master to remove it from CS so we can remove locally
+ * LOCAL_ABORTED - not registered in TaskManager, notified, waiting for master to adjust it in CS so we can remove locally
+ *
+ * Master state | Local state | Local action
+ * ---------------+----------------+-----------------------------------------------
+ * STARTED | NULL | Create as STARTED, Start
+ * STARTED | STARTED | Noop - running
+ * STARTED | PENDING_CANCEL | Impossible
+ * STARTED | COMPLETED | Noop - waiting for notification ack
+ * STARTED | LOCAL_ABORTED | Noop - waiting for notification ack
+ * NULL | NULL | Noop - nothing to do
+ * NULL | STARTED | Remove locally, Mark as PENDING_CANCEL, Cancel
+ * NULL | PENDING_CANCEL | Noop - will remove locally when complete
+ * NULL | COMPLETED | Remove locally
+ * NULL | LOCAL_ABORTED | Remove locally
+ *
+ * When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered,
+ * If the task was STARTED, the master notification is also triggered (this is handled by unregisterTask() method, which is
+ * triggered by PersistentTaskListener
+ */
if (Objects.equals(tasks, previousTasks) == false || event.nodesChanged()) {
// We have some changes let's check if they are related to our node
From d2447c723fe47e200ea801a7b55ca0c617cd1c89 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 25 Sep 2024 15:12:15 +0200
Subject: [PATCH 22/30] Move field_caps parsers to test codebase (#113310)
The parsing logic is test-only at this point, so let's move it to the test
codebase to keep the prod codebase a little smaller.
Also fixed a missing `static`.
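The moved code is the stock ConstructingObjectParser/InstantiatingObjectParser
pattern, visible in the removed hunks below. For reference, a minimal
hypothetical example of the same pattern on a toy class (not the actual
FieldCapsUtils code):

    import org.elasticsearch.xcontent.ConstructingObjectParser;
    import org.elasticsearch.xcontent.InstantiatingObjectParser;
    import org.elasticsearch.xcontent.ParseField;
    import org.elasticsearch.xcontent.ParserConstructor;
    import org.elasticsearch.xcontent.XContentParser;

    import java.io.IOException;

    class CapsSketch {
        final String type;
        final boolean searchable;

        // InstantiatingObjectParser reflectively invokes this constructor
        // with the declared fields, in declaration order.
        @ParserConstructor
        CapsSketch(String type, boolean searchable) {
            this.type = type;
            this.searchable = searchable;
        }

        private static final InstantiatingObjectParser<CapsSketch, Void> PARSER;
        static {
            InstantiatingObjectParser.Builder<CapsSketch, Void> builder =
                InstantiatingObjectParser.builder("caps_sketch", true, CapsSketch.class);
            builder.declareString(ConstructingObjectParser.constructorArg(), new ParseField("type"));
            builder.declareBoolean(ConstructingObjectParser.constructorArg(), new ParseField("searchable"));
            PARSER = builder.build();
        }

        static CapsSketch fromXContent(XContentParser parser) throws IOException {
            return PARSER.parse(parser, null);
        }
    }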
---
.../action/fieldcaps/FieldCapabilities.java | 57 ++------
.../fieldcaps/FieldCapabilitiesFailure.java | 31 +----
.../fieldcaps/FieldCapabilitiesResponse.java | 56 +-------
.../TransportFieldCapabilitiesAction.java | 4 +-
.../FieldCapabilitiesResponseTests.java | 2 +-
.../fieldcaps/FieldCapabilitiesTests.java | 2 +-
.../MergedFieldCapabilitiesResponseTests.java | 2 +-
.../action/fieldcaps/FieldCapsUtils.java | 129 ++++++++++++++++++
.../test/rest/ESRestTestCase.java | 3 +-
.../xpack/esql/analysis/AnalyzerTests.java | 14 --
.../analysis/index/IndexResolverTests.java | 6 +-
11 files changed, 156 insertions(+), 150 deletions(-)
create mode 100644 test/framework/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapsUtils.java
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java
index 321cc394ea809..f1aab9501051e 100644
--- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java
+++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java
@@ -16,13 +16,10 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.Predicates;
import org.elasticsearch.index.mapper.TimeSeriesParams;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.InstantiatingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ParserConstructor;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.ArrayList;
@@ -46,18 +43,17 @@
*/
public class FieldCapabilities implements Writeable, ToXContentObject {
- private static final ParseField TYPE_FIELD = new ParseField("type");
- private static final ParseField IS_METADATA_FIELD = new ParseField("metadata_field");
- private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable");
- private static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable");
- private static final ParseField TIME_SERIES_DIMENSION_FIELD = new ParseField(TIME_SERIES_DIMENSION_PARAM);
- private static final ParseField TIME_SERIES_METRIC_FIELD = new ParseField(TIME_SERIES_METRIC_PARAM);
- private static final ParseField INDICES_FIELD = new ParseField("indices");
- private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices");
- private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices");
- private static final ParseField NON_DIMENSION_INDICES_FIELD = new ParseField("non_dimension_indices");
- private static final ParseField METRIC_CONFLICTS_INDICES_FIELD = new ParseField("metric_conflicts_indices");
- private static final ParseField META_FIELD = new ParseField("meta");
+ public static final ParseField TYPE_FIELD = new ParseField("type");
+ public static final ParseField IS_METADATA_FIELD = new ParseField("metadata_field");
+ public static final ParseField SEARCHABLE_FIELD = new ParseField("searchable");
+ public static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable");
+ public static final ParseField TIME_SERIES_DIMENSION_FIELD = new ParseField(TIME_SERIES_DIMENSION_PARAM);
+ public static final ParseField TIME_SERIES_METRIC_FIELD = new ParseField(TIME_SERIES_METRIC_PARAM);
+ public static final ParseField INDICES_FIELD = new ParseField("indices");
+ public static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices");
+ public static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices");
+ public static final ParseField NON_DIMENSION_INDICES_FIELD = new ParseField("non_dimension_indices");
+ public static final ParseField METRIC_CONFLICTS_INDICES_FIELD = new ParseField("metric_conflicts_indices");
private final String name;
private final String type;
@@ -312,37 +308,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
return builder;
}
- public static FieldCapabilities fromXContent(String name, XContentParser parser) throws IOException {
- return PARSER.parse(parser, name);
- }
-
- private static final InstantiatingObjectParser<FieldCapabilities, String> PARSER;
-
- static {
- InstantiatingObjectParser.Builder<FieldCapabilities, String> parser = InstantiatingObjectParser.builder(
- "field_capabilities",
- true,
- FieldCapabilities.class
- );
- parser.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD);
- parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), IS_METADATA_FIELD);
- parser.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD);
- parser.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD);
- parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_DIMENSION_FIELD);
- parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_METRIC_FIELD);
- parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD);
- parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD);
- parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD);
- parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_DIMENSION_INDICES_FIELD);
- parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), METRIC_CONFLICTS_INDICES_FIELD);
- parser.declareObject(
- ConstructingObjectParser.optionalConstructorArg(),
- (p, context) -> p.map(HashMap::new, v -> Set.copyOf(v.list())),
- META_FIELD
- );
- PARSER = parser.build();
- }
-
/**
* The name of the field.
*/
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java
index cc2ea2a4ed57f..1153633ecf595 100644
--- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java
+++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java
@@ -13,12 +13,9 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.XContentParserUtils;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.ArrayList;
@@ -28,8 +25,8 @@
public class FieldCapabilitiesFailure implements Writeable, ToXContentObject {
- private static final ParseField INDICES_FIELD = new ParseField("indices");
- private static final ParseField FAILURE_FIELD = new ParseField("failure");
+ public static final ParseField INDICES_FIELD = new ParseField("indices");
+ public static final ParseField FAILURE_FIELD = new ParseField("failure");
private final List<String> indices;
private final Exception exception;
@@ -58,30 +55,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
return builder;
}
- @SuppressWarnings("unchecked")
- private static final ConstructingObjectParser<FieldCapabilitiesFailure, Void> PARSER = new ConstructingObjectParser<>(
- "field_capabilities_failure",
- true,
- a -> {
- return new FieldCapabilitiesFailure(((List<String>) a[0]).toArray(String[]::new), (Exception) a[1]);
- }
- );
-
- static {
- PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), INDICES_FIELD);
- PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> {
- XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.currentToken(), p);
- XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p);
- Exception e = ElasticsearchException.failureFromXContent(p);
- XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p);
- return e;
- }, FAILURE_FIELD);
- }
-
- public static FieldCapabilitiesFailure fromXContent(XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(indices);
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java
index e5c2678bbf38e..e0f54aeef72ea 100644
--- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java
@@ -15,31 +15,25 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
-import org.elasticsearch.common.xcontent.XContentParserUtils;
-import org.elasticsearch.core.Tuple;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContent;
-import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.stream.Collectors;
/**
* Response for {@link FieldCapabilitiesRequest} requests.
*/
public class FieldCapabilitiesResponse extends ActionResponse implements ChunkedToXContentObject {
- private static final ParseField INDICES_FIELD = new ParseField("indices");
- private static final ParseField FIELDS_FIELD = new ParseField("fields");
+ public static final ParseField INDICES_FIELD = new ParseField("indices");
+ public static final ParseField FIELDS_FIELD = new ParseField("fields");
private static final ParseField FAILED_INDICES_FIELD = new ParseField("failed_indices");
- private static final ParseField FAILURES_FIELD = new ParseField("failures");
+ public static final ParseField FAILURES_FIELD = new ParseField("failures");
private final String[] indices;
private final Map<String, Map<String, FieldCapabilities>> responseMap;
@@ -183,50 +177,6 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params
);
}
- public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-
- @SuppressWarnings("unchecked")
- private static final ConstructingObjectParser<FieldCapabilitiesResponse, Void> PARSER = new ConstructingObjectParser<>(
- "field_capabilities_response",
- true,
- a -> {
- Map<String, Map<String, FieldCapabilities>> responseMap = ((List<Tuple<String, Map<String, FieldCapabilities>>>) a[0]).stream()
- .collect(Collectors.toMap(Tuple::v1, Tuple::v2));
- List<String> indices = a[1] == null ? Collections.emptyList() : (List<String>) a[1];
- List<FieldCapabilitiesFailure> failures = a[2] == null ? Collections.emptyList() : (List<FieldCapabilitiesFailure>) a[2];
- return new FieldCapabilitiesResponse(indices.toArray(String[]::new), responseMap, failures);
- }
- );
-
- static {
- PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> {
- Map<String, FieldCapabilities> typeToCapabilities = parseTypeToCapabilities(p, n);
- return new Tuple<>(n, typeToCapabilities);
- }, FIELDS_FIELD);
- PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD);
- PARSER.declareObjectArray(
- ConstructingObjectParser.optionalConstructorArg(),
- (p, c) -> FieldCapabilitiesFailure.fromXContent(p),
- FAILURES_FIELD
- );
- }
-
- private static Map<String, FieldCapabilities> parseTypeToCapabilities(XContentParser parser, String name) throws IOException {
- Map<String, FieldCapabilities> typeToCapabilities = new HashMap<>();
-
- XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
- XContentParser.Token token;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
- String type = parser.currentName();
- FieldCapabilities capabilities = FieldCapabilities.fromXContent(name, parser);
- typeToCapabilities.put(type, capabilities);
- }
- return typeToCapabilities;
- }
-
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
index 8e222e7197180..66434134fa69e 100644
--- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
@@ -310,7 +310,7 @@ private static void checkIndexBlocks(ClusterState clusterState, String[] concret
}
}
- private void mergeIndexResponses(
+ private static void mergeIndexResponses(
FieldCapabilitiesRequest request,
CancellableTask task,
Map<String, FieldCapabilitiesIndexResponse> indexResponses,
@@ -564,7 +564,7 @@ boolean isEmpty() {
private class NodeTransportHandler implements TransportRequestHandler<FieldCapabilitiesNodeRequest> {
@Override
- public void messageReceived(FieldCapabilitiesNodeRequest request, TransportChannel channel, Task task) throws Exception {
+ public void messageReceived(FieldCapabilitiesNodeRequest request, TransportChannel channel, Task task) {
assert task instanceof CancellableTask;
final ActionListener<FieldCapabilitiesNodeResponse> listener = new ChannelActionListener<>(channel);
ActionListener.completeWith(listener, () -> {
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
index 1b2384b23e413..6ea4a1d3dc46b 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
@@ -119,7 +119,7 @@ public void testFailureParsing() throws IOException {
);
FieldCapabilitiesResponse parsedResponse;
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
- parsedResponse = FieldCapabilitiesResponse.fromXContent(parser);
+ parsedResponse = FieldCapsUtils.parseFieldCapsResponse(parser);
assertNull(parser.nextToken());
}
assertNotSame(parsedResponse, randomResponse);
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java
index 27e36b6d35b7e..ed1af12965841 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java
@@ -35,7 +35,7 @@ public class FieldCapabilitiesTests extends AbstractXContentSerializingTestCase<
@Override
protected FieldCapabilities doParseInstance(XContentParser parser) throws IOException {
- return FieldCapabilities.fromXContent(FIELD_NAME, parser);
+ return FieldCapsUtils.parseFieldCaps(FIELD_NAME, parser);
}
@Override
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java
index 7b19f34cfe6cc..2059e9dd78b04 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java
@@ -26,7 +26,7 @@ public class MergedFieldCapabilitiesResponseTests extends AbstractChunkedSeriali
@Override
protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException {
- return FieldCapabilitiesResponse.fromXContent(parser);
+ return FieldCapsUtils.parseFieldCapsResponse(parser);
}
@Override
diff --git a/test/framework/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapsUtils.java b/test/framework/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapsUtils.java
new file mode 100644
index 0000000000000..84c057d3b6a81
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapsUtils.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+package org.elasticsearch.action.fieldcaps;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.InstantiatingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Utilities for parsing field_caps responses for test purposes.
+ */
+public enum FieldCapsUtils {
+ ;
+
+ @SuppressWarnings("unchecked")
+ private static final ConstructingObjectParser<FieldCapabilitiesFailure, Void> FAILURE_PARSER = new ConstructingObjectParser<>(
+ "field_capabilities_failure",
+ true,
+ a -> new FieldCapabilitiesFailure(((List<String>) a[0]).toArray(String[]::new), (Exception) a[1])
+ );
+
+ static {
+ FAILURE_PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FieldCapabilitiesFailure.INDICES_FIELD);
+ FAILURE_PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> {
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.currentToken(), p);
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p);
+ Exception e = ElasticsearchException.failureFromXContent(p);
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p);
+ return e;
+ }, FieldCapabilitiesFailure.FAILURE_FIELD);
+ }
+
+ public static FieldCapabilitiesFailure parseFailure(XContentParser parser) throws IOException {
+ return FAILURE_PARSER.parse(parser, null);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static final ConstructingObjectParser<FieldCapabilitiesResponse, Void> PARSER = new ConstructingObjectParser<>(
+ "field_capabilities_response",
+ true,
+ a -> {
+ Map<String, Map<String, FieldCapabilities>> responseMap = ((List<Tuple<String, Map<String, FieldCapabilities>>>) a[0]).stream()
+ .collect(Collectors.toMap(Tuple::v1, Tuple::v2));
+ List<String> indices = a[1] == null ? Collections.emptyList() : (List<String>) a[1];
+ List<FieldCapabilitiesFailure> failures = a[2] == null ? Collections.emptyList() : (List<FieldCapabilitiesFailure>) a[2];
+ return new FieldCapabilitiesResponse(indices.toArray(String[]::new), responseMap, failures);
+ }
+ );
+
+ static {
+ PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> {
+ Map<String, FieldCapabilities> typeToCapabilities = parseTypeToCapabilities(p, n);
+ return new Tuple<>(n, typeToCapabilities);
+ }, FieldCapabilitiesResponse.FIELDS_FIELD);
+ PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilitiesResponse.INDICES_FIELD);
+ PARSER.declareObjectArray(
+ ConstructingObjectParser.optionalConstructorArg(),
+ (p, c) -> parseFailure(p),
+ FieldCapabilitiesResponse.FAILURES_FIELD
+ );
+ }
+
+ public static FieldCapabilitiesResponse parseFieldCapsResponse(XContentParser parser) throws IOException {
+ return PARSER.parse(parser, null);
+ }
+
+ private static Map<String, FieldCapabilities> parseTypeToCapabilities(XContentParser parser, String name) throws IOException {
+ Map<String, FieldCapabilities> typeToCapabilities = new HashMap<>();
+
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
+ String type = parser.currentName();
+ FieldCapabilities capabilities = parseFieldCaps(name, parser);
+ typeToCapabilities.put(type, capabilities);
+ }
+ return typeToCapabilities;
+ }
+
+ public static FieldCapabilities parseFieldCaps(String name, XContentParser parser) throws IOException {
+ return FIELD_CAPS_PARSER.parse(parser, name);
+ }
+
+ private static final InstantiatingObjectParser<FieldCapabilities, String> FIELD_CAPS_PARSER;
+
+ static {
+ InstantiatingObjectParser.Builder<FieldCapabilities, String> parser = InstantiatingObjectParser.builder(
+ "field_capabilities",
+ true,
+ FieldCapabilities.class
+ );
+ parser.declareString(ConstructingObjectParser.constructorArg(), FieldCapabilities.TYPE_FIELD);
+ parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.IS_METADATA_FIELD);
+ parser.declareBoolean(ConstructingObjectParser.constructorArg(), FieldCapabilities.SEARCHABLE_FIELD);
+ parser.declareBoolean(ConstructingObjectParser.constructorArg(), FieldCapabilities.AGGREGATABLE_FIELD);
+ parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.TIME_SERIES_DIMENSION_FIELD);
+ parser.declareString(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.TIME_SERIES_METRIC_FIELD);
+ parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.INDICES_FIELD);
+ parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.NON_SEARCHABLE_INDICES_FIELD);
+ parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.NON_AGGREGATABLE_INDICES_FIELD);
+ parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.NON_DIMENSION_INDICES_FIELD);
+ parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), FieldCapabilities.METRIC_CONFLICTS_INDICES_FIELD);
+ parser.declareObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ (p, context) -> p.map(HashMap::new, v -> Set.copyOf(v.list())),
+ new ParseField("meta")
+ );
+ FIELD_CAPS_PARSER = parser.build();
+ }
+}
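One detail worth calling out in the new file: `FieldCapsUtils` is declared as an enum with no constants (the bare `;`), an idiom that yields a holder for static utilities that can neither be instantiated nor subclassed. A minimal sketch of the idiom, with invented names:

```java
// Zero-constant enum: no instances can ever exist, so the type can only
// serve as a namespace for its static members.
public enum UtilsSketch {
    ;

    public static int twice(int x) {
        return 2 * x;
    }
}
```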
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 5c25e0cc3b0d9..6ed0a1dfe0229 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -34,6 +34,7 @@
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
+import org.elasticsearch.action.fieldcaps.FieldCapsUtils;
import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -2458,7 +2459,7 @@ protected FieldCapabilitiesResponse fieldCaps(
Response response = restClient.performRequest(request);
assertOK(response);
try (XContentParser parser = responseAsParser(response)) {
- return FieldCapabilitiesResponse.fromXContent(parser);
+ return FieldCapsUtils.parseFieldCapsResponse(parser);
}
}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
index a14c6bf22d532..106e58c3f89d9 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java
@@ -10,16 +10,10 @@
import org.elasticsearch.Build;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xcontent.XContentParserConfiguration;
-import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.esql.LoadMapping;
import org.elasticsearch.xpack.esql.VerificationException;
import org.elasticsearch.xpack.esql.core.expression.Alias;
@@ -56,7 +50,6 @@
import org.elasticsearch.xpack.esql.session.IndexResolver;
import java.io.IOException;
-import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -2132,13 +2125,6 @@ private static LogicalPlan analyzeWithEmptyFieldCapsResponse(String query) throw
return analyze(query, analyzer);
}
- private static FieldCapabilitiesResponse readFieldCapsResponse(String resourceName) throws IOException {
- InputStream stream = AnalyzerTests.class.getResourceAsStream("/" + resourceName);
- BytesReference ref = Streams.readFully(stream);
- XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, ref, XContentType.JSON);
- return FieldCapabilitiesResponse.fromXContent(parser);
- }
-
private void assertEmptyEsRelation(LogicalPlan plan) {
assertThat(plan, instanceOf(EsRelation.class));
EsRelation esRelation = (EsRelation) plan;
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
index 72de6c99191cc..6d7822e5619cc 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
@@ -8,6 +8,7 @@
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
+import org.elasticsearch.action.fieldcaps.FieldCapsUtils;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.util.Maps;
@@ -466,8 +467,9 @@ public void testMergeObjectUnsupportedTypes() throws Exception {
private static FieldCapabilitiesResponse readFieldCapsResponse(String resourceName) throws IOException {
InputStream stream = IndexResolverTests.class.getResourceAsStream("/" + resourceName);
BytesReference ref = Streams.readFully(stream);
- XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, ref, XContentType.JSON);
- return FieldCapabilitiesResponse.fromXContent(parser);
+ try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, ref, XContentType.JSON)) {
+ return FieldCapsUtils.parseFieldCapsResponse(parser);
+ }
}
public static IndexResolution merge(EsIndex... indices) {
From fce8e40470e060ce421220a140a0eadfd29cf5c2 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 25 Sep 2024 23:55:19 +1000
Subject: [PATCH 23/30] Mute
org.elasticsearch.xpack.esql.expression.function.fulltext.QueryStringFunctionTests
org.elasticsearch.xpack.esql.expression.function.fulltext.QueryStringFunctionTests
#113496
---
muted-tests.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index e85b8c5ad5a45..4106c442b8ae9 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -293,6 +293,8 @@ tests:
- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT
method: test {p0=dot_prefix/10_basic/Deprecated index template with a dot prefix index pattern}
issue: https://github.com/elastic/elasticsearch/issues/113529
+- class: org.elasticsearch.xpack.esql.expression.function.fulltext.QueryStringFunctionTests
+ issue: https://github.com/elastic/elasticsearch/issues/113496
# Examples:
#
From 6642f1ee869b9220588bd8a31fa464ae93710970 Mon Sep 17 00:00:00 2001
From: Ignacio Vera
Date: Wed, 25 Sep 2024 16:22:38 +0200
Subject: [PATCH 24/30] Unmute tests affected by
https://github.com/elastic/elasticsearch/pull/113099 (#113533)
Those tests were failing due to a missing backport, with the following error:
```
stack_trace":"java.io.IOException: Can't read unknown type [111]\n\tat org.elasticsearch.server@9.0.0-SNAPSHOT
```
The backport is done so we can just unmute them.
fixes https://github.com/elastic/elasticsearch/issues/113502 fixes
https://github.com/elastic/elasticsearch/issues/113497
---
muted-tests.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/muted-tests.yml b/muted-tests.yml
index 4106c442b8ae9..07eaff7f6fea3 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -281,12 +281,6 @@ tests:
- class: org.elasticsearch.xpack.ml.integration.MlJobIT
method: testOutOfOrderData
issue: https://github.com/elastic/elasticsearch/issues/113477
-- class: org.elasticsearch.upgrades.UpgradeClusterClientYamlTestSuiteIT
- method: test {p0=mixed_cluster/100_analytics_usage/Basic test for usage stats on analytics indices}
- issue: https://github.com/elastic/elasticsearch/issues/113497
-- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
- method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)}
- issue: https://github.com/elastic/elasticsearch/issues/113502
- class: org.elasticsearch.xpack.ml.integration.MlJobIT
method: testCreateJobsWithIndexNameOption
issue: https://github.com/elastic/elasticsearch/issues/113528
From 5d3c224aa9b473d67a7013407dafb22a3693974b Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Thu, 26 Sep 2024 00:48:49 +1000
Subject: [PATCH 25/30] Mute
org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT test
{p0=search/180_locale_dependent_mapping/Test Index and Search locale
dependent mappings / dates} #113537
---
muted-tests.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 07eaff7f6fea3..202cca7cfd199 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -289,6 +289,9 @@ tests:
issue: https://github.com/elastic/elasticsearch/issues/113529
- class: org.elasticsearch.xpack.esql.expression.function.fulltext.QueryStringFunctionTests
issue: https://github.com/elastic/elasticsearch/issues/113496
+- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
+ method: test {p0=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}
+ issue: https://github.com/elastic/elasticsearch/issues/113537
# Examples:
#
From 4ba0e526db44170ef027189bd33e7ab87a752809 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Thu, 26 Sep 2024 01:22:36 +1000
Subject: [PATCH 26/30] Mute
org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT test
{p0=esql/70_locale/Date format with default locale} #113539
---
muted-tests.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 202cca7cfd199..5ea6206c7f27c 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -292,6 +292,9 @@ tests:
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
method: test {p0=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}
issue: https://github.com/elastic/elasticsearch/issues/113537
+- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
+ method: test {p0=esql/70_locale/Date format with default locale}
+ issue: https://github.com/elastic/elasticsearch/issues/113539
# Examples:
#
From 8e18d6e8a76aed6297492854dc05348562848779 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
<58790826+elasticsearchmachine@users.noreply.github.com>
Date: Thu, 26 Sep 2024 01:22:47 +1000
Subject: [PATCH 27/30] Mute
org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT test
{p0=esql/70_locale/Date format with Italian locale} #113540
---
muted-tests.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml
index 5ea6206c7f27c..381011301f533 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -295,6 +295,9 @@ tests:
- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
method: test {p0=esql/70_locale/Date format with default locale}
issue: https://github.com/elastic/elasticsearch/issues/113539
+- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
+ method: test {p0=esql/70_locale/Date format with Italian locale}
+ issue: https://github.com/elastic/elasticsearch/issues/113540
# Examples:
#
From 4f666310c7dff24d3f7704635a8faf01dcc140c7 Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Wed, 25 Sep 2024 17:30:01 +0200
Subject: [PATCH 28/30] [DOCS] Create Elasticsearch basics section, refactor
quickstarts section (#112436)
Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com>
---
docs/reference/index.asciidoc | 4 +-
docs/reference/intro.asciidoc | 368 ++++++++++++------
docs/reference/landing-page.asciidoc | 2 +-
.../quickstart/getting-started.asciidoc | 87 +----
docs/reference/quickstart/index.asciidoc | 31 +-
.../run-elasticsearch-locally.asciidoc | 34 +-
docs/reference/setup.asciidoc | 2 +
docs/reference/tab-widgets/api-call.asciidoc | 8 +-
8 files changed, 323 insertions(+), 213 deletions(-)
rename docs/reference/{quickstart => }/run-elasticsearch-locally.asciidoc (68%)
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 79b5f2b69f24d..24dbee8c2983b 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -6,10 +6,10 @@ include::links.asciidoc[]
include::landing-page.asciidoc[]
-include::intro.asciidoc[]
-
include::release-notes/highlights.asciidoc[]
+include::intro.asciidoc[]
+
include::quickstart/index.asciidoc[]
include::setup.asciidoc[]
diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc
index f80856368af2b..831888103c5c1 100644
--- a/docs/reference/intro.asciidoc
+++ b/docs/reference/intro.asciidoc
@@ -1,68 +1,98 @@
[[elasticsearch-intro]]
-== What is {es}?
+== {es} basics
+
+This guide covers the core concepts you need to understand to get started with {es}.
+If you'd prefer to start working with {es} right away, set up a <> and jump to <>.
+
+This guide covers the following topics:
+
+* <>: Learn about {es} and some of its main use cases.
+* <>: Understand your options for deploying {es} in different environments, including a fast local development setup.
+* <>: Understand {es}'s most important primitives and how it stores data.
+* <>: Understand your options for ingesting data into {es}.
+* <>: Understand your options for searching and analyzing data in {es}.
+* <>: Understand the basic concepts required for moving your {es} deployment to production.
+
+[[elasticsearch-intro-what-is-es]]
+=== What is {es}?
{es-repo}[{es}] is a distributed search and analytics engine, scalable data store, and vector database built on Apache Lucene.
It's optimized for speed and relevance on production-scale workloads.
Use {es} to search, index, store, and analyze data of all shapes and sizes in near real time.
+{es} is the heart of the {estc-welcome-current}/stack-components.html[Elastic Stack].
+Combined with https://www.elastic.co/kibana[{kib}], it powers the following Elastic solutions:
+
+* https://www.elastic.co/observability[Observability]
+* https://www.elastic.co/enterprise-search[Search]
+* https://www.elastic.co/security[Security]
+
[TIP]
====
{es} has a lot of features. Explore the full list on the https://www.elastic.co/elasticsearch/features[product webpage^].
====
-{es} is the heart of the {estc-welcome-current}/stack-components.html[Elastic Stack] and powers the Elastic https://www.elastic.co/enterprise-search[Search], https://www.elastic.co/observability[Observability] and https://www.elastic.co/security[Security] solutions.
-
-{es} is used for a wide and growing range of use cases. Here are a few examples:
-
-* *Monitor log and event data*: Store logs, metrics, and event data for observability and security information and event management (SIEM).
-* *Build search applications*: Add search capabilities to apps or websites, or build search engines over internal data.
-* *Vector database*: Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models.
-* *Retrieval augmented generation (RAG)*: Use {es} as a retrieval engine to augment generative AI models.
-* *Application and security monitoring*: Monitor and analyze application performance and security data.
-* *Machine learning*: Use {ml} to automatically model the behavior of your data in real-time.
-
-This is just a sample of search, observability, and security use cases enabled by {es}.
-Refer to our https://www.elastic.co/customers/success-stories[customer success stories] for concrete examples across a range of industries.
-// Link to demos, search labs chatbots
-
[discrete]
[[elasticsearch-intro-elastic-stack]]
.What is the Elastic Stack?
*******************************
{es} is the core component of the Elastic Stack, a suite of products for collecting, storing, searching, and visualizing data.
-https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current/stack-components.html[Learn more about the Elastic Stack].
+{estc-welcome-current}/stack-components.html[Learn more about the Elastic Stack].
*******************************
-// TODO: Remove once we've moved Stack Overview to a subpage?
[discrete]
+[[elasticsearch-intro-use-cases]]
+==== Use cases
+
+{es} is used for a wide and growing range of use cases. Here are a few examples:
+
+**Observability**
+
+* *Logs, metrics, and traces*: Collect, store, and analyze logs, metrics, and traces from applications, systems, and services.
+* *Application performance monitoring (APM)*: Monitor and analyze the performance of business-critical software applications.
+* *Real user monitoring (RUM)*: Monitor, quantify, and analyze user interactions with web applications.
+* *OpenTelemetry*: Reuse your existing instrumentation to send telemetry data to the Elastic Stack using the OpenTelemetry standard.
+
+**Search**
+
+* *Full-text search*: Build a fast, relevant full-text search solution using inverted indexes, tokenization, and text analysis.
+* *Vector database*: Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models.
+* *Semantic search*: Understand the intent and contextual meaning behind search queries using tools like synonyms, dense vector embeddings, and learned sparse query-document expansion.
+* *Hybrid search*: Combine full-text search with vector search using state-of-the-art ranking algorithms.
+* *Build search experiences*: Add hybrid search capabilities to apps or websites, or build enterprise search engines over your organization's internal data sources.
+* *Retrieval augmented generation (RAG)*: Use {es} as a retrieval engine to supplement generative AI models with more relevant, up-to-date, or proprietary data for a range of use cases.
+* *Geospatial search*: Search for locations and calculate spatial relationships using geospatial queries.
+
+**Security**
+
+* *Security information and event management (SIEM)*: Collect, store, and analyze security data from applications, systems, and services.
+* *Endpoint security*: Monitor and analyze endpoint security data.
+* *Threat hunting*: Search and analyze data to detect and respond to security threats.
+
+This is just a sample of search, observability, and security use cases enabled by {es}.
+Refer to Elastic https://www.elastic.co/customers/success-stories[customer success stories] for concrete examples across a range of industries.
+
[[elasticsearch-intro-deploy]]
-=== Deployment options
+=== Run {es}
To use {es}, you need a running instance of the {es} service.
-You can deploy {es} in various ways:
+You can deploy {es} in various ways.
-* <>: Get started quickly with a minimal local Docker setup.
-* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]: {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial].
+**Quick start option**
+
+* <>: Get started quickly with a minimal local Docker setup for development and testing.
+
+**Hosted options**
+
+* {cloud}/ec-getting-started-trial.html[*Elastic Cloud Hosted*]: {es} is available as part of the hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial].
* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial].
-**Advanced deployment options**
+**Advanced options**
* <>: Install, configure, and run {es} on your own premises.
* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]: Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises.
* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]: Deploy Elastic Cloud on Kubernetes.
-[discrete]
-[[elasticsearch-next-steps]]
-=== Learn more
-
-Here are some resources to help you get started:
-
-* <>: A beginner's guide to deploying your first {es} instance, indexing data, and running queries.
-* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]: Register for our live webinars to learn directly from {es} experts.
-* https://www.elastic.co/search-labs[Elastic Search Labs]: Tutorials and blogs that explore AI-powered search using the latest {es} features.
-** Follow our tutorial https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[to build a hybrid search solution in Python].
-** Check out the https://github.com/elastic/elasticsearch-labs?tab=readme-ov-file#elasticsearch-examples--apps[`elasticsearch-labs` repository] for a range of Python notebooks and apps for various use cases.
-
// new html page
[[documents-indices]]
=== Indices, documents, and fields
@@ -73,20 +103,16 @@ Here are some resources to help you get started:
The index is the fundamental unit of storage in {es}, a logical namespace for storing data that share similar characteristics.
After you have {es} <>, you'll get started by creating an index to store your data.
+An index is a collection of documents uniquely identified by a name or an <>.
+This unique name is important because it's used to target the index in search queries and other operations.
+
[TIP]
====
A closely related concept is a <>.
-This index abstraction is optimized for append-only time-series data, and is made up of hidden, auto-generated backing indices.
-If you're working with time-series data, we recommend the {observability-guide}[Elastic Observability] solution.
+This index abstraction is optimized for append-only timestamped data, and is made up of hidden, auto-generated backing indices.
+If you're working with timestamped data, we recommend the {observability-guide}[Elastic Observability] solution for additional tools and optimized content.
====
-Some key facts about indices:
-
-* An index is a collection of documents
-* An index has a unique name
-* An index can also be referred to by an alias
-* An index has a mapping that defines the schema of its documents
-
[discrete]
[[elasticsearch-intro-documents-fields]]
==== Documents and fields
@@ -126,14 +152,12 @@ A simple {es} document might look like this:
[discrete]
[[elasticsearch-intro-documents-fields-data-metadata]]
-==== Data and metadata
+==== Metadata fields
-An indexed document contains data and metadata.
+An indexed document contains data and metadata. <> are system fields that store information about the documents.
In {es}, metadata fields are prefixed with an underscore.
+For example, the following fields are metadata fields:
-The most important metadata fields are:
-
-* `_source`: Contains the original JSON document.
* `_index`: The name of the index where the document is stored.
* `_id`: The document's ID. IDs must be unique per index.
@@ -146,8 +170,8 @@ A mapping defines the <> for each field, how the field
and how it should be stored.
When adding documents to {es}, you have two options for mappings:
-* <>: Let {es} automatically detect the data types and create the mappings for you. This is great for getting started quickly, but can lead to unexpected results for complex data.
-* <>: Define the mappings up front by specifying data types for each field. Recommended for production use cases, because you have much more control over how your data is indexed.
+* <>: Let {es} automatically detect the data types and create the mappings for you. Dynamic mapping helps you get started quickly, but might yield suboptimal results for your specific use case due to automatic field type inference.
+* <>: Define the mappings up front by specifying data types for each field. Recommended for production use cases, because you have full control over how your data is indexed to suit your specific use case.
[TIP]
====
@@ -155,81 +179,207 @@ You can use a combination of dynamic and explicit mapping on the same index.
This is useful when you have a mix of known and unknown fields in your data.
====
+// New html page
+[[es-ingestion-overview]]
+=== Add data to {es}
+
+There are multiple ways to ingest data into {es}.
+The option that you choose depends on whether you're working with timestamped data or non-timestamped data, where the data is coming from, its complexity, and more.
+
+[TIP]
+====
+You can load {kibana-ref}/connect-to-elasticsearch.html#_add_sample_data[sample data] into your {es} cluster using {kib}, to get started quickly.
+====
+
+[discrete]
+[[es-ingestion-overview-general-content]]
+==== General content
+
+General content is data that does not have a timestamp.
+This could be data like vector embeddings, website content, product catalogs, and more.
+For general content, you have the following options for adding data to {es} indices:
+
+* <>: Use the {es} <> to index documents directly, using the Dev Tools {kibana-ref}/console-kibana.html[Console], or cURL.
++
+If you're building a website or app, then you can call Elasticsearch APIs using an https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} client] in the programming language of your choice. If you use the Python client, then check out the `elasticsearch-labs` repo for various https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/search/python-examples[example notebooks].
+* {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[File upload]: Use the {kib} file uploader to index single files for one-off testing and exploration. The GUI guides you through setting up your index and field mappings.
+* https://github.com/elastic/crawler[Web crawler]: Extract and index web page content into {es} documents.
+* {enterprise-search-ref}/connectors.html[Connectors]: Sync data from various third-party data sources to create searchable, read-only replicas in {es}.
+
+[discrete]
+[[es-ingestion-overview-timestamped]]
+==== Timestamped data
+
+Timestamped data in {es} refers to datasets that include a timestamp field. If you use the {ecs-ref}/ecs-reference.html[Elastic Common Schema (ECS)], this field is named `@timestamp`.
+This could be data like logs, metrics, and traces.
+
+For timestamped data, you have the following options for adding data to {es} data streams:
+
+* {fleet-guide}/fleet-overview.html[Elastic Agent and Fleet]: The preferred way to index timestamped data. Each Elastic Agent based integration includes default ingestion rules, dashboards, and visualizations to start analyzing your data right away.
+You can use the Fleet UI in {kib} to centrally manage Elastic Agents and their policies.
+* {beats-ref}/beats-reference.html[Beats]: If your data source isn't supported by Elastic Agent, use Beats to collect and ship data to Elasticsearch. You install a separate Beat for each type of data to collect.
+* {logstash-ref}/introduction.html[Logstash]: Logstash is an open source data collection engine with real-time pipelining capabilities that supports a wide variety of data sources. You might use this option because neither Elastic Agent nor Beats supports your data source. You can also use Logstash to persist incoming data, or if you need to send the data to multiple destinations.
+* {cloud}/ec-ingest-guides.html[Language clients]: The linked tutorials demonstrate how to use {es} programming language clients to ingest data from an application. In these examples, {es} is running on Elastic Cloud, but the same principles apply to any {es} deployment.
+
+[TIP]
+====
+If you're interested in data ingestion pipelines for timestamped data, use the decision tree in the {cloud}/ec-cloud-ingest-data.html#ec-data-ingest-pipeline[Elastic Cloud docs] to understand your options.
+====
+
// New html page
[[search-analyze]]
-=== Search and analyze
+=== Search and analyze data
-While you can use {es} as a document store and retrieve documents and their
-metadata, the real power comes from being able to easily access the full suite
-of search capabilities built on the Apache Lucene search engine library.
+You can use {es} as a basic document store to retrieve documents and their
+metadata.
+However, the real power of {es} comes from its advanced search and analytics capabilities.
-{es} provides a simple, coherent REST API for managing your cluster and indexing
-and searching your data. For testing purposes, you can easily submit requests
-directly from the command line or through the Developer Console in {kib}. From
-your applications, you can use the
-https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} client]
-for your language of choice: Java, JavaScript, Go, .NET, PHP, Perl, Python
-or Ruby.
+You'll use a combination of an API endpoint and a query language to interact with your data.
[discrete]
-[[search-data]]
-==== Searching your data
-
-The {es} REST APIs support structured queries, full text queries, and complex
-queries that combine the two. Structured queries are
-similar to the types of queries you can construct in SQL. For example, you
-could search the `gender` and `age` fields in your `employee` index and sort the
-matches by the `hire_date` field. Full-text queries find all documents that
-match the query string and return them sorted by _relevance_—how good a
-match they are for your search terms.
-
-In addition to searching for individual terms, you can perform phrase searches,
-similarity searches, and prefix searches, and get autocomplete suggestions.
-
-Have geospatial or other numerical data that you want to search? {es} indexes
-non-textual data in optimized data structures that support
-high-performance geo and numerical queries.
-
-You can access all of these search capabilities using {es}'s
-comprehensive JSON-style query language (<>). You can also
-construct <> to search and aggregate data
-natively inside {es}, and JDBC and ODBC drivers enable a broad range of
-third-party applications to interact with {es} via SQL.
+[[search-analyze-rest-api]]
+==== REST API
+
+Use REST APIs to manage your {es} cluster, and to index
+and search your data.
+For testing purposes, you can submit requests
+directly from the command line or through the Dev Tools {kibana-ref}/console-kibana.html[Console] in {kib}.
+From your applications, you can use a
+https://www.elastic.co/guide/en/elasticsearch/client/index.html[client]
+in your programming language of choice.
+
+Refer to <> for a hands-on example of using the `_search` endpoint, adding data to {es}, and running basic searches in Query DSL syntax.
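+
For example, a minimal `_search` call through the low-level Java REST client might look like the sketch below. The index name, field, query text, and the pre-built `restClient` instance are assumptions for illustration:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

// Hypothetical example: run a match query against an index named "books".
Request request = new Request("GET", "/books/_search");
request.setJsonEntity("""
    {
      "query": { "match": { "title": "snow" } }
    }
    """);
Response response = restClient.performRequest(request); // restClient: an existing RestClient
```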
[discrete]
-[[analyze-data]]
-==== Analyzing your data
+[[search-analyze-query-languages]]
+==== Query languages
+
+{es} provides a number of query languages for interacting with your data.
+
+*Query DSL* is the primary query language for {es} today.
+
+*{esql}* is a new piped query language and compute engine which was first added in version *8.11*.
+
+{esql} does not yet support all the features of Query DSL, like full-text search and semantic search.
+Look forward to new {esql} features and functionalities in each release.
+
+Refer to <> for a full overview of the query languages available in {es}.
+
+[discrete]
+[[search-analyze-query-dsl]]
+===== Query DSL
+
+<> is a full-featured JSON-style query language that enables complex searching, filtering, and aggregations.
+It is the original and most powerful query language for {es} today.
+
+The <> accepts queries written in Query DSL syntax.
+
+[discrete]
+[[search-analyze-query-dsl-search-filter]]
+====== Search and filter with Query DSL
+
+Query DSL supports a wide range of search techniques, including the following:
+
+* <>: Search text that has been analyzed and indexed to support phrase or proximity queries, fuzzy matches, and more.
+* <>: Search for exact matches using `keyword` fields.
+* <>: Search `semantic_text` fields using dense or sparse vector search on embeddings generated in your {es} cluster.
+* <>: Search for similar dense vectors using the kNN algorithm for embeddings generated outside of {es}.
+* <>: Search for locations and calculate spatial relationships using geospatial queries.
-{es} aggregations enable you to build complex summaries of your data and gain
-insight into key metrics, patterns, and trends. Instead of just finding the
-proverbial “needle in a haystack”, aggregations enable you to answer questions
-like:
+Learn about the full range of queries supported by <>.
-* How many needles are in the haystack?
-* What is the average length of the needles?
-* What is the median length of the needles, broken down by manufacturer?
-* How many needles were added to the haystack in each of the last six months?
+You can also filter data using Query DSL.
+Filters enable you to include or exclude documents by retrieving documents that match specific field-level criteria.
+A query that uses the `filter` parameter indicates <>.
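+
To illustrate filter context, here is a sketch reusing the hypothetical `books` index and `restClient` from the earlier example. The `match` clause contributes to relevance scoring, while the `term` clause inside `filter` only includes or excludes documents:

```java
import org.elasticsearch.client.Request;

Request request = new Request("GET", "/books/_search");
request.setJsonEntity("""
    {
      "query": {
        "bool": {
          "must":   [ { "match": { "title": "snow" } } ],
          "filter": [ { "term": { "status": "published" } } ]
        }
      }
    }
    """);
// restClient.performRequest(request) as in the earlier sketch.
```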
-You can also use aggregations to answer more subtle questions, such as:
+[discrete]
+[[search-analyze-data-query-dsl]]
+====== Analyze with Query DSL
-* What are your most popular needle manufacturers?
-* Are there any unusual or anomalous clumps of needles?
+<> are the primary tool for analyzing {es} data using Query DSL.
+Aggregations enable you to build complex summaries of your data and gain
+insight into key metrics, patterns, and trends.
-Because aggregations leverage the same data-structures used for search, they are
+Because aggregations leverage the same data structures used for search, they are
also very fast. This enables you to analyze and visualize your data in real time.
-Your reports and dashboards update as your data changes so you can take action
-based on the latest information.
+You can search documents, filter results, and perform analytics at the same time, on the same
+data, in a single request.
+That means aggregations are calculated in the context of the search query.
+
+The following aggregation types are available:
+
+* <>: Calculate metrics,
+such as a sum or average, from field values.
+* <>: Group documents into buckets based on field values, ranges,
+or other criteria.
+* <>: Run aggregations on the results of other aggregations.
+
+Run aggregations by specifying the <>'s `aggs` parameter.
+Learn more in <>.
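+
As an illustration, the following sketch (same hypothetical `books` index and `restClient` as above) nests a metric aggregation inside a bucket aggregation using the `aggs` parameter; `"size": 0` skips returning hits so only the aggregation results come back:

```java
import org.elasticsearch.client.Request;

Request request = new Request("GET", "/books/_search");
request.setJsonEntity("""
    {
      "size": 0,
      "aggs": {
        "by_author": {
          "terms": { "field": "author" },
          "aggs": { "avg_pages": { "avg": { "field": "pages" } } }
        }
      }
    }
    """);
```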
+
+[discrete]
+[[search-analyze-data-esql]]
+===== {esql}
-What’s more, aggregations operate alongside search requests. You can search
-documents, filter results, and perform analytics at the same time, on the same
-data, in a single request. And because aggregations are calculated in the
-context of a particular search, you’re not just displaying a count of all
-size 70 needles, you’re displaying a count of the size 70 needles
-that match your users' search criteria--for example, all size 70 _non-stick
-embroidery_ needles.
+<> is a piped query language for filtering, transforming, and analyzing data.
+{esql} is built on top of a new compute engine, where search, aggregation, and transformation functions are
+directly executed within {es} itself.
+{esql} syntax can also be used within various {kib} tools.
+
+The <> accepts queries written in {esql} syntax.
+
+Today, it supports a subset of the features available in Query DSL, like aggregations, filters, and transformations.
+It does not yet support full-text search or semantic search.
+
+It comes with a comprehensive set of <> for working with data and has robust integration with {kib}'s Discover, dashboards and visualizations.
+
+Learn more in <>, or try https://www.elastic.co/training/introduction-to-esql[our training course].
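+
For instance, a sketch of an {esql} query over the hypothetical `books` index from the earlier examples, submitted as JSON to the `_query` endpoint:

```java
import org.elasticsearch.client.Request;

// FROM selects the index, WHERE filters rows, STATS ... BY aggregates.
Request request = new Request("POST", "/_query");
request.setJsonEntity("""
    {
      "query": "FROM books | WHERE pages > 300 | STATS count = COUNT(*) BY author | LIMIT 10"
    }
    """);
```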
+
+[discrete]
+[[search-analyze-data-query-languages-table]]
+==== List of available query languages
+The following table summarizes all available {es} query languages, to help you choose the right one for your use case.
+
+[cols="1,2,2,1", options="header"]
+|===
+| Name | Description | Use cases | API endpoint
+
+| <>
+| The primary query language for {es}. A powerful and flexible JSON-style language that enables complex queries.
+| Full-text search, semantic search, keyword search, filtering, aggregations, and more.
+| <>
+
+
+| <>
+| Introduced in *8.11*, the Elasticsearch Query Language ({esql}) is a piped query language for filtering, transforming, and analyzing data.
+| Initially tailored towards working with time series data like logs and metrics.
+Robust integration with {kib} for querying, visualizing, and analyzing data.
+Does not yet support full-text search.
+| <>
+
+
+| <>
+| Event Query Language (EQL) is a query language for event-based time series data. Data must contain the `@timestamp` field to use EQL.
+| Designed for the threat hunting security use case.
+| <>
+
+| <>
+| Allows native, real-time SQL-like querying against {es} data. JDBC and ODBC drivers are available for integration with business intelligence (BI) tools.
+| Enables users familiar with SQL to query {es} data using familiar syntax for BI and reporting.
+| <>
+
+| {kibana-ref}/kuery-query.html[Kibana Query Language (KQL)]
+| Kibana Query Language (KQL) is a text-based query language for filtering data when you access it through the {kib} UI.
+| Use KQL to filter documents where a value for a field exists, matches a given value, or is within a given range.
+| N/A
+
+|===
+
+// New html page
+// TODO: this page won't live here long term
[[scalability]]
-=== Scalability and resilience
+=== Plan for production
{es} is built to be always available and to scale with your needs. It does this
by being distributed by nature. You can add servers (nodes) to a cluster to
diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc
index e781dc0aff4e3..f1b5ce8210996 100644
--- a/docs/reference/landing-page.asciidoc
+++ b/docs/reference/landing-page.asciidoc
@@ -62,7 +62,7 @@
Elasticsearch is the search and analytics engine that powers the Elastic Stack.
diff --git a/docs/reference/quickstart/getting-started.asciidoc b/docs/reference/quickstart/getting-started.asciidoc
index 6b3095e07f9d4..e674dda147bcc 100644
--- a/docs/reference/quickstart/getting-started.asciidoc
+++ b/docs/reference/quickstart/getting-started.asciidoc
@@ -1,47 +1,20 @@
[[getting-started]]
-== Quick start guide
+== Quick start: Add data using Elasticsearch APIs
+++++
+Basics: Add data using APIs
+++++
-This guide helps you learn how to:
+In this quick start guide, you'll learn how to do the following tasks:
-* Run {es} and {kib} (using {ecloud} or in a local Docker dev environment),
-* add simple (non-timestamped) dataset to {es},
-* run basic searches.
-
-[TIP]
-====
-If you're interested in using {es} with Python, check out Elastic Search Labs. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG).
-
-* https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: this walks you through building a complete search solution with {es}, from the ground up.
-* https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: it contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps].
-====
-
-[discrete]
-[[run-elasticsearch]]
-=== Run {es}
-
-The simplest way to set up {es} is to create a managed deployment with {ess} on
-{ecloud}. If you prefer to manage your own test environment, install and
-run {es} using Docker.
-
-include::{es-ref-dir}/tab-widgets/code.asciidoc[]
-include::{es-ref-dir}/tab-widgets/quick-start-install-widget.asciidoc[]
-
-[discrete]
-[[send-requests-to-elasticsearch]]
-=== Send requests to {es}
-
-You send data and other requests to {es} using REST APIs. This lets you interact
-with {es} using any client that sends HTTP requests, such as
-https://curl.se[curl]. You can also use {kib}'s Console to send requests to
-{es}.
-
-include::{es-ref-dir}/tab-widgets/api-call-widget.asciidoc[]
+* Add a small, non-timestamped dataset to {es} using Elasticsearch REST APIs.
+* Run basic searches.
[discrete]
[[add-data]]
=== Add data
-You add data to {es} as JSON objects called documents. {es} stores these
+You add data to {es} as JSON objects called documents.
+{es} stores these
documents in searchable indices.
[discrete]
@@ -58,6 +31,13 @@ The request automatically creates the index.
PUT books
----
// TESTSETUP
+
+[source,console]
+--------------------------------------------------
+DELETE books
+--------------------------------------------------
+// TEARDOWN
+
////
[source,console]
@@ -236,10 +216,11 @@ JSON object submitted during indexing.
[[qs-match-query]]
==== `match` query
-You can use the `match` query to search for documents that contain a specific value in a specific field.
+You can use the <> to search for documents that contain a specific value in a specific field.
This is the standard query for performing full-text search, including fuzzy matching and phrase searches.
Run the following command to search the `books` index for documents containing `brave` in the `name` field:
+
[source,console]
----
GET books/_search
@@ -251,34 +232,4 @@ GET books/_search
}
}
----
-// TEST[continued]
-
-[discrete]
-[[whats-next]]
-=== Next steps
-
-Now that {es} is up and running and you've learned the basics, you'll probably want to test out larger datasets, or index your own data.
-
-[discrete]
-[[whats-next-search-learn-more]]
-==== Learn more about search queries
-
-* <>. Jump here to learn about exact value search, full-text search, vector search, and more, using the <>.
-
-[discrete]
-[[whats-next-more-data]]
-==== Add more data
-
-* Learn how to {kibana-ref}/sample-data.html[install sample data] using {kib}. This is a quick way to test out {es} on larger workloads.
-* Learn how to use the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[upload data UI] in {kib} to add your own CSV, TSV, or JSON files.
-* Use the https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[bulk API] to ingest your own datasets to {es}.
-
-[discrete]
-[[whats-next-client-libraries]]
-==== {es} programming language clients
-
-* Check out our https://www.elastic.co/guide/en/elasticsearch/client/index.html[client library] to work with your {es} instance in your preferred programming language.
-* If you're using Python, check out https://www.elastic.co/search-labs[Elastic Search Labs] for a range of examples that use the {es} Python client. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG).
-** This extensive, hands-on https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[tutorial]
-walks you through building a complete search solution with {es}, from the ground up.
-** https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] contains a range of executable Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps].
\ No newline at end of file
+// TEST[continued]
\ No newline at end of file
diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc
index e517d039e620b..6bfed4c198c75 100644
--- a/docs/reference/quickstart/index.asciidoc
+++ b/docs/reference/quickstart/index.asciidoc
@@ -1,10 +1,29 @@
[[quickstart]]
-= Quickstart
+= Quick starts
-Get started quickly with {es}.
+Use these quick starts to get hands-on experience with the {es} APIs.
+Unless otherwise noted, these examples will use queries written in <> syntax.
-* Learn how to run {es} (and {kib}) for <>.
-* Follow our <> to add data to {es} and query it.
+[discrete]
+[[quickstart-requirements]]
+== Requirements
-include::run-elasticsearch-locally.asciidoc[]
-include::getting-started.asciidoc[]
+You'll need a running {es} cluster, together with {kib}, to use the Dev Tools API Console.
+Get started <>, or see our <>.
+
+[discrete]
+[[quickstart-list]]
+== Hands-on quick starts
+
+* <>. Learn how to add data to {es} and perform basic searches.
+
+[discrete]
+[[quickstart-python-links]]
+== Working in Python
+
+If you're interested in using {es} with Python, check out Elastic Search Labs:
+
+* https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: Contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps].
+* https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: This walks you through building a complete search solution with {es} from the ground up using Flask.
+
+include::getting-started.asciidoc[]
\ No newline at end of file
diff --git a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc b/docs/reference/run-elasticsearch-locally.asciidoc
similarity index 68%
rename from docs/reference/quickstart/run-elasticsearch-locally.asciidoc
rename to docs/reference/run-elasticsearch-locally.asciidoc
index 24e0f3f22350e..64bcd3d066529 100644
--- a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
+++ b/docs/reference/run-elasticsearch-locally.asciidoc
@@ -1,7 +1,7 @@
[[run-elasticsearch-locally]]
-== Run {es} locally in Docker (without security)
+== Run {es} locally in Docker
++++
-Local dev setup (Docker)
+Run {es} locally
++++
[WARNING]
@@ -9,24 +9,13 @@
*DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS*
The instructions on this page are for *local development only*. Do not use these instructions for production deployments, because they are not secure.
-While this approach is convenient for experimenting and learning, you should never run the service in this way in a production environment.
+While this approach is convenient for experimenting and learning, you should never run Elasticsearch in this way in a production environment.
====
-The following commands help you very quickly spin up a single-node {es} cluster, together with {kib} in Docker.
-Note that if you don't need the {kib} UI, you can skip those instructions.
+Follow this tutorial if you want to quickly set up {es} in Docker for local development or testing.
-[discrete]
-[[local-dev-why]]
-=== When would I use this setup?
-
-Use this setup if you want to quickly spin up {es} (and {kib}) for local development or testing.
-
-For example you might:
-
-* Want to run a quick test to see how a feature works.
-* Follow a tutorial or guide that requires an {es} cluster, like our <>.
-* Experiment with the {es} APIs using different tools, like the Dev Tools Console, cURL, or an Elastic programming language client.
-* Quickly spin up an {es} cluster to test an executable https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebook] locally.
+This tutorial also includes instructions for installing {kib}.
+If you don't need access to the {kib} UI, then you can skip those instructions.
[discrete]
[[local-dev-prerequisites]]
@@ -118,12 +107,12 @@ When you access {kib}, use `elastic` as the username and the password you set ea
[NOTE]
====
-The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to basic instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page].
+The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever.
====
[discrete]
[[local-dev-connecting-clients]]
-== Connecting to {es} with language clients
+=== Connect to {es} with language clients
To connect to the {es} cluster from a language client, you can use basic authentication with the `elastic` username and the password you set in the environment variable.
@@ -172,12 +161,11 @@ curl -u elastic:$ELASTIC_PASSWORD \
[[local-dev-next-steps]]
=== Next steps
-Use our <> to learn the basics of {es}: how to add data and query it.
+Use our <> to learn the basics of {es}.
[discrete]
[[local-dev-production]]
=== Moving to production
-This setup is not suitable for production use. For production deployments, we recommend using our managed service on Elastic Cloud. https://cloud.elastic.co/registration[Sign up for a free trial] (no credit card required).
-
-Otherwise, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Install {es}] to learn about the various options for installing {es} in a self-managed production environment, including using Docker.
+This setup is not suitable for production use.
+Refer to <> for more information.
\ No newline at end of file
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
index b346fddc5e5a1..a284e563917c3 100644
--- a/docs/reference/setup.asciidoc
+++ b/docs/reference/setup.asciidoc
@@ -27,6 +27,8 @@ the only resource-intensive application on the host or container. For example,
you might run {metricbeat} alongside {es} for cluster statistics, but a
resource-heavy {ls} deployment should be on its own host.
+include::run-elasticsearch-locally.asciidoc[]
+
include::setup/install.asciidoc[]
include::setup/configuration.asciidoc[]
diff --git a/docs/reference/tab-widgets/api-call.asciidoc b/docs/reference/tab-widgets/api-call.asciidoc
index bb6b89374075d..5e70d73684436 100644
--- a/docs/reference/tab-widgets/api-call.asciidoc
+++ b/docs/reference/tab-widgets/api-call.asciidoc
@@ -1,5 +1,5 @@
// tag::cloud[]
-**Use {kib}**
+**Option 1: Use {kib}**
//tag::kibana-api-ex[]
. Open {kib}'s main menu ("*☰*" near Elastic logo) and go to **Dev Tools > Console**.
@@ -16,9 +16,9 @@ GET /
//end::kibana-api-ex[]
-**Use curl**
+**Option 2: Use `curl`**
-To communicate with {es} using curl or another client, you need your cluster's
+To communicate with {es} using `curl` or another client, you need your cluster's
endpoint.
. Open {kib}'s main menu and click **Manage this deployment**.
@@ -26,7 +26,7 @@ endpoint.
. From your deployment menu, go to the **Elasticsearch** page. Click **Copy
endpoint**.
-. To submit an example API request, run the following curl command in a new
+. To submit an example API request, run the following `curl` command in a new
terminal session. Replace `` with the password for the `elastic` user.
Replace `` with your endpoint.
+
From 138e100d3bf3d3ef8a71245456492495c8e58cef Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Wed, 25 Sep 2024 19:34:11 +0200
Subject: [PATCH 29/30] Workaround packaging tests failures on debian10
(#113550)
This is a workaround until https://github.com/elastic/elasticsearch/issues/113549 is addressed
---
.ci/scripts/packaging-test.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.ci/scripts/packaging-test.sh b/.ci/scripts/packaging-test.sh
index 1626255c30b4f..6b9938dabffa8 100755
--- a/.ci/scripts/packaging-test.sh
+++ b/.ci/scripts/packaging-test.sh
@@ -39,7 +39,7 @@ if [ -f "/etc/os-release" ] ; then
# Work around incorrect lintian version
# https://github.com/elastic/elasticsearch/issues/48573
if [ $VERSION_ID == 10 ] ; then
- sudo apt-get update -y
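+ # Workaround: tolerate transient apt-get update failures until
+ # https://github.com/elastic/elasticsearch/issues/113549 is addressed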
+ sudo apt-get update -y || true
sudo apt-get install -y --allow-downgrades lintian=2.15.0
fi
fi
From 43ec760e9a54596c8dfd79d23feb9d28000739e7 Mon Sep 17 00:00:00 2001
From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com>
Date: Wed, 25 Sep 2024 13:40:10 -0400
Subject: [PATCH 30/30] [ML] Zone aware planner renaming & related refactoring
(#111522)
* Renaming - code mentioned modelId but was actually deploymentId
* Documenting
* add a test case and more renaming
* Renaming & remove TODOs
* Update MlAutoscalingStats javadoc to match autoscaler comments
* precommit
---
.../ml/autoscaling/MlAutoscalingStats.java | 39 +++++---
.../TrainedModelAssignmentRebalancer.java | 16 +--
.../planning/AbstractPreserveAllocations.java | 19 ++--
.../assignment/planning/AssignmentPlan.java | 50 ++++++----
.../planning/AssignmentPlanner.java | 14 +--
.../planning/LinearProgrammingPlanSolver.java | 32 +++---
.../planning/ZoneAwareAssignmentPlanner.java | 97 +++++++++++--------
.../planning/AssignmentPlanTests.java | 26 ++---
.../planning/AssignmentPlannerTests.java | 84 ++++++++--------
.../planning/PreserveAllAllocationsTests.java | 8 +-
.../planning/PreserveOneAllocationTests.java | 8 +-
.../ZoneAwareAssignmentPlannerTests.java | 51 +++++++---
12 files changed, 255 insertions(+), 189 deletions(-)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/autoscaling/MlAutoscalingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/autoscaling/MlAutoscalingStats.java
index ffadf4cafaf12..febe6e97a12aa 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/autoscaling/MlAutoscalingStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/autoscaling/MlAutoscalingStats.java
@@ -29,21 +29,30 @@
*
* The word "total" in an attribute name indicates that the attribute is a sum across all nodes.
*
- * @param currentTotalNodes the count of nodes that are currently in the cluster
- * @param currentPerNodeMemoryBytes the minimum size (memory) of all nodes in the cluster
- * @param currentTotalModelMemoryBytes the sum of model memory over every assignment/deployment
- * @param currentTotalProcessorsInUse the sum of processors used over every assignment/deployment
- * @param currentPerNodeMemoryOverheadBytes always equal to MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD
- * @param wantedMinNodes the minimum number of nodes that must be provided by the autoscaler
- * @param wantedExtraPerNodeMemoryBytes the amount of additional memory that must be provided on every node
- * (this value must be >0 to trigger a scale up based on memory)
- * @param wantedExtraPerNodeNodeProcessors the number of additional processors that must be provided on every node
- * (this value must be >0 to trigger a scale up based on processors)
- * @param wantedExtraModelMemoryBytes the amount of additional model memory that is newly required
- * (due to a new assignment/deployment)
- * @param wantedExtraProcessors the number of additional processors that are required to be added to the cluster
- * @param unwantedNodeMemoryBytesToRemove the amount of memory that should be removed from the cluster. If this is equal to the amount of
- * memory provided by a node, a node will be removed.
+ * @param currentTotalNodes The count of nodes that are currently in the cluster,
+ * used to confirm that both sides have the same view of the current state.
+ * @param currentPerNodeMemoryBytes The minimum size (memory) of all nodes in the cluster,
+ * used to confirm that both sides have the same view of the current state.
+ * @param currentTotalModelMemoryBytes The sum of model memory over every assignment/deployment, used to calculate requirements
+ * @param currentTotalProcessorsInUse The sum of processors used over every assignment/deployment, not used by autoscaler
+ * @param currentPerNodeMemoryOverheadBytes Always equal to MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.
+ * @param wantedMinNodes The minimum number of nodes that must be provided by the autoscaler
+ * @param wantedExtraPerNodeMemoryBytes If there are jobs or trained models that have been started but cannot be allocated on the
+ * ML nodes currently within the cluster then this will be the *max* of the ML native memory
+ * requirements of those jobs/trained models. The metric is in terms of ML native memory,
+ * not container memory.
+ * @param wantedExtraPerNodeNodeProcessors If there are trained model allocations that have been started but cannot be allocated on the
+ * ML nodes currently within the cluster then this will be the *max* of the vCPU requirements of
+ * those allocations. Zero otherwise.
+ * @param wantedExtraModelMemoryBytes If there are jobs or trained models that have been started but cannot be allocated on the ML
+ * nodes currently within the cluster then this will be the *sum* of the ML native memory
+ * requirements of those jobs/trained models. The metric is in terms of ML native memory,
+ * not container memory.
+ * @param wantedExtraProcessors If there are trained model allocations that have been started but cannot be allocated on the
+ * ML nodes currently within the cluster then this will be the *sum* of the vCPU requirements
+ * of those allocations. Zero otherwise.
+ * @param unwantedNodeMemoryBytesToRemove The size of the ML node to be removed, in GB rounded to the nearest GB,
+ * or zero if no nodes could be removed.
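+ *
+ * For example (illustrative numbers): if two trained models have been started but cannot be allocated,
+ * and they require 2GB and 3GB of ML native memory respectively, then wantedExtraPerNodeMemoryBytes
+ * would be 3GB (the max) and wantedExtraModelMemoryBytes would be 5GB (the sum).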
*/
public record MlAutoscalingStats(
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java
index 624ef5434e2a0..8804d588988b2 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java
@@ -123,8 +123,8 @@ private static AssignmentPlan mergePlans(
nodesByZone.values().forEach(allNodes::addAll);
final List allDeployments = new ArrayList<>();
- allDeployments.addAll(planForNormalPriorityModels.models());
- allDeployments.addAll(planForLowPriorityModels.models());
+ allDeployments.addAll(planForNormalPriorityModels.deployments());
+ allDeployments.addAll(planForLowPriorityModels.deployments());
final Map originalNodeById = allNodes.stream()
.collect(Collectors.toMap(AssignmentPlan.Node::id, Function.identity()));
@@ -139,7 +139,7 @@ private static void copyAssignments(
AssignmentPlan.Builder dest,
Map originalNodeById
) {
- for (AssignmentPlan.Deployment m : source.models()) {
+ for (AssignmentPlan.Deployment m : source.deployments()) {
Map nodeAssignments = source.assignments(m).orElse(Map.of());
for (Map.Entry assignment : nodeAssignments.entrySet()) {
AssignmentPlan.Node originalNode = originalNodeById.get(assignment.getKey().id());
@@ -328,14 +328,14 @@ private static long getNodeFreeMemoryExcludingPerNodeOverheadAndNativeInference(
private TrainedModelAssignmentMetadata.Builder buildAssignmentsFromPlan(AssignmentPlan assignmentPlan) {
TrainedModelAssignmentMetadata.Builder builder = TrainedModelAssignmentMetadata.Builder.empty();
- for (AssignmentPlan.Deployment deployment : assignmentPlan.models()) {
- TrainedModelAssignment existingAssignment = currentMetadata.getDeploymentAssignment(deployment.id());
+ for (AssignmentPlan.Deployment deployment : assignmentPlan.deployments()) {
+ TrainedModelAssignment existingAssignment = currentMetadata.getDeploymentAssignment(deployment.deploymentId());
TrainedModelAssignment.Builder assignmentBuilder = existingAssignment == null && createAssignmentRequest.isPresent()
? TrainedModelAssignment.Builder.empty(createAssignmentRequest.get())
: TrainedModelAssignment.Builder.empty(
- currentMetadata.getDeploymentAssignment(deployment.id()).getTaskParams(),
- currentMetadata.getDeploymentAssignment(deployment.id()).getAdaptiveAllocationsSettings()
+ currentMetadata.getDeploymentAssignment(deployment.deploymentId()).getTaskParams(),
+ currentMetadata.getDeploymentAssignment(deployment.deploymentId()).getAdaptiveAllocationsSettings()
);
if (existingAssignment != null) {
assignmentBuilder.setStartTime(existingAssignment.getStartTime());
@@ -366,7 +366,7 @@ private TrainedModelAssignmentMetadata.Builder buildAssignmentsFromPlan(Assignme
assignmentBuilder.calculateAndSetAssignmentState();
explainAssignments(assignmentPlan, nodeLoads, deployment).ifPresent(assignmentBuilder::setReason);
- builder.addNewAssignment(deployment.id(), assignmentBuilder);
+ builder.addNewAssignment(deployment.deploymentId(), assignmentBuilder);
}
return builder;
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java
index 0151c8f5ee9c8..66b8d9e570211 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java
@@ -54,7 +54,7 @@ Deployment modifyModelPreservingPreviousAssignments(Deployment m) {
}
return new Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
m.allocations() - calculatePreservedAllocations(m),
m.threadsPerAllocation(),
@@ -71,11 +71,14 @@ AssignmentPlan mergePreservedAllocations(AssignmentPlan assignmentPlan) {
// they will not match the models/nodes members we have in this class.
// Therefore, we build a lookup table based on the ids, so we can merge the plan
// with its preserved allocations.
- final Map, Integer> plannedAssignmentsByModelNodeIdPair = new HashMap<>();
- for (Deployment m : assignmentPlan.models()) {
- Map assignments = assignmentPlan.assignments(m).orElse(Map.of());
- for (Map.Entry nodeAssignment : assignments.entrySet()) {
- plannedAssignmentsByModelNodeIdPair.put(Tuple.tuple(m.id(), nodeAssignment.getKey().id()), nodeAssignment.getValue());
+ final Map, Integer> plannedAssignmentsByDeploymentNodeIdPair = new HashMap<>();
+ for (Deployment d : assignmentPlan.deployments()) {
+ Map assignmentsOfDeployment = assignmentPlan.assignments(d).orElse(Map.of());
+ for (Map.Entry nodeAssignment : assignmentsOfDeployment.entrySet()) {
+ plannedAssignmentsByDeploymentNodeIdPair.put(
+ Tuple.tuple(d.deploymentId(), nodeAssignment.getKey().id()),
+ nodeAssignment.getValue()
+ );
}
}
@@ -93,8 +96,8 @@ AssignmentPlan mergePreservedAllocations(AssignmentPlan assignmentPlan) {
}
}
for (Deployment deploymentNewAllocations : deployments) {
- int newAllocations = plannedAssignmentsByModelNodeIdPair.getOrDefault(
- Tuple.tuple(deploymentNewAllocations.id(), n.id()),
+ int newAllocations = plannedAssignmentsByDeploymentNodeIdPair.getOrDefault(
+ Tuple.tuple(deploymentNewAllocations.deploymentId(), n.id()),
0
);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java
index 7fc16394ed85c..c294e7b2de792 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java
@@ -31,8 +31,22 @@
*/
public class AssignmentPlan implements Comparable {
+ /**
+ *
+ * @param deploymentId
+ * @param memoryBytes
+ * @param allocations
+ * @param threadsPerAllocation
+ * @param currentAllocationsByNodeId
+ * @param maxAssignedAllocations this value is used by the ZoneAwareAssignmentPlanner and AssignmentPlanner to keep track of the
+ * maximum number of allocations that have been assigned. It is mainly used when assigning across availability zones (AZs).
+ * @param adaptiveAllocationsSettings
+ * @param priority
+ * @param perDeploymentMemoryBytes
+ * @param perAllocationMemoryBytes
+ */
public record Deployment(
- String id,
+ String deploymentId,
long memoryBytes,
int allocations,
int threadsPerAllocation,
@@ -44,7 +58,7 @@ public record Deployment(
long perAllocationMemoryBytes
) {
public Deployment(
- String id,
+ String deploymentId,
long modelBytes,
int allocations,
int threadsPerAllocation,
@@ -55,7 +69,7 @@ public Deployment(
long perAllocationMemoryBytes
) {
this(
- id,
+ deploymentId,
modelBytes,
allocations,
threadsPerAllocation,
@@ -82,7 +96,7 @@ boolean hasEverBeenAllocated() {
public long estimateMemoryUsageBytes(int allocations) {
return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes(
- id,
+ deploymentId,
memoryBytes,
perDeploymentMemoryBytes,
perAllocationMemoryBytes,
@@ -92,13 +106,13 @@ public long estimateMemoryUsageBytes(int allocations) {
long estimateAdditionalMemoryUsageBytes(int allocationsOld, int allocationsNew) {
return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes(
- id,
+ deploymentId,
memoryBytes,
perDeploymentMemoryBytes,
perAllocationMemoryBytes,
allocationsNew
) - StartTrainedModelDeploymentAction.estimateMemoryUsageBytes(
- id,
+ deploymentId,
memoryBytes,
perDeploymentMemoryBytes,
perAllocationMemoryBytes,
@@ -109,7 +123,7 @@ long estimateAdditionalMemoryUsageBytes(int allocationsOld, int allocationsNew)
long minimumMemoryRequiredBytes() {
return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes(
- id,
+ deploymentId,
memoryBytes,
perDeploymentMemoryBytes,
perAllocationMemoryBytes,
@@ -136,7 +150,7 @@ int findExcessAllocations(int maxAllocations, long availableMemoryBytes) {
@Override
public String toString() {
- return id
+ return deploymentId
+ " (mem = "
+ ByteSizeValue.ofBytes(memoryBytes)
+ ") (allocations = "
@@ -186,7 +200,7 @@ private AssignmentPlan(
this.remainingModelAllocations = Objects.requireNonNull(remainingModelAllocations);
}
- public Set models() {
+ public Set deployments() {
return assignments.keySet();
}
@@ -208,7 +222,7 @@ public int compareTo(AssignmentPlan o) {
}
public boolean satisfiesCurrentAssignments() {
- return models().stream().allMatch(this::isSatisfyingCurrentAssignmentsForModel);
+ return deployments().stream().allMatch(this::isSatisfyingCurrentAssignmentsForModel);
}
private boolean isSatisfyingCurrentAssignmentsForModel(Deployment m) {
@@ -225,18 +239,18 @@ public boolean satisfiesAllocations(Deployment m) {
}
public boolean satisfiesAllModels() {
- return models().stream().allMatch(this::satisfiesAllocations);
+ return deployments().stream().allMatch(this::satisfiesAllocations);
}
public boolean arePreviouslyAssignedModelsAssigned() {
- return models().stream()
+ return deployments().stream()
.filter(Deployment::hasEverBeenAllocated)
.map(this::totalAllocations)
.allMatch(totalAllocations -> totalAllocations > 0);
}
public long countPreviouslyAssignedModelsThatAreStillAssigned() {
- return models().stream()
+ return deployments().stream()
.filter(Deployment::hasEverBeenAllocated)
.map(this::totalAllocations)
.filter(totalAllocations -> totalAllocations > 0)
@@ -301,11 +315,11 @@ public String prettyPrint() {
msg.append(" ->");
for (Tuple modelAllocations : nodeToModel.get(n)
.stream()
- .sorted(Comparator.comparing(x -> x.v1().id()))
+ .sorted(Comparator.comparing(x -> x.v1().deploymentId()))
.toList()) {
if (modelAllocations.v2() > 0) {
msg.append(" ");
- msg.append(modelAllocations.v1().id());
+ msg.append(modelAllocations.v1().deploymentId());
msg.append(" (mem = ");
msg.append(ByteSizeValue.ofBytes(modelAllocations.v1().memoryBytes()));
msg.append(")");
@@ -415,7 +429,7 @@ public Builder assignModelToNode(Deployment deployment, Node node, int allocatio
+ "] to assign ["
+ allocations
+ "] allocations to deployment ["
- + deployment.id()
+ + deployment.deploymentId()
+ "]"
);
}
@@ -426,7 +440,7 @@ public Builder assignModelToNode(Deployment deployment, Node node, int allocatio
+ "] to assign ["
+ allocations
+ "] allocations to deployment ["
- + deployment.id()
+ + deployment.deploymentId()
+ "]; required threads per allocation ["
+ deployment.threadsPerAllocation()
+ "]"
@@ -464,7 +478,7 @@ public void accountMemory(Deployment m, Node n) {
private void accountMemory(Deployment m, Node n, long requiredMemory) {
remainingNodeMemory.computeIfPresent(n, (k, v) -> v - requiredMemory);
if (remainingNodeMemory.containsKey(n) && remainingNodeMemory.get(n) < 0) {
- throw new IllegalArgumentException("not enough memory on node [" + n.id() + "] to assign model [" + m.id() + "]");
+ throw new IllegalArgumentException("not enough memory on node [" + n.id() + "] to assign model [" + m.deploymentId() + "]");
}
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
index 38279a2fd6c03..8b5f33e25e242 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java
@@ -50,7 +50,7 @@ public class AssignmentPlanner {
public AssignmentPlanner(List nodes, List deployments) {
this.nodes = nodes.stream().sorted(Comparator.comparing(Node::id)).toList();
- this.deployments = deployments.stream().sorted(Comparator.comparing(AssignmentPlan.Deployment::id)).toList();
+ this.deployments = deployments.stream().sorted(Comparator.comparing(AssignmentPlan.Deployment::deploymentId)).toList();
}
public AssignmentPlan computePlan() {
@@ -111,7 +111,7 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat
.filter(m -> m.hasEverBeenAllocated())
.map(
m -> new AssignmentPlan.Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
1,
m.threadsPerAllocation(),
@@ -130,21 +130,21 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat
).solvePlan(true);
Map modelIdToNodeIdWithSingleAllocation = new HashMap<>();
- for (AssignmentPlan.Deployment m : planWithSingleAllocationForPreviouslyAssignedModels.models()) {
+ for (AssignmentPlan.Deployment m : planWithSingleAllocationForPreviouslyAssignedModels.deployments()) {
Optional> assignments = planWithSingleAllocationForPreviouslyAssignedModels.assignments(m);
Set nodes = assignments.orElse(Map.of()).keySet();
if (nodes.isEmpty() == false) {
assert nodes.size() == 1;
- modelIdToNodeIdWithSingleAllocation.put(m.id(), nodes.iterator().next().id());
+ modelIdToNodeIdWithSingleAllocation.put(m.deploymentId(), nodes.iterator().next().id());
}
}
List planDeployments = deployments.stream().map(m -> {
- Map currentAllocationsByNodeId = modelIdToNodeIdWithSingleAllocation.containsKey(m.id())
- ? Map.of(modelIdToNodeIdWithSingleAllocation.get(m.id()), 1)
+ Map currentAllocationsByNodeId = modelIdToNodeIdWithSingleAllocation.containsKey(m.deploymentId())
+ ? Map.of(modelIdToNodeIdWithSingleAllocation.get(m.deploymentId()), 1)
: Map.of();
return new AssignmentPlan.Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
m.allocations(),
m.threadsPerAllocation(),
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
index bd97680e285cc..90b3d3590a254 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java
@@ -279,24 +279,24 @@ private boolean solveLinearProgram(
Map, Variable> allocationVars = new HashMap<>();
- for (AssignmentPlan.Deployment m : deployments) {
+ for (AssignmentPlan.Deployment d : deployments) {
for (Node n : nodes) {
- Variable allocationVar = model.addVariable("allocations_of_model_" + m.id() + "_on_node_" + n.id())
+ Variable allocationVar = model.addVariable("allocations_of_model_" + d.deploymentId() + "_on_node_" + n.id())
.integer(false) // We relax the program to non-integer as the integer solver is much slower and can often lead to
// infeasible solutions
.lower(0.0) // It is important not to set an upper bound here as it impacts memory negatively
- .weight(weightForAllocationVar(m, n, weights));
- allocationVars.put(Tuple.tuple(m, n), allocationVar);
+ .weight(weightForAllocationVar(d, n, weights));
+ allocationVars.put(Tuple.tuple(d, n), allocationVar);
}
}
- for (Deployment m : deployments) {
+ for (Deployment d : deployments) {
// Each model should not get more allocations than is required.
// Also, if the model has previous assignments, it should get at least as many allocations as it did before.
- model.addExpression("allocations_of_model_" + m.id() + "_not_more_than_required")
- .lower(m.getCurrentAssignedAllocations())
- .upper(m.allocations())
- .setLinearFactorsSimple(varsForModel(m, allocationVars));
+ model.addExpression("allocations_of_model_" + d.deploymentId() + "_not_more_than_required")
+ .lower(d.getCurrentAssignedAllocations())
+ .upper(d.allocations())
+ .setLinearFactorsSimple(varsForModel(d, allocationVars));
}
double[] threadsPerAllocationPerModel = deployments.stream().mapToDouble(m -> m.threadsPerAllocation()).toArray();
@@ -374,18 +374,18 @@ private String prettyPrintSolverResult(
for (int i = 0; i < nodes.size(); i++) {
Node n = nodes.get(i);
msg.append(n + " ->");
- for (Deployment m : deployments) {
- if (threadValues.get(Tuple.tuple(m, n)) > 0) {
+ for (Deployment d : deployments) {
+ if (threadValues.get(Tuple.tuple(d, n)) > 0) {
msg.append(" ");
- msg.append(m.id());
+ msg.append(d.deploymentId());
msg.append(" (mem = ");
- msg.append(ByteSizeValue.ofBytes(m.memoryBytes()));
+ msg.append(ByteSizeValue.ofBytes(d.memoryBytes()));
msg.append(") (allocations = ");
- msg.append(threadValues.get(Tuple.tuple(m, n)));
+ msg.append(threadValues.get(Tuple.tuple(d, n)));
msg.append("/");
- msg.append(m.allocations());
+ msg.append(d.allocations());
msg.append(") (y = ");
- msg.append(assignmentValues.get(Tuple.tuple(m, n)));
+ msg.append(assignmentValues.get(Tuple.tuple(d, n)));
msg.append(")");
}
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
index 1f0857391598f..c5b750f91014f 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java
@@ -80,22 +80,22 @@ private AssignmentPlan computePlan(boolean tryAssigningPreviouslyAssignedModels)
// allocated on the first per zone assignment plans.
int remainingZones = nodesByZone.size();
- Map modelIdToRemainingAllocations = deployments.stream()
- .collect(Collectors.toMap(AssignmentPlan.Deployment::id, AssignmentPlan.Deployment::allocations));
+ Map deploymentIdToRemainingAllocations = deployments.stream()
+ .collect(Collectors.toMap(AssignmentPlan.Deployment::deploymentId, AssignmentPlan.Deployment::allocations));
List plans = new ArrayList<>();
for (var zoneToNodes : nodesByZone.entrySet()) {
logger.debug(() -> format("computing plan for availability zone %s", zoneToNodes.getKey()));
AssignmentPlan plan = computeZonePlan(
zoneToNodes.getValue(),
- modelIdToRemainingAllocations,
+ deploymentIdToRemainingAllocations,
remainingZones,
tryAssigningPreviouslyAssignedModels
);
- plan.models()
+ plan.deployments()
.forEach(
- m -> modelIdToRemainingAllocations.computeIfPresent(
- m.id(),
- (modelId, remainingAllocations) -> remainingAllocations - plan.totalAllocations(m)
+ d -> deploymentIdToRemainingAllocations.computeIfPresent(
+ d.deploymentId(),
+ (deploymentId, remainingAllocations) -> remainingAllocations - plan.totalAllocations(d)
)
);
plans.add(plan);
@@ -108,56 +108,69 @@ private AssignmentPlan computePlan(boolean tryAssigningPreviouslyAssignedModels)
private AssignmentPlan computeZonePlan(
List nodes,
- Map modelIdToRemainingAllocations,
+ Map deploymentIdToRemainingAllocations,
int remainingZones,
boolean tryAssigningPreviouslyAssignedModels
) {
- Map modelIdToTargetAllocations = modelIdToRemainingAllocations.entrySet()
+ Map deploymentIdToTargetAllocationsPerZone = deploymentIdToRemainingAllocations.entrySet()
.stream()
.filter(e -> e.getValue() > 0)
- .collect(Collectors.toMap(e -> e.getKey(), e -> (e.getValue() - 1) / remainingZones + 1));
+ .collect(
+ Collectors.toMap(Map.Entry::getKey, e -> 1 + remainingAllocationsPerZoneAfterAssigningOne(remainingZones, e.getValue()))
+ );
+ // If there was at least one allocation for a deployment, we will apply it to each zone
List modifiedDeployments = deployments.stream()
- .filter(m -> modelIdToTargetAllocations.getOrDefault(m.id(), 0) > 0)
+ .filter(d -> deploymentIdToTargetAllocationsPerZone.getOrDefault(d.deploymentId(), 0) > 0)
+ // filter out deployments with no allocations
.map(
- m -> new AssignmentPlan.Deployment(
- m.id(),
- m.memoryBytes(),
- modelIdToTargetAllocations.get(m.id()),
- m.threadsPerAllocation(),
- m.currentAllocationsByNodeId(),
- (tryAssigningPreviouslyAssignedModels && modelIdToRemainingAllocations.get(m.id()) == m.allocations())
- ? m.maxAssignedAllocations()
+ d -> new AssignmentPlan.Deployment(
+ // replace each deployment with a new deployment
+ d.deploymentId(),
+ d.memoryBytes(),
+ deploymentIdToTargetAllocationsPerZone.get(d.deploymentId()),
+ d.threadsPerAllocation(),
+ d.currentAllocationsByNodeId(),
+ // (below) Only force assigning at least once previously assigned models that have not had any allocation yet
+ (tryAssigningPreviouslyAssignedModels && deploymentIdToRemainingAllocations.get(d.deploymentId()) == d.allocations())
+ ? d.maxAssignedAllocations()
: 0,
- m.getAdaptiveAllocationsSettings(),
- // Only force assigning at least once previously assigned models that have not had any allocation yet
- m.perDeploymentMemoryBytes(),
- m.perAllocationMemoryBytes()
+ d.getAdaptiveAllocationsSettings(),
+ d.perDeploymentMemoryBytes(),
+ d.perAllocationMemoryBytes()
)
)
.toList();
return new AssignmentPlanner(nodes, modifiedDeployments).computePlan(tryAssigningPreviouslyAssignedModels);
}
+ private static int remainingAllocationsPerZoneAfterAssigningOne(int remainingZones, Integer remainingAllocations) {
+ if (remainingAllocations == null || remainingZones == 0) {
+ // should never happen
+ return 0;
+ }
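+ // Illustrative arithmetic: with 7 remaining allocations across 3 remaining zones, the caller assigns
+ // 1 + (7 - 1) / 3 = 3 allocations to the current zone, leaving 4 for the remaining two zones.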
+ return (remainingAllocations - 1) / remainingZones;
+ }
+
private AssignmentPlan computePlanAcrossAllNodes(List plans) {
logger.debug(() -> "computing plan across all nodes");
final List allNodes = new ArrayList<>();
nodesByZone.values().forEach(allNodes::addAll);
- Map> allocationsByNodeIdByModelId = mergeAllocationsByNodeIdByModelId(plans);
+ Map> allocationsByNodeIdByDeploymentId = mergeAllocationsByNodeIdByDeploymentId(plans);
List modelsAccountingPlans = deployments.stream()
.map(
- m -> new AssignmentPlan.Deployment(
- m.id(),
- m.memoryBytes(),
- m.allocations(),
- m.threadsPerAllocation(),
- allocationsByNodeIdByModelId.get(m.id()),
- m.maxAssignedAllocations(),
- m.getAdaptiveAllocationsSettings(),
- m.perDeploymentMemoryBytes(),
- m.perAllocationMemoryBytes()
+ d -> new AssignmentPlan.Deployment(
+ d.deploymentId(),
+ d.memoryBytes(),
+ d.allocations(),
+ d.threadsPerAllocation(),
+ allocationsByNodeIdByDeploymentId.get(d.deploymentId()),
+ d.maxAssignedAllocations(),
+ d.getAdaptiveAllocationsSettings(),
+ d.perDeploymentMemoryBytes(),
+ d.perAllocationMemoryBytes()
)
)
.toList();
@@ -176,11 +189,11 @@ private AssignmentPlan swapOriginalModelsInPlan(
List planDeployments
) {
final Map originalModelById = deployments.stream()
- .collect(Collectors.toMap(AssignmentPlan.Deployment::id, Function.identity()));
+ .collect(Collectors.toMap(AssignmentPlan.Deployment::deploymentId, Function.identity()));
final Map originalNodeById = allNodes.stream().collect(Collectors.toMap(Node::id, Function.identity()));
AssignmentPlan.Builder planBuilder = AssignmentPlan.builder(allNodes, deployments);
for (AssignmentPlan.Deployment m : planDeployments) {
- AssignmentPlan.Deployment originalDeployment = originalModelById.get(m.id());
+ AssignmentPlan.Deployment originalDeployment = originalModelById.get(m.deploymentId());
Map nodeAssignments = plan.assignments(m).orElse(Map.of());
for (Map.Entry assignment : nodeAssignments.entrySet()) {
Node originalNode = originalNodeById.get(assignment.getKey().id());
@@ -193,12 +206,12 @@ private AssignmentPlan swapOriginalModelsInPlan(
return planBuilder.build();
}
- private Map> mergeAllocationsByNodeIdByModelId(List plans) {
- Map> allocationsByNodeIdByModelId = new HashMap<>();
- deployments.forEach(m -> allocationsByNodeIdByModelId.put(m.id(), new HashMap<>()));
+ private Map> mergeAllocationsByNodeIdByDeploymentId(List plans) {
+ Map> allocationsByNodeIdByDeploymentId = new HashMap<>();
+ deployments.forEach(d -> allocationsByNodeIdByDeploymentId.put(d.deploymentId(), new HashMap<>()));
for (AssignmentPlan plan : plans) {
- for (AssignmentPlan.Deployment m : plan.models()) {
- Map nodeIdToAllocations = allocationsByNodeIdByModelId.get(m.id());
+ for (AssignmentPlan.Deployment m : plan.deployments()) {
+ Map nodeIdToAllocations = allocationsByNodeIdByDeploymentId.get(m.deploymentId());
Optional> assignments = plan.assignments(m);
if (assignments.isPresent()) {
for (Map.Entry nodeAssignments : assignments.get().entrySet()) {
@@ -212,6 +225,6 @@ private Map> mergeAllocationsByNodeIdByModelId(List
}
}
}
- return allocationsByNodeIdByModelId;
+ return allocationsByNodeIdByDeploymentId;
}
}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java
index d84c04f0c41f1..3f93c3431d891 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java
@@ -69,7 +69,7 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() {
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
}
@@ -102,7 +102,7 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() {
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
}
@@ -134,7 +134,7 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() {
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
}
@@ -162,7 +162,7 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() {
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
@@ -186,7 +186,7 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment()
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(false));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
}
@@ -215,7 +215,7 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment()
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(false));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1)));
}
@@ -251,7 +251,7 @@ public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() {
builder.assignModelToNode(m, n, 2);
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2)));
}
@@ -274,7 +274,7 @@ public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() {
builder.assignModelToNode(m, n, 2);
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2)));
}
@@ -355,7 +355,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() {
AssignmentPlan plan = builder.build();
- assertThat(plan.models(), contains(m));
+ assertThat(plan.deployments(), contains(m));
assertThat(plan.satisfiesCurrentAssignments(), is(true));
assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 3)));
}
@@ -511,7 +511,7 @@ public void testCompareTo_GivenDifferenceInMemory() {
assertThat(planUsingMoreMemory.compareTo(planUsingLessMemory), lessThan(0));
}
- public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() {
+ public void testSatisfiesAllModels_GivenAllDeploymentsAreSatisfied() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
{
@@ -602,7 +602,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() {
}
}
- public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() {
+ public void testSatisfiesAllDeployments_GivenOneModelHasOneAllocationLess() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, null, 0, 0);
@@ -617,7 +617,7 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() {
assertThat(plan.satisfiesAllModels(), is(false));
}
- public void testArePreviouslyAssignedModelsAssigned_GivenTrue() {
+ public void testArePreviouslyAssignedDeploymentsAssigned_GivenTrue() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, null, 0, 0);
@@ -630,7 +630,7 @@ public void testArePreviouslyAssignedModelsAssigned_GivenTrue() {
assertThat(plan.arePreviouslyAssignedModelsAssigned(), is(true));
}
- public void testArePreviouslyAssignedModelsAssigned_GivenFalse() {
+ public void testArePreviouslyAssignedDeploymentsAssigned_GivenFalse() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, null, 0, 0);
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java
index ef76c388b81a1..24095600c42d0 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java
@@ -261,7 +261,7 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA
}
}
- public void testMultipleModelsAndNodesWithSingleSolution() {
+ public void testMultipleDeploymentsAndNodesWithSingleSolution() {
Node node1 = new Node("n_1", 2 * scaleNodeSize(50), 7);
Node node2 = new Node("n_2", 2 * scaleNodeSize(50), 7);
Node node3 = new Node("n_3", 2 * scaleNodeSize(50), 2);
@@ -316,7 +316,7 @@ public void testMultipleModelsAndNodesWithSingleSolution() {
}
}
- public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() {
+ public void testMultipleDeploymentsAndNodesWithSingleSolution_NewMemoryFields() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 7);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 7);
Node node3 = new Node("n_3", ByteSizeValue.ofMb(900).getBytes(), 2);
@@ -508,7 +508,7 @@ public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() {
assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 4)));
}
- public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() {
+ public void testFullCoreUtilization_GivenDeploymentsWithSingleThreadPerAllocation() {
List nodes = List.of(
new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8),
new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8),
@@ -544,7 +544,7 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() {
assertPreviousAssignmentsAreSatisfied(deployments, assignmentPlan);
}
- public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_NewMemoryFields() {
+ public void testFullCoreUtilization_GivenDeploymentsWithSingleThreadPerAllocation_NewMemoryFields() {
List nodes = List.of(
new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8),
new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8),
@@ -641,32 +641,32 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New
assertPreviousAssignmentsAreSatisfied(deployments, assignmentPlan);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenNodesJustUnderLimit() {
- runTooManyNodesAndModels(3161, 1);
+ public void testTooManyNodesAndDeployments_DoesNotThrowOOM_GivenNodesJustUnderLimit() {
+ runTooManyNodesAndDeployments(3161, 1);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenNodesJustOverLimit() {
- runTooManyNodesAndModels(3162, 1);
+ public void testTooManyNodesAndDeployments_DoesNotThrowOOM_GivenNodesJustOverLimit() {
+ runTooManyNodesAndDeployments(3162, 1);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenModelsJustUnderLimit() {
- runTooManyNodesAndModels(1, 3161);
+ public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenDeploymentsJustUnderLimit() {
+ runTooManyNodesAndDeployments(1, 3161);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenModelsJustOverLimit() {
- runTooManyNodesAndModels(1, 3162);
+ public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenDeploymentsJustOverLimit() {
+ runTooManyNodesAndDeployments(1, 3162);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenComboJustUnderLimit() {
- runTooManyNodesAndModels(170, 171);
+ public void testTooManyNodesAndDeployments_DoesNotThrowOOM_GivenComboJustUnderLimit() {
+ runTooManyNodesAndDeployments(170, 171);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenComboJustOverLimit() {
- runTooManyNodesAndModels(171, 171);
+ public void testTooManyNodesAndDeployments_DoesNotThrowOOM_GivenComboJustOverLimit() {
+ runTooManyNodesAndDeployments(171, 171);
}
- public void testTooManyNodesAndModels_DoesNotThrowOOM_GivenComboWayOverLimit() {
- runTooManyNodesAndModels(1000, 1000);
+ public void testTooManyNodesAndDeployments_DoesNotThrowOOM_GivenComboWayOverLimit() {
+ runTooManyNodesAndDeployments(1000, 1000);
}
public void testRandomBenchmark() {
@@ -679,7 +679,7 @@ public void testRandomBenchmark() {
int scale = randomIntBetween(0, 10);
double load = randomDoubleBetween(0.1, 1.0, true);
List nodes = randomNodes(scale);
- List deployments = randomModels(scale, load);
+ List deployments = randomDeployments(scale, load);
nodeSizes.add(nodes.size());
modelSizes.add(deployments.size());
logger.debug("Nodes = " + nodes.size() + "; Models = " + deployments.size());
@@ -719,7 +719,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode
int scale = randomIntBetween(0, 10);
double load = randomDoubleBetween(0.1, 1.0, true);
List nodes = randomNodes(scale);
- List deployments = randomModels(scale, load);
+ List deployments = randomDeployments(scale, load);
AssignmentPlan originalPlan = new AssignmentPlanner(nodes, deployments).computePlan();
List previousModelsPlusNew = new ArrayList<>(deployments.size() + 1);
@@ -730,7 +730,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode
.collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue));
previousModelsPlusNew.add(
new AssignmentPlan.Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
m.allocations(),
m.threadsPerAllocation(),
@@ -827,7 +827,7 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously(
assertThat(assignmentPlan.getRemainingNodeMemory("n_2"), greaterThanOrEqualTo(0L));
}
- public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() {
+ public void testGivenPreviouslyAssignedDeployments_CannotAllBeAllocated() {
Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2);
AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1, null, 0, 0);
AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1, null, 0, 0);
@@ -854,7 +854,7 @@ public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints()
// Then start m_2
assignmentPlan = new AssignmentPlanner(
List.of(node1, node2),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -865,7 +865,7 @@ public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints()
// Then start m_3
assignmentPlan = new AssignmentPlanner(
List.of(node1, node2),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -875,7 +875,7 @@ public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints()
assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1)));
// First, one node goes away.
- assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L));
}
@@ -896,7 +896,7 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() {
// Then start m_2
assignmentPlan = new AssignmentPlanner(
List.of(node1, node2),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -907,7 +907,7 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() {
// Then start m_3
assignmentPlan = new AssignmentPlanner(
List.of(node1, node2),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -921,20 +921,20 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() {
Node node4 = new Node("n_4", ByteSizeValue.ofMb(2600).getBytes(), 2);
// First, one node goes away.
- assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L));
// Then, a node with double the memory size is added.
- assignmentPlan = new AssignmentPlanner(List.of(node1, node3), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1, node3), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L));
// And another.
- assignmentPlan = new AssignmentPlanner(List.of(node1, node3, node4), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1, node3, node4), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeMemory(node4.id()), greaterThanOrEqualTo(0L));
// Finally, the remaining smaller node is removed
- assignmentPlan = new AssignmentPlanner(List.of(node3, node4), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node3, node4), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeMemory(node4.id()), greaterThanOrEqualTo(0L));
@@ -949,7 +949,7 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() {
assertThat(assignmentPlan.getRemainingNodeCores("n_2"), equalTo(0));
}
- public void testGivenClusterResize_ShouldRemoveAllocatedModels() {
+ public void testGivenClusterResize_ShouldRemoveAllocatedDeployments() {
// Ensure that the plan removes previously allocated models if not enough memory is available
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2);
@@ -969,14 +969,14 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels() {
assertThat(assignmentPlan.getRemainingNodeMemory(node2.id()), greaterThanOrEqualTo(0L));
// Now the cluster starts getting resized. Ensure that resources are not over-allocated.
- assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2)));
assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeCores(node1.id()), greaterThanOrEqualTo(0));
}
- public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() {
+ public void testGivenClusterResize_ShouldRemoveAllocatedDeployments_NewMemoryFields() {
// Ensure that the plan removes previously allocated models if not enough memory is available
Node node1 = new Node("n_1", ByteSizeValue.ofMb(700).getBytes(), 2);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 2);
@@ -1026,16 +1026,16 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields()
assertThat(assignmentPlan.getRemainingNodeMemory(node2.id()), greaterThanOrEqualTo(0L));
// Now the cluster starts getting resized. Ensure that resources are not over-allocated.
- assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan();
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createDeploymentsFromPlan(assignmentPlan)).computePlan();
assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2)));
assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L));
assertThat(assignmentPlan.getRemainingNodeCores(node1.id()), greaterThanOrEqualTo(0));
}
- public static List<AssignmentPlan.Deployment> createModelsFromPlan(AssignmentPlan plan) {
+ public static List<AssignmentPlan.Deployment> createDeploymentsFromPlan(AssignmentPlan plan) {
List<AssignmentPlan.Deployment> deployments = new ArrayList<>();
- for (Deployment m : plan.models()) {
+ for (Deployment m : plan.deployments()) {
Optional<Map<Node, Integer>> assignments = plan.assignments(m);
Map<String, Integer> currentAllocations = Map.of();
if (assignments.isPresent()) {
@@ -1047,7 +1047,7 @@ public static List<AssignmentPlan.Deployment> createModelsFromPlan(AssignmentPlan plan) {
int totalAllocations = currentAllocations.values().stream().mapToInt(Integer::intValue).sum();
deployments.add(
new Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
m.allocations(),
m.threadsPerAllocation(),
@@ -1064,13 +1064,13 @@ public static List<AssignmentPlan.Deployment> createModelsFromPlan(AssignmentPlan plan) {
public static Map<String, Map<String, Integer>> convertToIdIndexed(AssignmentPlan plan) {
Map<String, Map<String, Integer>> result = new HashMap<>();
- for (AssignmentPlan.Deployment m : plan.models()) {
+ for (AssignmentPlan.Deployment m : plan.deployments()) {
Optional<Map<Node, Integer>> assignments = plan.assignments(m);
Map<String, Integer> allocationsPerNodeId = assignments.isPresent() ? new HashMap<>() : Map.of();
for (Map.Entry<Node, Integer> nodeAssignments : assignments.orElse(Map.of()).entrySet()) {
allocationsPerNodeId.put(nodeAssignments.getKey().id(), nodeAssignments.getValue());
}
- result.put(m.id(), allocationsPerNodeId);
+ result.put(m.deploymentId(), allocationsPerNodeId);
}
return result;
}
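For orientation, this is roughly how the two helpers above compose in the resize tests: createDeploymentsFromPlan round-trips a computed plan back into deployment specs, and convertToIdIndexed flattens a plan into id-keyed maps for assertions. A minimal usage sketch, reusing the Node and Deployment values defined in the tests (variable names are illustrative, not the tests' code):

    // Sketch only; mirrors the call pattern in the resize tests above.
    AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2)).computePlan();
    // Re-plan after a resize, feeding the previous plan's deployments back in.
    AssignmentPlan replanned = new AssignmentPlanner(List.of(node1), createDeploymentsFromPlan(plan)).computePlan();
    // Flatten to deploymentId -> (nodeId -> allocations) for assertions.
    Map<String, Map<String, Integer>> indexed = convertToIdIndexed(replanned);
    assertThat(indexed.get("m_1"), equalTo(Map.of("n_1", 2)));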
@@ -1103,7 +1103,7 @@ public static List<Node> randomNodes(int scale, String nodeIdPrefix) {
return nodes;
}
- public static List<AssignmentPlan.Deployment> randomModels(int scale, double load) {
+ public static List<AssignmentPlan.Deployment> randomDeployments(int scale, double load) {
List<AssignmentPlan.Deployment> deployments = new ArrayList<>();
for (int i = 0; i < Math.max(2, Math.round(load * (1 + 8 * scale))); i++) {
deployments.add(randomModel(String.valueOf(i)));
@@ -1158,7 +1158,7 @@ public static void assertPreviousAssignmentsAreSatisfied(List
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nodesSize; i++) {
nodes.add(new Node("n_" + i, ByteSizeValue.ofGb(6).getBytes(), 100));
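Only the top of the renamed stress helper is visible in the hunk above. A plausible reconstruction of the rest, assuming the deployments it builds are small single-allocation models (the exact parameters are not shown in this diff):

    // Hypothetical reconstruction; only the node-building loop above is from the source.
    private void runTooManyNodesAndDeployments(int nodesSize, int deploymentsSize) {
        List<Node> nodes = new ArrayList<>();
        for (int i = 0; i < nodesSize; i++) {
            nodes.add(new Node("n_" + i, ByteSizeValue.ofGb(6).getBytes(), 100));
        }
        List<AssignmentPlan.Deployment> deployments = new ArrayList<>();
        for (int i = 0; i < deploymentsSize; i++) {
            deployments.add(new AssignmentPlan.Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 1, 1, Map.of(), 0, null, 0, 0));
        }
        // The point of these tests: planning must finish without an OutOfMemoryError.
        new AssignmentPlanner(nodes, deployments).computePlan();
    }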
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java
index 9885c4d583198..7499470cc8d6f 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java
@@ -83,13 +83,13 @@ public void testGivenPreviousAssignments() {
List<Deployment> modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations();
assertThat(modelsPreservingAllocations, hasSize(2));
- assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
+ assertThat(modelsPreservingAllocations.get(0).deploymentId(), equalTo("m_1"));
assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1));
assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
- assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
+ assertThat(modelsPreservingAllocations.get(1).deploymentId(), equalTo("m_2"));
assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3));
assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4));
@@ -166,7 +166,7 @@ public void testGivenPreviousAssignments() {
List<Deployment> modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations();
assertThat(modelsPreservingAllocations, hasSize(2));
- assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
+ assertThat(modelsPreservingAllocations.get(0).deploymentId(), equalTo("m_1"));
assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
@@ -174,7 +174,7 @@ public void testGivenPreviousAssignments() {
assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
- assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
+ assertThat(modelsPreservingAllocations.get(1).deploymentId(), equalTo("m_2"));
assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
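The assertions in this file track the accessor rename from id() to deploymentId() on the planner's deployment record. A reduced sketch of the record shape implied by the accessors exercised here (component order and any omitted components are assumptions):

    // Illustrative only; the real record is AssignmentPlan.Deployment and has more components.
    public record Deployment(
        String deploymentId,                          // formerly id()
        long memoryBytes,
        int allocations,
        int threadsPerAllocation,
        Map<String, Integer> currentAllocationsByNodeId,
        long perDeploymentMemoryBytes,
        long perAllocationMemoryBytes
    ) {}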
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java
index 50ba8763c690d..bc95fb1e0339e 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java
@@ -77,7 +77,7 @@ public void testGivenPreviousAssignments() {
List<Deployment> modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations();
assertThat(modelsPreservingAllocations, hasSize(2));
- assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
+ assertThat(modelsPreservingAllocations.get(0).deploymentId(), equalTo("m_1"));
assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
@@ -85,7 +85,7 @@ public void testGivenPreviousAssignments() {
assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
- assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
+ assertThat(modelsPreservingAllocations.get(1).deploymentId(), equalTo("m_2"));
assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
@@ -165,7 +165,7 @@ public void testGivenPreviousAssignments() {
List<Deployment> modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations();
assertThat(modelsPreservingAllocations, hasSize(2));
- assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
+ assertThat(modelsPreservingAllocations.get(0).deploymentId(), equalTo("m_1"));
assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
@@ -173,7 +173,7 @@ public void testGivenPreviousAssignments() {
assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
- assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
+ assertThat(modelsPreservingAllocations.get(1).deploymentId(), equalTo("m_2"));
assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
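PreserveAllAllocations and PreserveOneAllocation differ only in how many of a deployment's existing allocations are pinned before replanning, which is what the paired assertions in these two files exercise. A conceptual sketch of the preserve-one rule (this helper is illustrative, not the repository's implementation):

    // Pin at most one allocation per node; the remainder is freed for rebalancing.
    static Map<String, Integer> pinOneAllocationPerNode(Map<String, Integer> currentAllocationsByNodeId) {
        Map<String, Integer> pinned = new HashMap<>();
        for (Map.Entry<String, Integer> e : currentAllocationsByNodeId.entrySet()) {
            pinned.put(e.getKey(), Math.min(1, e.getValue()));
        }
        return pinned;
    }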
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java
index 4993600d0d3b3..7005ad959577b 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java
@@ -22,9 +22,9 @@
import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.assertModelFullyAssignedToNode;
import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.assertPreviousAssignmentsAreSatisfied;
import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.convertToIdIndexed;
-import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.createModelsFromPlan;
+import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.createDeploymentsFromPlan;
+import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.randomDeployments;
import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.randomModel;
-import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.randomModels;
import static org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlannerTests.randomNodes;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -138,6 +138,33 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() {
assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 1, "n_2", 1)));
}
+ public void testGivenOneModel_OneLargeNodePerZone_TwoZones_FullyFits() {
+ Node node1 = new Node("n_1", ByteSizeValue.ofGb(16).getBytes(), 8);
+ Node node2 = new Node("n_2", ByteSizeValue.ofGb(16).getBytes(), 8);
+ AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment(
+ "m_1",
+ ByteSizeValue.ofMb(100).getBytes(),
+ 4,
+ 2,
+ Map.of(),
+ 0,
+ null,
+ 0,
+ 0
+ );
+
+ AssignmentPlan plan = new ZoneAwareAssignmentPlanner(
+ Map.of(List.of("z_1"), List.of(node1), List.of("z_2"), List.of(node2)),
+ List.of(deployment)
+ ).computePlan();
+
+ assertThat(plan.satisfiesAllModels(), is(true));
+
+ Map<String, Map<String, Integer>> indexedBasedPlan = convertToIdIndexed(plan);
+ assertThat(indexedBasedPlan.keySet(), hasItems("m_1"));
+ assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2, "n_2", 2)));
+ }
+
public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4);
@@ -166,7 +193,7 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() {
assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(0L));
}
- public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() {
+ public void testGivenThreeDeployments_TwoNodesPerZone_ThreeZones_FullyFit() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4);
@@ -217,7 +244,7 @@ public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() {
}
}
- public void testGivenTwoModelsWithSingleAllocation_OneNode_ThreeZones() {
+ public void testGivenTwoDeploymentsWithSingleAllocation_OneNode_ThreeZones() {
Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4);
Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4);
@@ -243,7 +270,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode
List.of("z_3"),
randomNodes(scale, "z_3_")
);
- List<AssignmentPlan.Deployment> deployments = randomModels(scale, load);
+ List<AssignmentPlan.Deployment> deployments = randomDeployments(scale, load);
AssignmentPlan originalPlan = new ZoneAwareAssignmentPlanner(nodesByZone, deployments).computePlan();
List<AssignmentPlan.Deployment> previousModelsPlusNew = new ArrayList<>(deployments.size() + 1);
@@ -254,7 +281,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode
.collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue));
previousModelsPlusNew.add(
new AssignmentPlan.Deployment(
- m.id(),
+ m.deploymentId(),
m.memoryBytes(),
m.allocations(),
m.threadsPerAllocation(),
@@ -291,7 +318,7 @@ public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOn
// Then start m_2
assignmentPlan = new ZoneAwareAssignmentPlanner(
Map.of(List.of(), List.of(node1, node2)),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -302,7 +329,7 @@ public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOn
// Then start m_3
assignmentPlan = new ZoneAwareAssignmentPlanner(
Map.of(List.of(), List.of(node1, node2)),
- Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
+ Stream.concat(createDeploymentsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList()
).computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
@@ -316,19 +343,19 @@ public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOn
Node node4 = new Node("n_4", ByteSizeValue.ofMb(5160).getBytes(), 2);
// First, one node goes away.
- assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1)), createModelsFromPlan(assignmentPlan))
+ assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1)), createDeploymentsFromPlan(assignmentPlan))
.computePlan();
// Then, a node with double the memory size is added.
- assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1, node3)), createModelsFromPlan(assignmentPlan))
+ assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1, node3)), createDeploymentsFromPlan(assignmentPlan))
.computePlan();
// And another.
assignmentPlan = new ZoneAwareAssignmentPlanner(
Map.of(List.of(), List.of(node1, node3, node4)),
- createModelsFromPlan(assignmentPlan)
+ createDeploymentsFromPlan(assignmentPlan)
).computePlan();
// Finally, the remaining smaller node is removed
- assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node3, node4)), createModelsFromPlan(assignmentPlan))
+ assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node3, node4)), createDeploymentsFromPlan(assignmentPlan))
.computePlan();
indexedBasedPlan = convertToIdIndexed(assignmentPlan);
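Throughout these zone-aware tests, the planner's first argument groups nodes by the list of zone attribute values they carry, and deployments are then balanced across those groups. The input shape, as used in the hunks above (node and deployment variables as defined there):

    Map<List<String>, List<Node>> nodesByZone = Map.of(
        List.of("z_1"), List.of(node1),
        List.of("z_2"), List.of(node2)
    );
    AssignmentPlan plan = new ZoneAwareAssignmentPlanner(nodesByZone, List.of(deployment)).computePlan();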