
Commit

Merge remote-tracking branch 'origin/main' into esql_points_from_source_wkb
craigtaverner committed Jan 2, 2024
2 parents b96933b + db093cd commit db2094b
Showing 144 changed files with 4,586 additions and 1,848 deletions.
@@ -123,7 +123,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:heap-attack");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node");
@@ -166,7 +165,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster");
return map;
}

5 changes: 5 additions & 0 deletions docs/changelog/101717.yaml
@@ -0,0 +1,5 @@
pr: 101717
summary: Pause shard snapshots on graceful shutdown
area: Snapshot/Restore
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102824.yaml
@@ -0,0 +1,5 @@
pr: 102824
summary: Change detection aggregation improvements
area: Machine Learning
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/103546.yaml
@@ -0,0 +1,5 @@
pr: 103546
summary: Handle timeout on standalone rewrite calls
area: Search
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/103673.yaml
@@ -0,0 +1,6 @@
pr: 103673
summary: "ESQL: Infer not null for aggregated fields"
area: ES|QL
type: enhancement
issues:
- 102787
5 changes: 5 additions & 0 deletions docs/changelog/103690.yaml
@@ -0,0 +1,5 @@
pr: 103690
summary: Restore inter-segment search concurrency when synthetic source is enabled
area: Search
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/103720.yaml
@@ -0,0 +1,6 @@
pr: 103720
summary: Add "step":"ERROR" to ILM explain response for missing policy
area: ILM+SLM
type: enhancement
issues:
- 99030
5 changes: 5 additions & 0 deletions docs/changelog/103727.yaml
@@ -0,0 +1,5 @@
pr: 103727
summary: "ESQL: Track the rest of `DocVector`"
area: ES|QL
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/103758.yaml
@@ -0,0 +1,5 @@
pr: 103758
summary: Fix the transport version of `PlanStreamOutput`
area: ES|QL
type: bug
issues: []
2 changes: 2 additions & 0 deletions docs/reference/esql/esql-get-started.asciidoc
@@ -5,6 +5,8 @@
<titleabbrev>Getting started</titleabbrev>
++++

preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]

This guide shows how you can use {esql} to query and aggregate your data.

[discrete]
2 changes: 1 addition & 1 deletion docs/reference/esql/index.asciidoc
@@ -6,7 +6,7 @@

[partintro]

preview::[]
preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]

The {es} Query Language ({esql}) provides a powerful way to filter, transform,
and analyze data stored in {es}, and in the future in other runtimes. It is
50 changes: 7 additions & 43 deletions docs/reference/ingest/search-inference-processing.asciidoc
@@ -102,10 +102,13 @@ Here, you'll be able to:
1. Choose a name for your pipeline.
- This name will need to be unique across the whole deployment.
If you want this pipeline to be index-specific, we recommend including the name of your index in the pipeline name.
- If you do not set the pipeline name, a default unique name will be provided upon selecting a trained model.
2. Select the ML trained model you want to use.
- The model must be deployed before you can select it.
To begin deployment of a model, click the *Deploy* button.
3. Select one or more source fields as input for the inference processor.
- If there are no source fields available, your index will need a <<mapping, field mapping>>.
4. (Optional) Choose a name for your target field.
4. (Optional) Choose a name for your target field(s).
This is where the output of the inference model will be stored. Changing the default name is only possible if you have a single source field selected.
5. Add the source-target field mapping to the configuration by clicking the *Add* button.
6. Repeat steps 3-5 for each field mapping you want to add.
@@ -123,51 +126,12 @@ These pipelines can also be viewed, edited, and deleted in Kibana via *Stack Man
You may also use the <<ingest-apis,Ingest pipeline APIs>>.
If you delete any of these pipelines outside of the *Content* UI in Kibana, make sure to edit the ML inference pipelines that reference them.

[discrete#ingest-pipeline-search-inference-update-mapping]
==== Update mappings to use ML inference pipelines

After setting up an ML inference pipeline or attaching an existing one, it may be necessary to manually create the field mappings in order to support the referenced trained ML model's output.
This needs to happen before the pipeline is first used to index some documents, otherwise the model output fields could be inferred with the wrong type.

[NOTE]
====
This doesn't apply when you're creating a pipeline with the ELSER model, for which the index mappings are automatically updated in the process.
====

The required field name and type depends on the configuration of the pipeline and the trained model it uses.
For example, if you configure a `text_embedding` model, select `summary` as a source field, and `ml.inference.summary` as the target field, the inference output will be stored in `ml.inference.<source field name>.predicted_value` as a <<dense-vector, dense_vector>> type.
In order to support semantic search on this field, it must be added to the mapping:

[source,console]
----
PUT my-index-0001/_mapping
{
"properties": {
"ml.inference.summary.predicted_value": { <1>
"type": "dense_vector", <2>
"dims": 768, <3>
"index": true,
"similarity": "dot_product"
}
}
}
----
// NOTCONSOLE
// TEST[skip:TODO]

<1> The output of the ML model is stored in the configured target field suffixed with `predicted_value`.
<2> Choose a field type that is compatible with the inference output and supports your search use cases.
<3> Set additional properties as necessary.

[TIP]
====
You can check the shape of the generated output before indexing any documents while creating the ML inference pipeline under the *Test* tab.
Simply provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results.
====

[discrete#ingest-pipeline-search-inference-test-inference-pipeline]
==== Test your ML inference pipeline

You can verify the expected structure of the inference output before indexing any documents while creating the {ml} inference pipeline under the *Test* tab.
Provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results.

To ensure the ML inference pipeline runs when documents are ingested, the documents must contain a field named `_run_ml_inference` set to `true`, and the pipeline must be set to `{index_name}`.
For connector and crawler indices, this will happen automatically if you've configured the settings appropriately for the pipeline name `{index_name}`.
To manage these settings:
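As a rough illustration of the `_run_ml_inference` mechanism described above (not part of this commit's changes), a Java-client index request might look like the sketch below. The index name `my-index-0001` and the `summary` field are taken from the surrounding docs; the use of the internal `Client` API and the example text are assumptions for illustration only.

[source,java]
----
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.xcontent.XContentType;

import java.util.Map;

public final class RunMlInferenceExample {
    /**
     * Hypothetical sketch: index a document so that the index-specific pipeline
     * ("my-index-0001") runs its ML inference processors. The _run_ml_inference
     * flag gates whether the managed pipeline applies inference to this document.
     */
    static void indexWithInference(Client client) {
        IndexRequest request = new IndexRequest("my-index-0001")
            .setPipeline("my-index-0001") // pipeline named after the index, per the docs above
            .source(Map.of(
                "summary", "Text to run through the inference processor",
                "_run_ml_inference", true
            ), XContentType.JSON);
        client.index(request).actionGet(); // blocks until the document is indexed
    }
}
----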
19 changes: 19 additions & 0 deletions libs/core/src/main/java/org/elasticsearch/core/Releasables.java
@@ -11,6 +11,7 @@
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicReference;

/** Utility methods to work with {@link Releasable}s. */
@@ -103,6 +104,24 @@ public String toString() {
};
}

/**
* Similar to {@link #wrap(Iterable)} except that it accepts an {@link Iterator} of releasables. The resulting resource must therefore
* only be released once.
*/
public static Releasable wrap(final Iterator<Releasable> releasables) {
return assertOnce(wrap(new Iterable<>() {
@Override
public Iterator<Releasable> iterator() {
return releasables;
}

@Override
public String toString() {
return releasables.toString();
}
}));
}

/** @see #wrap(Iterable) */
public static Releasable wrap(final Releasable... releasables) {
return new Releasable() {
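For context on the new overload above: it adapts the iterator into a single-use `Iterable` and wraps the result with `assertOnce`, so the returned resource must only be released once. A minimal usage sketch (not part of this commit; the lambda releasables are made up):

[source,java]
----
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;

import java.util.List;

public final class WrapIteratorExample {
    public static void main(String[] args) {
        // Hypothetical releasables; real callers would pass pages, buffers, etc.
        Releasable first = () -> System.out.println("released first");
        Releasable second = () -> System.out.println("released second");

        // The iterator is consumed when the wrapper is closed, releasing each
        // element in iteration order. Close the wrapper exactly once.
        Releasable combined = Releasables.wrap(List.of(first, second).iterator());
        combined.close();
    }
}
----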
@@ -107,5 +107,27 @@ public String toString() {
assertEquals("wrapped[list]", wrapIterable.toString());
wrapIterable.close();
assertEquals(5, count.get());

final var wrapIterator = Releasables.wrap(new Iterator<>() {
final Iterator<Releasable> innerIterator = List.of(releasable, releasable, releasable).iterator();

@Override
public boolean hasNext() {
return innerIterator.hasNext();
}

@Override
public Releasable next() {
return innerIterator.next();
}

@Override
public String toString() {
return "iterator";
}
});
assertEquals("wrapped[iterator]", wrapIterator.toString());
wrapIterator.close();
assertEquals(8, count.get());
}
}
@@ -10,6 +10,7 @@
import org.apache.logging.log4j.core.util.Throwables;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
@@ -28,6 +29,8 @@
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
@@ -107,6 +110,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
@@ -578,7 +582,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception {
verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true);
verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true);
verifyResolvability(dataStreamName, clusterAdmin().prepareSearchShards(dataStreamName), false);
verifyResolvability(dataStreamName, indicesAdmin().prepareShardStores(dataStreamName), false);
verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(dataStreamName)));

request = new CreateDataStreamAction.Request("logs-barbaz");
client().execute(CreateDataStreamAction.INSTANCE, request).actionGet();
@@ -622,7 +626,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception {
verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false);
verifyResolvability(wildcardExpression, clusterAdmin().prepareSearchShards(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareShardStores(wildcardExpression), false);
verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(wildcardExpression)));
}

public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exception {
@@ -1866,11 +1870,15 @@ private static void verifyResolvability(
multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false))
);
} else {
requestBuilder.get();
verifyResolvability(requestBuilder.execute());
}
}
}

private static void verifyResolvability(ActionFuture<?> future) {
future.actionGet(10, TimeUnit.SECONDS);
}

static void indexDocs(String dataStream, int numDocs) {
BulkRequest bulkRequest = new BulkRequest();
for (int i = 0; i < numDocs; i++) {
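The change above replaces the removed `prepareShardStores` builder with a direct execution of the shard-stores transport action, waiting on the returned `ActionFuture` with a ten-second timeout. A standalone sketch of that pattern, assuming a `Client`, the response type implied by the action, and an illustrative data stream name:

[source,java]
----
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.client.internal.Client;

import java.util.concurrent.TimeUnit;

public final class ShardStoresResolvabilityExample {
    /**
     * Sketch of the pattern used in the test above: execute the transport action
     * directly and bound the wait, rather than blocking indefinitely on get().
     * The data stream name is illustrative.
     */
    static void verifyShardStoresResolvable(Client client, String dataStreamName) {
        ActionFuture<IndicesShardStoresResponse> future =
            client.execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(dataStreamName));
        future.actionGet(10, TimeUnit.SECONDS); // throws if resolution fails or times out
    }
}
----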
3 changes: 3 additions & 0 deletions qa/ccs-rolling-upgrade-remote-cluster/build.gradle
@@ -6,6 +6,7 @@
* Side Public License, v 1.
*/

import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.internal.info.BuildParams
import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask

@@ -32,13 +33,15 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
versions = [bwcVersion.toString(), project.version]
setting 'cluster.remote.node.attr', 'gateway'
setting 'xpack.security.enabled', 'false'
requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
}
def remoteCluster = testClusters.register("${baseName}-remote") {
numberOfNodes = 3
versions = [bwcVersion.toString(), project.version]
firstNode.setting 'node.attr.gateway', 'true'
lastNode.setting 'node.attr.gateway', 'true'
setting 'xpack.security.enabled', 'false'
requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
}


