Merge remote-tracking branch 'upstream/main' into dls_aggs
jakelandis committed Mar 11, 2024
2 parents 7e1f65f + 903524f commit 59bf139
Showing 50 changed files with 1,322 additions and 340 deletions.
6 changes: 6 additions & 0 deletions docs/changelog/106156.yaml
@@ -0,0 +1,6 @@
+pr: 106156
+summary: Disable parallel collection for terms aggregation with `min_doc_count` equal
+  to 0
+area: Aggregations
+type: bug
+issues: []
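For context: a terms aggregation with `min_doc_count: 0` returns buckets even for terms that no matching document contains, which is the case this fix now routes away from parallel collection. A minimal sketch of a request that takes this path, using the Elasticsearch Java API (the index and field names are hypothetical):

[source,java]
--------------------------------------------------
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class MinDocCountZeroExample {
    public static void main(String[] args) {
        // minDocCount(0) keeps buckets for terms with no matching documents;
        // per the changelog above, such requests now run with parallel
        // collection disabled.
        TermsAggregationBuilder terms = AggregationBuilders.terms("genres")
            .field("genre")
            .minDocCount(0);
        SearchSourceBuilder source = new SearchSourceBuilder().size(0).aggregation(terms);
        System.out.println(source); // prints the JSON body of the search request
    }
}
--------------------------------------------------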
73 changes: 54 additions & 19 deletions docs/reference/index-modules/slowlog.asciidoc
@@ -58,33 +58,56 @@
The search slow log file is configured in the `log4j2.properties` file.
[discrete]
==== Identifying search slow log origin

-It is often useful to identify what triggered a slow running query. If a call was initiated with an `X-Opaque-ID` header, then the user ID
-is included in Search Slow logs as an additional **id** field
+It is often useful to identify what triggered a slow running query.
+To include information about the user that triggered a slow search,
+use the `index.search.slowlog.include.user` setting.
+
+[source,console]
+--------------------------------------------------
+PUT /my-index-000001/_settings
+{
+  "index.search.slowlog.include.user": true
+}
+--------------------------------------------------
+// TEST[setup:my_index]
+
+This will result in user information being included in the slow log.
+
[source,js]
---------------------------
{
-  "type": "index_search_slowlog",
-  "timestamp": "2030-08-30T11:59:37,786+02:00",
-  "level": "WARN",
-  "component": "i.s.s.query",
-  "cluster.name": "distribution_run",
-  "node.name": "node-0",
-  "message": "[index6][0]",
-  "took": "78.4micros",
-  "took_millis": "0",
-  "total_hits": "0 hits",
-  "stats": "[]",
-  "search_type": "QUERY_THEN_FETCH",
-  "total_shards": "1",
-  "source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}",
-  "id": "MY_USER_ID",
-  "cluster.uuid": "Aq-c-PAeQiK3tfBYtig9Bw",
-  "node.id": "D7fUYfnfTLa2D7y-xw6tZg"
+  "@timestamp": "2024-02-21T12:42:37.255Z",
+  "log.level": "WARN",
+  "auth.type": "REALM",
+  "elasticsearch.slowlog.id": "tomcat-123",
+  "elasticsearch.slowlog.message": "[index6][0]",
+  "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH",
+  "elasticsearch.slowlog.source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}",
+  "elasticsearch.slowlog.stats": "[]",
+  "elasticsearch.slowlog.took": "747.3micros",
+  "elasticsearch.slowlog.took_millis": 0,
+  "elasticsearch.slowlog.total_hits": "1 hits",
+  "elasticsearch.slowlog.total_shards": 1,
+  "user.name": "elastic",
+  "user.realm": "reserved",
+  "ecs.version": "1.2.0",
+  "service.name": "ES_ECS",
+  "event.dataset": "elasticsearch.index_search_slowlog",
+  "process.thread.name": "elasticsearch[runTask-0][search][T#5]",
+  "log.logger": "index.search.slowlog.query",
+  "elasticsearch.cluster.uuid": "Ui23kfF1SHKJwu_hI1iPPQ",
+  "elasticsearch.node.id": "JK-jn-XpQ3OsDUsq5ZtfGg",
+  "elasticsearch.node.name": "node-0",
+  "elasticsearch.cluster.name": "distribution_run"
}
---------------------------
// NOTCONSOLE

+If a call was initiated with an `X-Opaque-ID` header, then the ID is included
+in Search Slow logs in the **elasticsearch.slowlog.id** field. See
+<<x-opaque-id, X-Opaque-Id HTTP header>> for details and best practices.
+
[discrete]
[[index-slow-log]]
=== Index Slow log
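Aside on the `X-Opaque-ID` guidance added above: a minimal client-side sketch that attaches the header using the Elasticsearch low-level Java REST client (the host, index, and `tomcat-123` tag mirror the doc example; all values are placeholders):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class OpaqueIdExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/my-index-000001/_search");
            request.setJsonEntity("{\"query\":{\"match_all\":{}}}");
            // Tag the call; if the search is slow, this value shows up in the
            // elasticsearch.slowlog.id field of the slow log entry.
            request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-Opaque-Id", "tomcat-123"));
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------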
@@ -119,6 +142,18 @@
PUT /my-index-000001/_settings
--------------------------------------------------
// TEST[setup:my_index]

+To include information about the user that triggered a slow indexing event,
+use the `index.indexing.slowlog.include.user` setting.
+
+[source,console]
+--------------------------------------------------
+PUT /my-index-000001/_settings
+{
+  "index.indexing.slowlog.include.user": true
+}
+--------------------------------------------------
+// TEST[setup:my_index]
+
By default Elasticsearch will log the first 1000 characters of the _source in
the slowlog. You can change that with `index.indexing.slowlog.source`. Setting
it to `false` or `0` will skip logging the source entirely, while setting it to
@@ -9,9 +9,9 @@
package org.elasticsearch.geometry.simplify;

import org.elasticsearch.test.ESTestCase;
-import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;

import static java.lang.Math.toRadians;
import static org.elasticsearch.geometry.simplify.SimplificationErrorCalculator.Point3D.from;
@@ -238,7 +238,7 @@ private static Matcher<SimplificationErrorCalculator.Point3D> samePoint(Simplifi
return new TestPoint3DMatcher(expected, 1e-15);
}

-    private static class TestPoint3DMatcher extends BaseMatcher<SimplificationErrorCalculator.Point3D> {
+    private static class TestPoint3DMatcher extends TypeSafeMatcher<SimplificationErrorCalculator.Point3D> {
private final Matcher<Double> xMatcher;
private final Matcher<Double> yMatcher;
private final Matcher<Double> zMatcher;
@@ -252,11 +252,8 @@ private static class TestPoint3DMatcher extends BaseMatcher<SimplificationErrorC
}

@Override
-        public boolean matches(Object actual) {
-            if (actual instanceof SimplificationErrorCalculator.Point3D point3D) {
-                return xMatcher.matches(point3D.x()) && yMatcher.matches(point3D.y()) && zMatcher.matches(point3D.z());
-            }
-            return false;
+        public boolean matchesSafely(SimplificationErrorCalculator.Point3D point3D) {
+            return xMatcher.matches(point3D.x()) && yMatcher.matches(point3D.y()) && zMatcher.matches(point3D.z());
}

@Override
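The refactoring above swaps `BaseMatcher` for `TypeSafeMatcher`, which performs the type (and null) check itself and hands `matchesSafely` an already-cast argument, removing the manual `instanceof` branch. A self-contained sketch of the same pattern (the `HasLength` matcher is hypothetical):

[source,java]
--------------------------------------------------
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;

// Matches strings of a given length; TypeSafeMatcher handles the cast from
// Object and rejects nulls and non-String values before matchesSafely runs.
class HasLength extends TypeSafeMatcher<String> {
    private final int expected;

    HasLength(int expected) {
        this.expected = expected;
    }

    @Override
    protected boolean matchesSafely(String item) {
        return item.length() == expected; // item is already a non-null String
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("a string of length ").appendValue(expected);
    }
}
--------------------------------------------------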
@@ -500,7 +500,7 @@ public void writeRawValue(InputStream stream, XContentType xContentType) throws
public void writeRawValue(String value) throws IOException {
try {
if (supportsRawWrites()) {
-                generator.writeRaw(value);
+                generator.writeRawValue(value);
} else {
// fallback to a regular string for formats that don't allow writing the value as is
generator.writeString(value);
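This one-call fix matters because of a real difference in Jackson's `JsonGenerator`, which backs the JSON generator here: `writeRaw` splices text into the output with no context bookkeeping, while `writeRawValue` records the text as a value and emits whatever separator the enclosing structure needs (such as a comma between array elements). A standalone sketch of the difference, assuming Jackson on the classpath:

[source,java]
--------------------------------------------------
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.StringWriter;

public class RawWriteExample {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();

        // writeRaw: text is spliced in verbatim, no comma is added for us.
        StringWriter a = new StringWriter();
        try (JsonGenerator gen = factory.createGenerator(a)) {
            gen.writeStartArray();
            gen.writeNumber(1);
            gen.writeRaw("2"); // malformed output
            gen.writeEndArray();
        }
        System.out.println(a); // [12]

        // writeRawValue: text is treated as a value, so the separator appears.
        StringWriter b = new StringWriter();
        try (JsonGenerator gen = factory.createGenerator(b)) {
            gen.writeStartArray();
            gen.writeNumber(1);
            gen.writeRawValue("2"); // well-formed output
            gen.writeEndArray();
        }
        System.out.println(b); // [1,2]
    }
}
--------------------------------------------------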
@@ -34,9 +34,7 @@
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestIssueLogging;
-import org.hamcrest.Description;
import org.hamcrest.Matcher;
-import org.hamcrest.TypeSafeMatcher;

import java.util.Arrays;
import java.util.Comparator;
@@ -51,8 +49,10 @@
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING;
import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.in;
import static org.hamcrest.Matchers.is;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -95,7 +95,7 @@ public void testHighWatermarkNotExceeded() throws Exception {

// increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back
getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES);
-        assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds()));
+        assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds())));
}

public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception {
@@ -158,7 +158,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti

// increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back
getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES);
-        assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, new ContainsExactlyOneOf<>(shardSizes.getSmallestShardIds()));
+        assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getSmallestShardIds())));
}

@TestIssueLogging(
@@ -221,11 +221,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard
assertThat(restoreInfo.successfulShards(), is(snapshotInfo.totalShards()));
assertThat(restoreInfo.failedShards(), is(0));

-        assertBusyWithDiskUsageRefresh(
-            dataNode0Id,
-            indexName,
-            new ContainsExactlyOneOf<>(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace))
-        );
+        assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, contains(in(shardSizes.getShardIdsWithSizeSmallerOrEqual(usableSpace))));
}

private Set<ShardId> getShardIds(final String nodeId, final String indexName) {
@@ -346,23 +342,4 @@ private void assertBusyWithDiskUsageRefresh(String nodeId, String indexName, Mat
private InternalClusterInfoService getInternalClusterInfoService() {
return (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class);
}

-    private static final class ContainsExactlyOneOf<T> extends TypeSafeMatcher<Set<T>> {
-
-        private final Set<T> expectedValues;
-
-        ContainsExactlyOneOf(Set<T> expectedValues) {
-            this.expectedValues = expectedValues;
-        }
-
-        @Override
-        protected boolean matchesSafely(Set<T> item) {
-            return item.size() == 1 && expectedValues.contains(item.iterator().next());
-        }
-
-        @Override
-        public void describeTo(Description description) {
-            description.appendText("Expected to contain exactly one value from ").appendValueList("[", ",", "]", expectedValues);
-        }
-    }
}
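The deleted `ContainsExactlyOneOf` matcher is replaced by composing two stock Hamcrest matchers: `contains(m)` matches an iterable with exactly one element matching `m`, and `in(collection)` matches any value that is a member of the collection. A minimal sketch of the composition (the values are hypothetical):

[source,java]
--------------------------------------------------
import java.util.Set;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.in;

public class ContainsInExample {
    public static void main(String[] args) {
        Set<Integer> allowed = Set.of(1, 2, 3);
        // Passes: exactly one element, and it belongs to `allowed`.
        assertThat(Set.of(2), contains(in(allowed)));
        // Would fail: two elements, even though both are in `allowed`.
        // assertThat(Set.of(1, 2), contains(in(allowed)));
    }
}
--------------------------------------------------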
@@ -21,8 +21,7 @@
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
-import org.hamcrest.BaseMatcher;
-import org.hamcrest.Description;
+import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.junit.Before;

@@ -33,12 +32,12 @@
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
+import java.util.stream.IntStream;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.oneOf;

public class GeoPointScriptDocValuesIT extends ESSingleNodeTestCase {
@@ -255,28 +254,8 @@ public void testNullPoint() throws Exception {
);
}

-    private static MultiPointLabelPosition isMultiPointLabelPosition(double[] lats, double[] lons) {
-        return new MultiPointLabelPosition(lats, lons);
-    }
-
-    private static class MultiPointLabelPosition extends BaseMatcher<GeoPoint> {
-        private final GeoPoint[] points;
-
-        private MultiPointLabelPosition(double[] lats, double[] lons) {
-            points = new GeoPoint[lats.length];
-            for (int i = 0; i < lats.length; i++) {
-                points[i] = new GeoPoint(lats[i], lons[i]);
-            }
-        }
-
-        @Override
-        public boolean matches(Object actual) {
-            return is(oneOf(points)).matches(actual);
-        }
-
-        @Override
-        public void describeTo(Description description) {
-            description.appendText("is(oneOf(" + Arrays.toString(points) + ")");
-        }
+    private static Matcher<GeoPoint> isMultiPointLabelPosition(double[] lats, double[] lons) {
+        assert lats.length == lons.length;
+        return oneOf(IntStream.range(0, lats.length).mapToObj(i -> new GeoPoint(lats[i], lons[i])).toArray(GeoPoint[]::new));
}
}
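This is the same simplification pattern as in the disk-threshold test above: a hand-rolled `BaseMatcher` gives way to a stock combinator, here `oneOf` over an array zipped from two parallel arrays. A small sketch with hypothetical values:

[source,java]
--------------------------------------------------
import java.util.stream.IntStream;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.oneOf;

public class OneOfExample {
    public static void main(String[] args) {
        double[] xs = { 0.0, 1.0 };
        double[] ys = { 2.0, 3.0 };
        // Zip the two arrays into candidate values; oneOf matches any of them.
        String[] candidates = IntStream.range(0, xs.length)
            .mapToObj(i -> xs[i] + "/" + ys[i])
            .toArray(String[]::new);
        assertThat("1.0/3.0", oneOf(candidates));
    }
}
--------------------------------------------------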
@@ -140,7 +140,9 @@ static TransportVersion def(int id) {
public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0);
public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0);
public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0);
-    public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_603_00_0);
+    public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0);
+    public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_604_00_0);
+

/*
* STOP! READ THIS FIRST! No, really,
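This final hunk is the substantive conflict resolution of the merge: upstream/main had already claimed transport version id 8_603_00_0 for `ADD_DATA_STREAM_GLOBAL_RETENTION`, so this branch's `AGGS_EXCLUDED_DELETED_DOCS` moves to the next id, 8_604_00_0. Transport version ids must stay unique and strictly increasing, which is what the "STOP! READ THIS FIRST!" comment in `TransportVersions` warns about.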
