Merge branch 'main' into upgrade_junit
jakelandis authored Jan 5, 2024
2 parents ce46ccf + 1847889 commit 4e0eb58
Showing 208 changed files with 2,966 additions and 2,363 deletions.
@@ -43,6 +43,7 @@
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.BlockLoader;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
@@ -202,6 +203,11 @@ public Set<String> sourcePaths(String name) {
public String parentField(String field) {
throw new UnsupportedOperationException();
}

@Override
public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() {
return FieldNamesFieldMapper.FieldNamesFieldType.get(true);
}
});
}
throw new IllegalArgumentException("can't read [" + name + "]");
5 changes: 5 additions & 0 deletions docs/changelog/103632.yaml
@@ -0,0 +1,5 @@
pr: 103632
summary: "ESQL: Check field exists before load from `_source`"
area: ES|QL
type: enhancement
issues: []
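
A hedged sketch of the idea behind this entry follows; the helper names below are hypothetical, and only the general pattern (check field existence first, then fall back to `_source`) comes from the summary and the `FieldNamesFieldType` plumbing added in this commit.

import java.util.Set;
import java.util.function.Supplier;

class FieldExistsSketch {
    // Hypothetical illustration: consult per-document field-existence metadata (what the
    // _field_names field records) before paying the cost of loading the value from _source.
    // Names and shape are invented; the commit only shows the test plumbing that exposes
    // FieldNamesFieldMapper.FieldNamesFieldType.
    static Object loadField(Set<String> fieldsPresentInDoc, String field, Supplier<Object> loadFromSource) {
        return fieldsPresentInDoc.contains(field) ? loadFromSource.get() : null;
    }
}
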
14 changes: 14 additions & 0 deletions docs/changelog/103898.yaml
@@ -0,0 +1,14 @@
pr: 103898
summary: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes.
area: TSDB
type: breaking
issues: []
breaking:
title: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes.
area: Index setting
details: Lower the `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes.
impact: >
Documents with a @timestamp of 30 minutes or more in the future will be rejected.
Previously, documents with a @timestamp of 2 hours or more in the future were rejected.
To keep the previous behaviour, update the `index.look_ahead_time` setting to two hours before performing the upgrade.
notable: false
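
For reference, a minimal sketch of the settings payload that keeps the previous behaviour; the `index.look_ahead_time` key and the two-hour value come from this changelog entry, while the class name and where the payload is applied (index template vs. an update-settings request) are assumptions about your setup.

import org.elasticsearch.common.settings.Settings;

class LookAheadUpgradePrep {
    // Sketch only: restores the previous two-hour look-ahead window (the new default is 30m).
    // Apply this through your index template or an update-settings request before upgrading.
    static final Settings KEEP_OLD_LOOK_AHEAD = Settings.builder()
        .put("index.look_ahead_time", "2h")
        .build();
}
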
5 changes: 5 additions & 0 deletions docs/changelog/103923.yaml
@@ -0,0 +1,5 @@
pr: 103923
summary: Preserve response headers in Datafeed preview
area: Machine Learning
type: bug
issues: []
11 changes: 9 additions & 2 deletions docs/reference/troubleshooting/corruption-issues.asciidoc
@@ -38,13 +38,20 @@ well-tested, so you can be very confident that a checksum mismatch really does
indicate that the data read from disk is different from the data that {es}
previously wrote.

It is also possible that {es} reports a corruption if a file it needs is
entirely missing, with an exception such as:

- `java.io.FileNotFoundException`
- `java.nio.file.NoSuchFileException`

The files that make up a Lucene index are written in full before they are used.
If a file is needed to recover an index after a restart then your storage
system previously confirmed to {es} that this file was durably synced to disk.
On Linux this means that the `fsync()` system call returned successfully. {es}
sometimes reports that an index is corrupt because a file needed for recovery
has been truncated or is missing its footer. This indicates that your storage
system acknowledges durable writes incorrectly.
is missing, or it exists but has been truncated or is missing its footer. This
indicates that your storage system acknowledges durable writes incorrectly or
that some external process has modified the data {es} previously wrote to disk.
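
As a purely illustrative sketch (not {es} code), this is roughly what a durable write looks like at the JDK level: `FileChannel.force(true)` maps to `fsync()` on Linux, so a successful return is the storage system's promise that the written data survives a crash or power loss.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class DurableWriteSketch {
    // Hypothetical helper: write a buffer and return only after the OS reports a durable sync.
    // If force(true) returns normally and the file later turns up missing or truncated,
    // the storage layer acknowledged a write it did not actually persist.
    static void writeDurably(Path path, byte[] data) throws IOException {
        try (FileChannel channel = FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            channel.write(ByteBuffer.wrap(data));
            channel.force(true); // fsync: flush file contents and metadata
        }
    }
}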

There are many possible explanations for {es} detecting corruption in your
cluster. Databases like {es} generate a challenging I/O workload that may find
@@ -93,7 +93,7 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
private static final TimeValue MAX_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2);
public static final Setting<TimeValue> LOOK_AHEAD_TIME = Setting.timeSetting(
"index.look_ahead_time",
TimeValue.timeValueHours(2),
TimeValue.timeValueMinutes(30),
TimeValue.timeValueMinutes(1),
TimeValue.timeValueDays(7), // is effectively 2h now.
Setting.Property.IndexScope,
@@ -27,7 +27,7 @@
import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry;
import org.elasticsearch.action.downsample.DownsampleAction;
@@ -895,7 +895,7 @@ private Set<Index> maybeExecuteForceMerge(ClusterState state, List<Index> indice
transportActionsDeduplicator.executeOnce(
updateMergePolicySettingsRequest,
new ErrorRecordingActionListener(
UpdateSettingsAction.NAME,
TransportUpdateSettingsAction.TYPE.name(),
indexName,
errorStore,
Strings.format(
@@ -26,9 +26,7 @@
*/
public class DeleteDataStreamLifecycleAction {

public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.acknowledgedResponse(
"indices:admin/data_stream/lifecycle/delete"
);
public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.localOnly("indices:admin/data_stream/lifecycle/delete");

private DeleteDataStreamLifecycleAction() {/* no instances */}

@@ -40,9 +40,7 @@
*/
public class PutDataStreamLifecycleAction {

public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.acknowledgedResponse(
"indices:admin/data_stream/lifecycle/put"
);
public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.localOnly("indices:admin/data_stream/lifecycle/put");

private PutDataStreamLifecycleAction() {/* no instances */}

@@ -87,24 +87,24 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception {
assertThat(backingIndex, notNullValue());
// Ensure truncate to seconds:
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T06:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T10:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T08:59:36.000Z"));

// advance time and rollover:
time = time.plusSeconds(80 * 60);
time = time.plusSeconds(20 * 60);
var result = rolloverOver(state, "logs-myapp", time);
state = result.clusterState();

DataStream dataStream = state.getMetadata().dataStreams().get("logs-myapp");
backingIndex = state.getMetadata().index(dataStream.getIndices().get(1));
assertThat(backingIndex, notNullValue());
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T10:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T12:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T08:59:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T09:29:36.000Z"));
String secondBackingIndex = backingIndex.getIndex().getName();

// first backing index:
{
long start = MILLIS_FORMATTER.parseMillis("2022-03-15T06:29:36.000Z");
long end = MILLIS_FORMATTER.parseMillis("2022-03-15T10:29:36.000Z") - 1;
long end = MILLIS_FORMATTER.parseMillis("2022-03-15T08:59:36.000Z") - 1;
for (int i = 0; i < 256; i++) {
String timestamp = MILLIS_FORMATTER.formatMillis(randomLongBetween(start, end));
var writeIndex = getWriteIndex(state, "logs-myapp", timestamp);
@@ -114,14 +114,14 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception {

// Borderline:
{
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T10:29:35.999Z");
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T08:59:35.999Z");
assertThat(writeIndex.getName(), equalTo(".ds-logs-myapp-2022.03.15-000001"));
}

// Second backing index:
{
long start = MILLIS_FORMATTER.parseMillis("2022-03-15T10:29:36.000Z");
long end = MILLIS_FORMATTER.parseMillis("2022-03-15T12:29:36.000Z") - 1;
long start = MILLIS_FORMATTER.parseMillis("2022-03-15T08:59:36.000Z");
long end = MILLIS_FORMATTER.parseMillis("2022-03-15T09:29:36.000Z") - 1;
for (int i = 0; i < 256; i++) {
String timestamp = MILLIS_FORMATTER.formatMillis(randomLongBetween(start, end));
var writeIndex = getWriteIndex(state, "logs-myapp", timestamp);
@@ -131,19 +131,19 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception {

// Borderline (again):
{
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T12:29:35.999Z");
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T09:29:35.999Z");
assertThat(writeIndex.getName(), equalTo(secondBackingIndex));
}

// Outside the valid temporal ranges:
{
var finalState = state;
var e = expectThrows(IllegalArgumentException.class, () -> getWriteIndex(finalState, "logs-myapp", "2022-03-15T12:29:36.000Z"));
var e = expectThrows(IllegalArgumentException.class, () -> getWriteIndex(finalState, "logs-myapp", "2022-03-15T09:29:36.000Z"));
assertThat(
e.getMessage(),
equalTo(
"the document timestamp [2022-03-15T12:29:36.000Z] is outside of ranges of currently writable indices ["
+ "[2022-03-15T06:29:36.000Z,2022-03-15T10:29:36.000Z][2022-03-15T10:29:36.000Z,2022-03-15T12:29:36.000Z]]"
"the document timestamp [2022-03-15T09:29:36.000Z] is outside of ranges of currently writable indices ["
+ "[2022-03-15T06:29:36.000Z,2022-03-15T08:59:36.000Z][2022-03-15T08:59:36.000Z,2022-03-15T09:29:36.000Z]]"
)
);
}
@@ -158,24 +158,24 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception {
assertThat(backingIndex, notNullValue());
// Ensure truncate to seconds and millis format:
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T06:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T10:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T08:59:36.000Z"));

// advance time and rollover:
time = time.plusSeconds(80 * 60);
time = time.plusSeconds(20 * 60);
var result = rolloverOver(state, "logs-myapp", time);
state = result.clusterState();

DataStream dataStream = state.getMetadata().dataStreams().get("logs-myapp");
backingIndex = state.getMetadata().index(dataStream.getIndices().get(1));
assertThat(backingIndex, notNullValue());
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T10:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T12:29:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T08:59:36.000Z"));
assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T09:29:36.000Z"));
String secondBackingIndex = backingIndex.getIndex().getName();

// first backing index:
{
long start = NANOS_FORMATTER.parseMillis("2022-03-15T06:29:36.000000000Z");
long end = NANOS_FORMATTER.parseMillis("2022-03-15T10:29:36.000000000Z") - 1;
long end = NANOS_FORMATTER.parseMillis("2022-03-15T08:59:36.000000000Z") - 1;
for (int i = 0; i < 256; i++) {
String timestamp = NANOS_FORMATTER.formatMillis(randomLongBetween(start, end));
var writeIndex = getWriteIndex(state, "logs-myapp", timestamp);
@@ -185,14 +185,14 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception {

// Borderline:
{
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T10:29:35.999999999Z");
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T08:59:35.999999999Z");
assertThat(writeIndex.getName(), equalTo(".ds-logs-myapp-2022.03.15-000001"));
}

// Second backing index:
{
long start = NANOS_FORMATTER.parseMillis("2022-03-15T10:29:36.000000000Z");
long end = NANOS_FORMATTER.parseMillis("2022-03-15T12:29:36.000000000Z") - 1;
long start = NANOS_FORMATTER.parseMillis("2022-03-15T08:59:36.000000000Z");
long end = NANOS_FORMATTER.parseMillis("2022-03-15T09:29:36.000000000Z") - 1;
for (int i = 0; i < 256; i++) {
String timestamp = NANOS_FORMATTER.formatMillis(randomLongBetween(start, end));
var writeIndex = getWriteIndex(state, "logs-myapp", timestamp);
@@ -202,7 +202,7 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception {

// Borderline (again):
{
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T12:29:35.999999999Z");
var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T09:29:35.999999999Z");
assertThat(writeIndex.getName(), equalTo(secondBackingIndex));
}
}
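
The routing rule both tests exercise reduces to a half-open range check against each backing index's `index.time_series.start_time`/`end_time` bounds (start inclusive, end exclusive), which is why 08:59:35.999Z still lands in the first index while 08:59:36.000Z rolls over to the second. A minimal, hypothetical sketch of that check, not the production routing code:

import java.time.Instant;
import java.util.List;

class TimeSeriesRoutingSketch {
    // Hypothetical stand-in for a backing index and its writable time range.
    record IndexRange(String name, Instant start, Instant end) {}

    // Pick the write index whose [start, end) range contains the document timestamp.
    static String pickWriteIndex(List<IndexRange> ranges, Instant timestamp) {
        for (IndexRange range : ranges) {
            if (timestamp.isBefore(range.start()) == false && timestamp.isBefore(range.end())) {
                return range.name();
            }
        }
        throw new IllegalArgumentException(
            "the document timestamp [" + timestamp + "] is outside of ranges of currently writable indices"
        );
    }
}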