Merge branch 'main' into catch_pytorch_startup_exceptions
droberts195 committed Jan 4, 2024
2 parents c1e07c7 + 3bd597c commit 34bf544
Showing 136 changed files with 2,412 additions and 1,201 deletions.
@@ -45,7 +45,6 @@
@State(Scope.Thread)
@Fork(1)
public class MultivalueDedupeBenchmark {
private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays?
private static final BlockFactory blockFactory = BlockFactory.getInstance(
new NoopCircuitBreaker("noop"),
BigArrays.NON_RECYCLING_INSTANCE
@@ -31,6 +31,7 @@
import org.elasticsearch.compute.data.DocVector;
import org.elasticsearch.compute.data.DoubleBlock;
import org.elasticsearch.compute.data.DoubleVector;
import org.elasticsearch.compute.data.ElementType;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.IntVector;
import org.elasticsearch.compute.data.LongBlock;
@@ -96,8 +97,12 @@ public class ValuesSourceReaderBenchmark {
for (String name : ValuesSourceReaderBenchmark.class.getField("name").getAnnotationsByType(Param.class)[0].value()) {
benchmark.layout = layout;
benchmark.name = name;
benchmark.setupPages();
benchmark.benchmark();
try {
benchmark.setupPages();
benchmark.benchmark();
} catch (Exception e) {
throw new AssertionError("error initializing [" + layout + "/" + name + "]", e);
}
}
}
} finally {
@@ -111,11 +116,11 @@ public class ValuesSourceReaderBenchmark {
private static List<ValuesSourceReaderOperator.FieldInfo> fields(String name) {
return switch (name) {
case "3_stored_keywords" -> List.of(
new ValuesSourceReaderOperator.FieldInfo("keyword_1", List.of(blockLoader("stored_keyword_1"))),
new ValuesSourceReaderOperator.FieldInfo("keyword_2", List.of(blockLoader("stored_keyword_2"))),
new ValuesSourceReaderOperator.FieldInfo("keyword_3", List.of(blockLoader("stored_keyword_3")))
new ValuesSourceReaderOperator.FieldInfo("keyword_1", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_1")),
new ValuesSourceReaderOperator.FieldInfo("keyword_2", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_2")),
new ValuesSourceReaderOperator.FieldInfo("keyword_3", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_3"))
);
default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(blockLoader(name))));
default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, elementType(name), shardIdx -> blockLoader(name)));
};
}

@@ -125,29 +130,38 @@ enum Where {
STORED;
}

private static BlockLoader blockLoader(String name) {
Where where = Where.DOC_VALUES;
if (name.startsWith("stored_")) {
name = name.substring("stored_".length());
where = Where.STORED;
} else if (name.startsWith("source_")) {
name = name.substring("source_".length());
where = Where.SOURCE;
}
private static ElementType elementType(String name) {
name = WhereAndBaseName.fromName(name).name;
switch (name) {
case "long":
return numericBlockLoader(name, where, NumberFieldMapper.NumberType.LONG);
return ElementType.LONG;
case "int":
return numericBlockLoader(name, where, NumberFieldMapper.NumberType.INTEGER);
return ElementType.INT;
case "double":
return numericBlockLoader(name, where, NumberFieldMapper.NumberType.DOUBLE);
case "keyword":
name = "keyword_1";
return ElementType.DOUBLE;
}
if (name.startsWith("keyword")) {
return ElementType.BYTES_REF;
}
throw new UnsupportedOperationException("no element type for [" + name + "]");
}

private static BlockLoader blockLoader(String name) {
WhereAndBaseName w = WhereAndBaseName.fromName(name);
switch (w.name) {
case "long":
return numericBlockLoader(w, NumberFieldMapper.NumberType.LONG);
case "int":
return numericBlockLoader(w, NumberFieldMapper.NumberType.INTEGER);
case "double":
return numericBlockLoader(w, NumberFieldMapper.NumberType.DOUBLE);
case "keyword":
w = new WhereAndBaseName(w.where, "keyword_1");
}
if (w.name.startsWith("keyword")) {
boolean syntheticSource = false;
FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE);
switch (where) {
switch (w.where) {
case DOC_VALUES:
break;
case SOURCE:
@@ -161,7 +175,7 @@ private static BlockLoader blockLoader(String name) {
}
ft.freeze();
return new KeywordFieldMapper.KeywordFieldType(
name,
w.name,
ft,
Lucene.KEYWORD_ANALYZER,
Lucene.KEYWORD_ANALYZER,
@@ -193,10 +207,21 @@ public String parentField(String field) {
throw new IllegalArgumentException("can't read [" + name + "]");
}

private static BlockLoader numericBlockLoader(String name, Where where, NumberFieldMapper.NumberType numberType) {
private record WhereAndBaseName(Where where, String name) {
static WhereAndBaseName fromName(String name) {
if (name.startsWith("stored_")) {
return new WhereAndBaseName(Where.STORED, name.substring("stored_".length()));
} else if (name.startsWith("source_")) {
return new WhereAndBaseName(Where.SOURCE, name.substring("source_".length()));
}
return new WhereAndBaseName(Where.DOC_VALUES, name);
}
}

private static BlockLoader numericBlockLoader(WhereAndBaseName w, NumberFieldMapper.NumberType numberType) {
boolean stored = false;
boolean docValues = true;
switch (where) {
switch (w.where) {
case DOC_VALUES:
break;
case SOURCE:
@@ -207,7 +232,7 @@ private static BlockLoader numericBlockLoader(String name, Where where, NumberFi
throw new UnsupportedOperationException();
}
return new NumberFieldMapper.NumberFieldType(
name,
w.name,
numberType,
true,
stored,
5 changes: 5 additions & 0 deletions docs/changelog/101487.yaml
@@ -0,0 +1,5 @@
pr: 101487
summary: Wait for async searches to finish when shutting down
area: Infra/Node Lifecycle
type: enhancement
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/102207.yaml
@@ -0,0 +1,6 @@
pr: 102207
summary: Fix disk computation when initializing unassigned shards in desired balance
  computation
area: Allocation
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/103453.yaml
@@ -0,0 +1,5 @@
pr: 103453
summary: Add expiration time to update api key api
area: Security
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/103821.yaml
@@ -0,0 +1,5 @@
pr: 103821
summary: "ESQL: Delay finding field load infrastructure"
area: ES|QL
type: enhancement
issues: []
1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/add.asciidoc
@@ -2,6 +2,7 @@
|===
lhs | rhs | result
date_period | date_period | date_period
date_period | datetime | datetime
datetime | date_period | datetime
datetime | time_duration | datetime
double | double | double
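
The new `date_period | datetime` row above means the period operand can now appear on the left-hand side of `+`. As a hedged sketch of what should now type-check (index and field names assumed from the {esql} getting-started guide, not from this change):

[source,esql]
----
FROM sample_data
| EVAL one_day_later = 1 day + @timestamp
----
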
11 changes: 8 additions & 3 deletions docs/reference/rest-api/security/bulk-update-api-keys.asciidoc
@@ -30,7 +30,7 @@ This operation can greatly improve performance over making individual updates.

It's not possible to update expired or <<security-api-invalidate-api-key,invalidated>> API keys.

This API supports updates to API key access scope and metadata.
This API supports updates to API key access scope, metadata and expiration.
The access scope of each API key is derived from the <<security-api-bulk-update-api-keys-api-key-role-descriptors,`role_descriptors`>> you specify in the request, and a snapshot of the owner user's permissions at the time of the request.
The snapshot of the owner's permissions is updated automatically on every call.

@@ -63,6 +63,9 @@ The structure of a role descriptor is the same as the request for the <<api-key-
Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage.
Any information specified with this parameter fully replaces metadata previously associated with the API key.

`expiration`::
(Optional, string) Expiration time for the API keys. By default, API keys never expire. Can be omitted to leave unchanged.

[[security-api-bulk-update-api-keys-response-body]]
==== {api-response-body-title}

@@ -166,7 +169,8 @@ Further, assume that the owner user's permissions are:
--------------------------------------------------
// NOTCONSOLE

The following example updates the API keys created above, assigning them new role descriptors and metadata.
The following example updates the API keys created above, assigning them new role descriptors and metadata, and updating their expiration time.

[source,console]
----
@@ -192,7 +196,8 @@ POST /_security/api_key/_bulk_update
"trusted": true,
"tags": ["production"]
}
}
},
"expiration": "30d"
}
----
// TEST[skip:api key ids not available]
5 changes: 4 additions & 1 deletion docs/reference/rest-api/security/update-api-key.asciidoc
@@ -30,7 +30,7 @@ If you need to apply the same update to many API keys, you can use <<security-ap

It's not possible to update expired API keys, or API keys that have been invalidated by <<security-api-invalidate-api-key,invalidate API Key>>.

This API supports updates to an API key's access scope and metadata.
This API supports updates to an API key's access scope, metadata and expiration.
The access scope of an API key is derived from the <<security-api-update-api-key-api-key-role-descriptors,`role_descriptors`>> you specify in the request, and a snapshot of the owner user's permissions at the time of the request.
The snapshot of the owner's permissions is updated automatically on every call.

@@ -67,6 +67,9 @@ It supports nested data structure.
Within the `metadata` object, top-level keys beginning with `_` are reserved for system usage.
When specified, this fully replaces metadata previously associated with the API key.

`expiration`::
(Optional, string) Expiration time for the API key. By default, API keys never expire. Can be omitted to leave unchanged.
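
For example, a request along these lines sets a new expiration of 30 days from now (a hedged sketch; the key ID is a placeholder):

[source,console]
----
PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx
{
  "expiration": "30d"
}
----
// TEST[skip:api key id not available]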

[[security-api-update-api-key-response-body]]
==== {api-response-body-title}

@@ -34,7 +34,7 @@ Use this API to update cross-cluster API keys created by the <<security-api-crea
It's not possible to update expired API keys, or API keys that have been invalidated by
<<security-api-invalidate-api-key,invalidate API Key>>.

This API supports updates to an API key's access scope and metadata.
This API supports updates to an API key's access scope, metadata and expiration.
The owner user's information, e.g. `username`, `realm`, is also updated automatically on every call.

NOTE: This API cannot update <<security-api-create-api-key,REST API keys>>, which should be updated by
@@ -66,6 +66,9 @@ It supports nested data structure.
Within the `metadata` object, top-level keys beginning with `_` are reserved for system usage.
When specified, this fully replaces metadata previously associated with the API key.

`expiration`::
(Optional, string) Expiration time for the API key. By default, API keys never expire. Can be omitted to leave unchanged.
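
For example, a request of roughly this shape extends a cross-cluster key's lifetime to 90 days from now (a hedged sketch; the key ID is a placeholder):

[source,console]
----
PUT /_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx
{
  "expiration": "90d"
}
----
// TEST[skip:api key id not available]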

[[security-api-update-cross-cluster-api-key-response-body]]
==== {api-response-body-title}

@@ -58,9 +58,9 @@ DELETE /_enrich/policy/clientip_policy

// tag::demo-env[]

On the demo environment at https://esql.demo.elastic.co/[esql.demo.elastic.co],
On the demo environment at https://ela.st/ql/[ela.st/ql],
an enrich policy called `clientip_policy` has already been created and executed.
The policy links an IP address to an environment ("Development", "QA", or
"Production")
"Production").

// end::demo-env[]
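
Once the policy is in place, a query can apply it roughly as follows (a sketch assuming the demo data's `client_ip` field and the policy's `env` enrich field):

[source,esql]
----
FROM sample_data
| ENRICH clientip_policy ON client_ip WITH env
----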
@@ -43,6 +43,6 @@ PUT sample_data/_bulk

The data set used in this guide has been preloaded into the Elastic {esql}
public demo environment. Visit
https://esql.demo.elastic.co/[esql.demo.elastic.co] to start using it.
https://ela.st/ql[ela.st/ql] to start using it.

// end::demo-env[]
@@ -39,6 +39,7 @@
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -76,6 +77,7 @@ public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase {
.version(getOldClusterTestVersion())
.nodes(2)
.setting("xpack.security.enabled", "false")
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.apply(() -> clusterConfig)
.build();

@@ -11,6 +11,7 @@
import com.carrotsearch.randomizedtesting.annotations.Name;

import org.apache.http.HttpHost;
import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.Build;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.client.Request;
@@ -38,6 +39,7 @@
* In 8.2 we also added the ability to filter fields by type and metadata, with some post-hoc filtering applied on
* the co-ordinating node if older nodes were included in the system
*/
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103473")
public class FieldCapsIT extends ParameterizedRollingUpgradeTestCase {

public FieldCapsIT(@Name("upgradedNodes") int upgradedNodes) {
@@ -65,9 +65,14 @@ private static void executeReloadSecureSettings(
SecureString password,
ActionListener<NodesReloadSecureSettingsResponse> listener
) {
final var request = new NodesReloadSecureSettingsRequest(nodeIds);
request.setSecureStorePassword(password);
client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, listener);
final var request = new NodesReloadSecureSettingsRequest();
try {
request.nodesIds(nodeIds);
request.setSecureStorePassword(password);
client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, listener);
} finally {
request.decRef();
}
}

private static SecureString emptyPassword() {
@@ -14,7 +14,6 @@
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
@@ -97,7 +96,7 @@ void syncFlush(String syncId) throws IOException {
// make sure that background merges won't happen; otherwise, IndexWriter#hasUncommittedChanges can become true again
forceMerge(false, 1, false, UUIDs.randomBase64UUID());
assertNotNull(indexWriter);
try (ReleasableLock ignored = readLock.acquire()) {
try (var ignored = acquireEnsureOpenRef()) {
assertThat(getTranslogStats().getUncommittedOperations(), equalTo(0));
Map<String, String> userData = new HashMap<>(getLastCommittedSegmentInfos().userData);
SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet());
@@ -782,10 +782,13 @@ public Settings onNodeStopped(String nodeName) {
* Tests shard recovery throttling on the target node. Node statistics should show throttling time on the target node, while no
* throttling should be shown on the source node because the target will accept data more slowly than the source's throttling threshold.
*/
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103204")
public void testTargetThrottling() throws Exception {
logger.info("--> starting node A with default settings");
final String nodeA = internalCluster().startNode();
final String nodeA = internalCluster().startNode(
Settings.builder()
// Use a high value so that when unthrottling recoveries we do not cause accidental throttling on the source node.
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb")
);

logger.info("--> creating index on node A");
ByteSizeValue shardSize = createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT_1, REPLICA_COUNT_0).getShards()[0].getStats()
2 changes: 2 additions & 0 deletions server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -176,6 +176,8 @@ static TransportVersion def(int id) {
public static final TransportVersion ESQL_STATUS_INCLUDE_LUCENE_QUERIES = def(8_564_00_0);
public static final TransportVersion ESQL_CLUSTER_ALIAS = def(8_565_00_0);
public static final TransportVersion SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED = def(8_566_00_0);
public static final TransportVersion SMALLER_RELOAD_SECURE_SETTINGS_REQUEST = def(8_567_00_0);
public static final TransportVersion UPDATE_API_KEY_EXPIRATION_TIME_ADDED = def(8_568_00_0);

/*
* STOP! READ THIS FIRST! No, really,
