diff --git a/CHANGELOG.md b/CHANGELOG.md index 211ceca1acb9d..de3a757256abd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,10 +13,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) - Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) - Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) +- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) - Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) - Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) ### Dependencies - Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) @@ -38,9 +40,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) - Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) +- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e7e995fbbb310..060f8435a4d9a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -30,8 +30,8 @@ netty = 4.1.107.Final joda = 2.12.2 # project reactor -reactor_netty = 1.1.15 -reactor = 3.5.14 +reactor_netty = 1.1.17 +reactor = 3.5.15 # client dependencies httpclient = 4.5.14 diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java index f9af611553aff..e5e62c795e5d0 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java @@ -31,4 +31,19 @@ public SpanContext(Span span) { Span getSpan() { return span; } + + /** + * Sets the 
error for the current span behind this context + * @param cause error + */ + public void setError(final Exception cause) { + span.setError(cause); + } + + /** + * Ends current span + */ + public void endSpan() { + span.endSpan(); + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java index cbbcfe7a85d57..6af7c440f8de9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -79,8 +79,8 @@ public SpanCreationContext attributes(Attributes attributes) { } /** - * Sets the parent for spann - * @param parent parent + * Sets the parent for span + * @param parent parent span context * @return spanCreationContext */ public SpanCreationContext parent(SpanContext parent) { diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java index 705273f52a567..9ec8673147c38 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -116,7 +116,7 @@ public void onPhaseStart(SearchPhaseContext context) {} public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - public void onPhaseFailure(SearchPhaseContext context) {} + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} @Override public void onRequestStart(SearchRequestContext searchRequestContext) {} diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 deleted file mode 100644 index c30a99a2338b4..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 deleted file mode 100644 index ab3171cd02b73..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 deleted file mode 100644 index c30a99a2338b4..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3221d405ad55a573cf29875a8244a4217cf07185 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 deleted file mode 100644 index ab3171cd02b73..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c79756fa2dfc28ac81fc9d23a14b17c656c3e560 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 02fbcc36dfe64..986bce55f41e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -63,6 +63,10 @@ "wait_for_completion": { "type" : "boolean", "description" : "If false, the request will return a task immediately and the operation will run in background. Defaults to true." + }, + "primary_only": { + "type" : "boolean", + "description" : "Specify whether the operation should be performed only on primary shards. Defaults to false."
} } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index d62c4c8882b13..7410e020e1a91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -27,3 +27,23 @@ index: test max_num_segments: 10 only_expunge_deletes: true + +--- +"Test primary_only parameter": + - skip: + version: " - 2.12.99" + reason: "primary_only is available in 2.13.0+" + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 1 + + - do: + indices.forcemerge: + index: test + primary_only: true + - match: { _shards.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml index 1d6c63558bb59..dd95a46f7dee3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml @@ -4,15 +4,17 @@ # will return a task immediately and the merge process will run in background. - skip: - version: " - 2.6.99" - reason: "only available in 2.7+" - features: allowed_warnings + version: " - 2.6.99, 2.13.0 - " + reason: "wait_for_completion was introduced in 2.7.0 and task description was changed in 2.13.0" + features: allowed_warnings, node_selector - do: indices.create: index: test_index - do: + node_selector: + version: " 2.7.0 - 2.12.99" indices.forcemerge: index: test_index wait_for_completion: false @@ -27,6 +29,30 @@ - match: { task.action: "indices:admin/forcemerge" } - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" } +--- +"Force merge index with wait_for_completion after task description changed": + - skip: + version: " - 2.12.99 " + reason: "task description was changed in 2.13.0" + features: allowed_warnings, node_selector + + - do: + node_selector: + version: " 2.13.0 - " + indices.forcemerge: + index: test_index + wait_for_completion: false + max_num_segments: 1 + - match: { task: /^.+$/ } + - set: { task: taskId } + + - do: + tasks.get: + wait_for_completion: true + task_id: $taskId + - match: { task.action: "indices:admin/forcemerge" } + - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } + # .tasks index is created when the force-merge operation completes, so we should delete .tasks index finally, # if not, the .tasks index may introduce unexpected warnings and then cause other test cases to fail. 
# Delete the .tasks index directly will also introduce warning, but currently we don't have such APIs which can delete one diff --git a/server/licenses/reactor-core-3.5.14.jar.sha1 b/server/licenses/reactor-core-3.5.14.jar.sha1 deleted file mode 100644 index 3b58e7a68bade..0000000000000 --- a/server/licenses/reactor-core-3.5.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e0c97c2e78273a00fd4ed38016b19ff3c6de59e \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1 new file mode 100644 index 0000000000000..02df47ed58b9d --- /dev/null +++ b/server/licenses/reactor-core-3.5.15.jar.sha1 @@ -0,0 +1 @@ +4e07a24c671235a2a806e75e9b8ff23d7d1db3d4 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java index 09af533292e9a..5090af1706d5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -100,6 +100,24 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(primaryForceMergeUUID, is(replicaForceMergeUUID)); } + public void testForceMergeOnlyOnPrimaryShards() throws IOException { + internalCluster().ensureAtLeastNumDataNodes(2); + final String index = "test-index"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen(index); + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(index) + .setMaxNumSegments(1) + .setPrimaryOnly(true) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); + } + private static String getForceMergeUUID(IndexShard indexShard) throws IOException { try (GatedCloseable wrappedIndexCommit = indexShard.acquireLastIndexCommit(true)) { return wrappedIndexCommit.get().getUserData().get(Engine.FORCE_MERGE_UUID_KEY); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 72e680e22ed75..8ce87f37d77cd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -298,7 +298,6 @@ public void testGatewayRecovery() throws Exception { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT)); - assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index d4de69fe5f325..c6ecd7d62c7d1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -25,6 +25,7 @@ import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.get.GetResponse; @@ -400,6 +401,14 @@ public void testMultipleShards() throws Exception { } public void testReplicationAfterForceMerge() throws Exception { + performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT)); + } + + public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception { + performReplicationAfterForceMerge(true, SHARD_COUNT); + } + + private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); @@ -430,8 +439,16 @@ public void testReplicationAfterForceMerge() throws Exception { waitForDocs(expectedHitCount, indexer); waitForSearchableDocs(expectedHitCount, nodeA, nodeB); - // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + // Force merge; when primaryOnly is set, the merge is restricted to the primary shards. + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(INDEX_NAME) + .setPrimaryOnly(primaryOnly) + .setMaxNumSegments(1) + .setFlush(false) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards)); refresh(INDEX_NAME); verifyStoreContent(); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index be96b54754fee..521cafbe4d765 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -70,11 +70,13 @@ public static final class Defaults { public static final int MAX_NUM_SEGMENTS = -1; public static final boolean ONLY_EXPUNGE_DELETES = false; public static final boolean FLUSH = true; + public static final boolean PRIMARY_ONLY = false; } private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; + private boolean primaryOnly = Defaults.PRIMARY_ONLY; private static final Version FORCE_MERGE_UUID_VERSION = LegacyESVersion.V_7_7_0; @@ -102,6 +104,9 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + primaryOnly = in.readBoolean(); + } if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { forceMergeUUID = in.readOptionalString(); } else { @@ -167,6 +172,21 @@ public ForceMergeRequest flush(boolean flush) { return this; } + /** + * Whether the force merge should be performed only on primary shards. Defaults to {@code false}. + */ + public boolean primaryOnly() { + return primaryOnly; + } + + /** + * Whether the force merge should be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequest primaryOnly(boolean primaryOnly) { + this.primaryOnly = primaryOnly; + return this; + } + /** * Should this task store its result after it has finished? */ @@ -189,6 +209,8 @@ public String getDescription() { + onlyExpungeDeletes + "], flush[" + flush + + "], primaryOnly[" + + primaryOnly + "]"; } @@ -198,6 +220,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeBoolean(primaryOnly); + } if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { out.writeOptionalString(forceMergeUUID); } @@ -212,6 +237,8 @@ public String toString() { + onlyExpungeDeletes + ", flush=" + flush + + ", primaryOnly=" + + primaryOnly + '}'; } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index d8a618a1828ad..10b9749f16b27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -81,4 +81,12 @@ public ForceMergeRequestBuilder setFlush(boolean flush) { request.flush(flush); return this; } + + /** + * Whether the force merge should be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequestBuilder setPrimaryOnly(boolean primaryOnly) { + request.primaryOnly(primaryOnly); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index fb8eb86c12269..b71c75462900a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -115,11 +115,16 @@ protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting sha } /** - * The refresh request works against *all* shards. + * The force merge request works against *all* shards by default, but it can be restricted + * to primary shards only by setting primary_only to true.
*/ @Override protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) { - return clusterState.routingTable().allShards(concreteIndices); + if (request.primaryOnly()) { + return clusterState.routingTable().allShardsSatisfyingPredicate(concreteIndices, ShardRouting::primary); + } else { + return clusterState.routingTable().allShards(concreteIndices); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 3b99ea9cc1cd2..0520a4a7aecec 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -58,6 +58,10 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.ArrayDeque; @@ -116,6 +120,7 @@ abstract class AbstractSearchAsyncAction exten private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; private final SearchRequestContext searchRequestContext; + private final Tracer tracer; private SearchPhase currentPhase; private boolean currentPhaseHasLifecycle; @@ -140,7 +145,8 @@ abstract class AbstractSearchAsyncAction exten SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -177,6 +183,7 @@ abstract class AbstractSearchAsyncAction exten this.results = resultConsumer; this.clusters = clusters; this.searchRequestContext = searchRequestContext; + this.tracer = tracer; } @Override @@ -221,6 +228,7 @@ public final void start() { null ) ); + onRequestEnd(searchRequestContext); return; } executePhase(this); @@ -460,7 +468,8 @@ private void onRequestEnd(SearchRequestContext searchRequestContext) { } private void executePhase(SearchPhase phase) { - try { + Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]")); + try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) { onPhaseStart(phase); phase.recordAndRun(); } catch (Exception e) { @@ -468,7 +477,15 @@ private void executePhase(SearchPhase phase) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } + if (currentPhaseHasLifecycle == false) { + phaseSpan.setError(e); + } + onPhaseFailure(phase, "", e); + } finally { + if (currentPhaseHasLifecycle == false) { + phaseSpan.endSpan(); + } } } @@ -733,7 +750,7 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { if (currentPhaseHasLifecycle) { - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause); } raisePhaseFailure(new 
SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index c693eea4a2c33..952d83b9e4539 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -44,6 +44,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Comparator; @@ -91,7 +92,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -112,7 +114,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index c26bd5eef8c15..c8ab5fdaf61a1 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -43,6 +43,7 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Map; @@ -82,7 +83,8 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index 74e04d976cb1c..a9a07c6aca7f4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -140,7 +140,7 @@ protected void onPhaseStart(SearchPhaseContext context) {} protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - protected void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} @Override protected void onRequestStart(SearchRequestContext searchRequestContext) {} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index ac32b08afb7f6..97ef94055faf7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -71,7 +71,7 @@ 
protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searc } @Override - protected void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 3d1a25a8aa01f..65cfd35489033 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -88,6 +88,12 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; +import org.opensearch.telemetry.tracing.listener.TraceableSearchRequestOperationsListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -173,6 +179,7 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -215,6 +223,7 @@ public TransportSearchAction( this.searchRequestOperationsCompositeListenerFactory = searchRequestOperationsCompositeListenerFactory; clusterService.getClusterSettings() .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + this.tracer = tracer; } private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { @@ -384,7 +393,8 @@ public AbstractSearchAsyncAction asyncSearchAction( new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ) { @Override protected void executePhaseOnShard( @@ -431,49 +441,58 @@ private void executeRequest( if (originalSearchRequest.isPhaseTook() == null) { originalSearchRequest.setPhaseTook(clusterService.getClusterSettings().get(SEARCH_PHASE_TOOK_ENABLED)); } - SearchRequestOperationsListener.CompositeListener requestOperationsListeners = searchRequestOperationsCompositeListenerFactory - .buildCompositeListener(originalSearchRequest, logger); - SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); - searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); - - PipelinedRequest searchRequest; - ActionListener listener; - try { - searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = searchRequest.transformResponseListener(originalListener); - } catch (Exception e) { - originalListener.onFailure(e); - return; - } - ActionListener requestTransformListener = ActionListener.wrap(sr -> { - if (searchQueryMetricsEnabled) { - try { - searchQueryCategorizer.categorize(sr.source()); - } catch (Exception e) { - logger.error("Error while trying to categorize the query.", e); - } + final Span requestSpan = tracer.startSpan(SpanBuilder.from(task, actionName)); + try (final SpanScope spanScope = tracer.withSpanInScope(requestSpan)) { + 
SearchRequestOperationsListener.CompositeListener requestOperationsListeners; + final ActionListener updatedListener = TraceableActionListener.create(originalListener, requestSpan, tracer); + requestOperationsListeners = searchRequestOperationsCompositeListenerFactory.buildCompositeListener( + originalSearchRequest, + logger, + TraceableSearchRequestOperationsListener.create(tracer, requestSpan) + ); + SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + + PipelinedRequest searchRequest; + ActionListener listener; + try { + searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); + listener = searchRequest.transformResponseListener(updatedListener); + } catch (Exception e) { + updatedListener.onFailure(e); + return; } - ActionListener rewriteListener = buildRewriteListener( - sr, - task, - timeProvider, - searchAsyncActionProvider, - listener, - searchRequestContext - ); - if (sr.source() == null) { - rewriteListener.onResponse(sr.source()); - } else { - Rewriteable.rewriteAndFetch( - sr.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener + ActionListener requestTransformListener = ActionListener.wrap(sr -> { + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + + ActionListener rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestContext ); - } - }, listener::onFailure); - searchRequest.transformRequest(requestTransformListener); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); + } } private ActionListener buildRewriteListener( @@ -1240,7 +1259,8 @@ private AbstractSearchAsyncAction searchAsyncAction ) ), clusters, - searchRequestContext + searchRequestContext, + tracer ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1271,7 +1291,8 @@ private AbstractSearchAsyncAction searchAsyncAction clusterState, task, clusters, - searchRequestContext + searchRequestContext, + tracer ); break; case QUERY_THEN_FETCH: @@ -1292,7 +1313,8 @@ private AbstractSearchAsyncAction searchAsyncAction clusterState, task, clusters, - searchRequestContext + searchRequestContext, + tracer ); break; default: diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 7f2382f8b4910..e4095a84be081 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -307,6 +307,16 @@ public ShardsIterator allShardsSatisfyingPredicate(Predicate predi return allShardsSatisfyingPredicate(indices, predicate, false); } + + /** + * All the shards for the provided indices which match the predicate + * @param indices the indices whose shards are returned.
+ * @param predicate condition to match + * @return iterator over shards matching the predicate for the specific indices + */ + public ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate) { + return allShardsSatisfyingPredicate(indices, predicate, false); + } + private ShardsIterator allShardsSatisfyingPredicate( String[] indices, Predicate predicate, diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java index 06f1d5f46f90b..f3e66bd20cd86 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java @@ -76,6 +76,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); + mergeRequest.primaryOnly(request.paramAsBoolean("primary_only", mergeRequest.primaryOnly())); if (mergeRequest.onlyExpungeDeletes() && mergeRequest.maxNumSegments() != ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) { deprecationLogger.deprecate( "force_merge_expunge_deletes_and_max_num_segments_deprecation", diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5ed899408ab40..69fda2f3f6133 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -35,8 +35,13 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; @@ -46,6 +51,7 @@ import org.opensearch.common.util.LongHash; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; @@ -73,6 +79,7 @@ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * An aggregator of string values that relies on global ordinals in order to build buckets. 
@@ -85,6 +92,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; + private final String fieldName; + private Weight weight; private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; protected int segmentsWithSingleValuedOrds = 0; @@ -136,16 +145,105 @@ public GlobalOrdinalsStringTermsAggregator( return new DenseGlobalOrds(); }); } + this.fieldName = (valuesSource instanceof ValuesSource.Bytes.WithOrdinals.FieldData) + ? ((ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource).getIndexFieldName() + : null; } String descriptCollectionStrategy() { return collectionStrategy.describe(); } + public void setWeight(Weight weight) { + this.weight = weight; + } + + /** + * Read doc frequencies directly from indexed terms in the segment to skip iterating through individual documents + * @param ctx The LeafReaderContext to collect terms from + * @param globalOrds The SortedSetDocValues for the field's ordinals + * @param ordCountConsumer A consumer to accept collected term frequencies + * @return A LeafBucketCollector implementation with collection termination, since collection is complete + * @throws IOException If an I/O error occurs during reading + */ + LeafBucketCollector termDocFreqCollector( + LeafReaderContext ctx, + SortedSetDocValues globalOrds, + BiConsumer ordCountConsumer + ) throws IOException { + if (weight == null) { + // Weight not assigned - cannot use this optimization + return null; + } else { + if (weight.count(ctx) == 0) { + // No documents match the top-level query on this segment, so we can skip the segment entirely + return LeafBucketCollector.NO_OP_COLLECTOR; + } else if (weight.count(ctx) != ctx.reader().maxDoc()) { + // The fast path requires weight.count(ctx) == ctx.reader().maxDoc(), i.e. no deleted documents + // and a top-level query that matches every doc in the segment; otherwise fall back + return null; + } + } + + Terms segmentTerms = ctx.reader().terms(this.fieldName); + if (segmentTerms == null) { + // Field is not indexed. + return null; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + // This segment has at least one document with the _doc_count field. + return null; + } + + TermsEnum indexTermsEnum = segmentTerms.iterator(); + BytesRef indexTerm = indexTermsEnum.next(); + TermsEnum globalOrdinalTermsEnum = globalOrds.termsEnum(); + BytesRef ordinalTerm = globalOrdinalTermsEnum.next(); + + // Iterate over the terms in the segment, look for matches in the global ordinal terms, + // and increment bucket count when segment terms match global ordinal terms.
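+ // Both TermsEnums advance in sorted term order, so a single forward pass over each (a merge join) visits every match.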
+ while (indexTerm != null && ordinalTerm != null) { + int compare = indexTerm.compareTo(ordinalTerm); + if (compare == 0) { + if (acceptedGlobalOrdinals.test(globalOrdinalTermsEnum.ord())) { + ordCountConsumer.accept(globalOrdinalTermsEnum.ord(), indexTermsEnum.docFreq()); + } + indexTerm = indexTermsEnum.next(); + ordinalTerm = globalOrdinalTermsEnum.next(); + } else if (compare < 0) { + indexTerm = indexTermsEnum.next(); + } else { + ordinalTerm = globalOrdinalTermsEnum.next(); + } + } + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); collectionStrategy.globalOrdsReady(globalOrds); + + if (collectionStrategy instanceof DenseGlobalOrds + && this.resultStrategy instanceof StandardTermsResults + && sub == LeafBucketCollector.NO_OP_COLLECTOR) { + LeafBucketCollector termDocFreqCollector = termDocFreqCollector( + ctx, + globalOrds, + (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; @@ -343,9 +441,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx); segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount()); assert sub == LeafBucketCollector.NO_OP_COLLECTOR; - final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); mapping = valuesSource.globalOrdinalsMapping(ctx); - // Dense mode doesn't support include/exclude so we don't have to check it here. 
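+ // Try the doc-frequency fast path first; fall back to per-document collection when it returns null.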
+ + if (this.resultStrategy instanceof StandardTermsResults) { + LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector( + ctx, + segmentOrds, + (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + + final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index 3ce1f0447dfcc..1f4dd429e094e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -244,6 +244,10 @@ public FieldData(IndexOrdinalsFieldData indexFieldData) { this.indexFieldData = indexFieldData; } + public String getIndexFieldName() { + return this.indexFieldData.getFieldName(); + } + @Override public SortedBinaryDocValues bytesValues(LeafReaderContext context) { final LeafOrdinalsFieldData atomicFieldData = indexFieldData.load(context); diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 403b0b545c113..ec3ed2332d0b8 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -387,6 +387,11 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return null; } } + + @Override + public int count(LeafReaderContext context) throws IOException { + return weight.count(context); + } }; } else { return weight; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index 6a97914b04ebc..212ef3c713d8e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -75,6 +75,16 @@ private AttributeNames() { */ public static final String TRANSPORT_ACTION = "action"; + /** + * Task id + */ + public static final String TASK_ID = "task_id"; + + /** + * Parent task id + */ + public static final String PARENT_TASK_ID = "parent_task_id"; + /** * Index Name */ @@ -99,4 +109,9 @@ private AttributeNames() { * Refresh Policy */ public static final String REFRESH_POLICY = "refresh_policy"; + + /** + * Search Response Total Hits + */ + public static final String TOTAL_HITS = "total_hits"; } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index 70658c5d71bf3..42e64109b72fd 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -15,6 +15,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; +import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.transport.TcpChannel; import org.opensearch.transport.Transport; @@ -196,4 +197,43 @@ private static 
Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequ return attributes; } + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param spanName name of span. + * @param parentSpan target parent span. + * @return context + */ + public static SpanCreationContext from(String spanName, SpanContext parentSpan) { + return SpanCreationContext.server().name(spanName).parent(parentSpan); + } + + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param task search task. + * @param actionName action. + * @return context + */ + public static SpanCreationContext from(Task task, String actionName) { + return SpanCreationContext.server().name(createSpanName(task, actionName)).attributes(buildSpanAttributes(task, actionName)); + } + + private static Attributes buildSpanAttributes(Task task, String actionName) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, actionName); + if (task != null) { + attributes.addAttribute(AttributeNames.TASK_ID, task.getId()); + if (task.getParentTaskId() != null && task.getParentTaskId().isSet()) { + attributes.addAttribute(AttributeNames.PARENT_TASK_ID, task.getParentTaskId().getId()); + } + } + return attributes; + + } + + private static String createSpanName(Task task, String actionName) { + if (task != null) { + return task.getType() + SEPARATOR + task.getAction(); + } else { + return actionName; + } + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java new file mode 100644 index 0000000000000..71fb59194c447 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.telemetry.tracing.AttributeNames; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.Tracer; + +/** + * SearchRequestOperationsListener subscriber for search request tracing + * + * @opensearch.internal + */ +public final class TraceableSearchRequestOperationsListener extends SearchRequestOperationsListener { + private final Tracer tracer; + private final Span requestSpan; + private SpanContext phaseSpanContext; + + public TraceableSearchRequestOperationsListener(final Tracer tracer, final Span requestSpan) { + this.tracer = tracer; + this.requestSpan = requestSpan; + this.phaseSpanContext = null; + } + + public static SearchRequestOperationsListener create(final Tracer tracer, final Span requestSpan) { + if (tracer.isRecording()) { + return new TraceableSearchRequestOperationsListener(tracer, requestSpan); + } else { + return SearchRequestOperationsListener.NOOP; + } + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assert phaseSpanContext == null : "There should be only one search phase active at a time"; + phaseSpanContext = tracer.getCurrentSpan(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.setError((Exception) cause); + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + // add response-related attributes on request end + requestSpan.addAttribute( + AttributeNames.TOTAL_HITS, + searchRequestContext.totalHits() == null ? 
0 : searchRequestContext.totalHits().value + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java index 87ba6110447c1..430d600354e3b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -31,21 +31,136 @@ package org.opensearch.action.admin.indices.forcemerge; +import org.opensearch.Version; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; public class ForceMergeRequestTests extends OpenSearchTestCase { public void testDescription() { ForceMergeRequest request = new ForceMergeRequest(); - assertEquals("Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest("shop", "blog"); - assertEquals("Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest(); request.maxNumSegments(12); request.onlyExpungeDeletes(true); request.flush(false); - assertEquals("Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false]", request.getDescription()); + request.primaryOnly(true); + assertEquals( + "Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false], primaryOnly[true]", + request.getDescription() + ); + } + + public void testToString() { + ForceMergeRequest request = new ForceMergeRequest(); + assertEquals("ForceMergeRequest{maxNumSegments=-1, onlyExpungeDeletes=false, flush=true, primaryOnly=false}", request.toString()); + + request = new ForceMergeRequest(); + request.maxNumSegments(12); + request.onlyExpungeDeletes(true); + request.flush(false); + request.primaryOnly(true); + assertEquals("ForceMergeRequest{maxNumSegments=12, onlyExpungeDeletes=true, flush=false, primaryOnly=true}", request.toString()); + } + + public void testSerialization() throws Exception { + final ForceMergeRequest request = randomRequest(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + deserializedRequest = new ForceMergeRequest(in); + } + assertEquals(request.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(request.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(request.flush(), deserializedRequest.flush()); + assertEquals(request.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(request.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + + public void testBwcSerialization() throws Exception { + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = 
VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(compatibleVersion); + sample.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(Version.CURRENT); + TaskId.readFromStream(in); + in.readStringArray(); + IndicesOptions.readIndicesOptions(in); + int maxNumSegments = in.readInt(); + boolean onlyExpungeDeletes = in.readBoolean(); + boolean flush = in.readBoolean(); + if (compatibleVersion.onOrAfter(Version.V_2_13_0)) { + // primaryOnly is only on the wire from 2.13.0 onward, mirroring ForceMergeRequest#writeTo; + // it must be consumed here, otherwise the following readOptionalString() would misread the stream + assertEquals(sample.primaryOnly(), in.readBoolean()); + } + String forceMergeUUID = in.readOptionalString(); + assertEquals(sample.maxNumSegments(), maxNumSegments); + assertEquals(sample.onlyExpungeDeletes(), onlyExpungeDeletes); + assertEquals(sample.flush(), flush); + assertEquals(sample.forceMergeUUID(), forceMergeUUID); + } + + } + } + + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + sample.getParentTask().writeTo(out); + out.writeStringArray(sample.indices()); + sample.indicesOptions().writeIndicesOptions(out); + out.writeInt(sample.maxNumSegments()); + out.writeBoolean(sample.onlyExpungeDeletes()); + out.writeBoolean(sample.flush()); + if (compatibleVersion.onOrAfter(Version.V_2_13_0)) { + out.writeBoolean(sample.primaryOnly()); + } + out.writeOptionalString(sample.forceMergeUUID()); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(compatibleVersion); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_2_13_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + } + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + } + + private ForceMergeRequest randomRequest() { + ForceMergeRequest request = new ForceMergeRequest(); + if (randomBoolean()) { + request.maxNumSegments(randomIntBetween(1, 10)); + } + request.onlyExpungeDeletes(true); + request.flush(randomBoolean()); + request.primaryOnly(randomBoolean()); + return request; } } diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 3af7af114e96d..420289d3ff2e5 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -58,6 +58,7 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -85,19 +86,16 @@ import java.util.function.BiFunction; import java.util.stream.IntStream; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf;
-import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List> resolvedNodes = new ArrayList<>(); private final Set releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; - private SearchRequestOperationsListener assertingListener; + private SearchRequestOperationsListenerAssertingListener assertingListener; ThreadPool threadPool; @Before @@ -106,27 +104,7 @@ public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); threadPool = new TestThreadPool(getClass().getName()); - assertingListener = new SearchRequestOperationsListener() { - private volatile SearchPhase phase; - - @Override - protected void onPhaseStart(SearchPhaseContext context) { - assertThat(phase, is(nullValue())); - phase = context.getCurrentPhase(); - } - - @Override - protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - assertThat(phase, is(context.getCurrentPhase())); - phase = null; - } - - @Override - protected void onPhaseFailure(SearchPhaseContext context) { - assertThat(phase, is(context.getCurrentPhase())); - phase = null; - } - }; + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After @@ -136,6 +114,7 @@ public void tearDown() throws Exception { executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertingListener.assertFinished(); } private AbstractSearchAsyncAction createAction( @@ -207,7 +186,8 @@ private AbstractSearchAsyncAction createAction( new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), request - ) + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -371,7 +351,7 @@ public void testOnPhaseFailureAndVerifyListeners() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = List.of(testListener); + final List requestOperationListeners = List.of(testListener, assertingListener); SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); action.start(); assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); @@ -403,6 +383,7 @@ public void run() { SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); searchShardIterator.resetAndSkip(); action.skipShard(searchShardIterator); + action.start(); action.executeNextPhase(action, fetchPhase); assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); action.onPhaseFailure(new SearchPhase("test") { @@ -645,7 +626,7 @@ public void testExecutePhaseOnShardFailureAndThrowException() throws Interrupted public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); long delay = 
(randomIntBetween(1, 5)); delay = delay * 10; @@ -685,8 +666,8 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); action.executeNextPhase(expandPhase, fetchPhase); + action.onPhaseDone(); /* finish phase since we don't have a response being sent */ - action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); @@ -695,7 +676,7 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( requestOperationListeners @@ -712,6 +693,11 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure( + fetchPhase, + "Something went wrong", + null + ); /* finalizing the fetch phase since we make ad hoc phase lifecycle calls */ assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); @@ -754,7 +740,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct null, null, null, - null, + controller, executor, resultConsumer, searchRequest, @@ -767,7 +753,8 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ); } @@ -820,7 +807,8 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override ShardSearchFailure[] buildShardFailures() { diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 30fc50f91dabd..1881c705fe6b3 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -55,6 +55,7 @@ import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import 
org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; @@ -66,7 +67,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashSet; -import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -75,7 +75,6 @@ import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -83,42 +82,22 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.collection.IsEmptyCollection.empty; public class CanMatchPreFilterSearchPhaseTests extends OpenSearchTestCase { - private SearchRequestOperationsListener assertingListener; - private Set phases; + private SearchRequestOperationsListenerAssertingListener assertingListener; @Before public void setUp() throws Exception { super.setUp(); - phases = Collections.newSetFromMap(new IdentityHashMap<>()); - assertingListener = new SearchRequestOperationsListener() { - @Override - protected void onPhaseStart(SearchPhaseContext context) { - assertThat(phases.contains(context.getCurrentPhase()), is(false)); - phases.add(context.getCurrentPhase()); - } - - @Override - protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - assertThat(phases.contains(context.getCurrentPhase()), is(true)); - phases.remove(context.getCurrentPhase()); - } - - @Override - protected void onPhaseFailure(SearchPhaseContext context) { - assertThat(phases.contains(context.getCurrentPhase()), is(true)); - phases.remove(context.getCurrentPhase()); - } - }; + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After public void tearDown() throws Exception { super.tearDown(); - assertBusy(() -> assertThat(phases, empty()), 5, TimeUnit.SECONDS); + + assertingListener.assertFinished(); } public void testFilterShards() throws InterruptedException { @@ -164,6 +143,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -182,15 +165,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -260,6 +241,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new 
SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -278,15 +263,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -346,6 +329,10 @@ public void sendCanMatch( (e) -> { throw new AssertionError("unexpected", e); } ); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -360,58 +347,57 @@ public void sendCanMatch( timeProvider, ClusterState.EMPTY_STATE, null, - (iter) -> new AbstractSearchAsyncAction("test", logger, transportService, (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - Collections.emptyMap(), - executor, - searchRequest, - responseListener, - iter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(iter.size()), - randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) - ) { - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { + (iter) -> { + return new WrappingSearchAsyncActionPhase( + new AbstractSearchAsyncAction("test", logger, transportService, (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + responseListener, + iter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + new ArraySearchPhaseResults<>(iter.size()), + randomIntBetween(1, 32), + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE + ) { @Override - public void run() { - latch.countDown(); + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new WrappingSearchAsyncActionPhase(this) { + @Override + public void run() { + latch.countDown(); + } + }; } - }; - } - @Override - protected void executePhaseOnShard( - final SearchShardIterator shardIt, - final SearchShardTarget shard, - final SearchActionListener listener - ) { - if (randomBoolean()) { - listener.onResponse(new SearchPhaseResult() { - }); - } else { - listener.onFailure(new Exception("failure")); + @Override + 
protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener listener + ) { + if (randomBoolean()) { + listener.onResponse(new SearchPhaseResult() { + }); + } else { + listener.onFailure(new Exception("failure")); + } + } } - } + ); }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -475,6 +461,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -493,15 +483,13 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -580,6 +568,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -598,15 +590,13 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); - assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -730,7 +720,8 @@ public void run() { }; }, SearchResponse.Clusters.EMPTY, - searchRequestContext + searchRequestContext, + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -779,7 +770,8 @@ private static final class SearchDfsQueryAsyncAction extends AbstractSearchAsync new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + NoopTracer.INSTANCE ); this.listener = searchRequestContext.getSearchRequestOperationsListener(); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index 
af7adc4e58fb8..35e90ff662b19 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -51,11 +51,14 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -79,6 +82,23 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SearchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } public void testSkipSearchShards() throws InterruptedException { SearchRequest request = new SearchRequest(); @@ -117,6 +137,10 @@ public void testSkipSearchShards() throws InterruptedException { lookup.put(replicaNode.getId(), new MockConnection(replicaNode)); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -138,7 +162,8 @@ public void testSkipSearchShards() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -169,6 +194,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -236,6 +262,10 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -257,7 +287,8 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new 
SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -295,6 +326,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res return new SearchPhase("test") { @Override public void run() { + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); assertTrue(searchPhaseDidRun.compareAndSet(false, true)); } }; @@ -375,7 +407,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -411,6 +447,7 @@ public void run() { sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); } responseListener.onResponse(response); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -498,7 +535,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -591,6 +632,10 @@ public void testAllowPartialResults() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( "test", logger, @@ -612,7 +657,8 @@ public void testAllowPartialResults() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override protected void executePhaseOnShard( @@ -645,6 +691,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index faf6f86c69c27..aefbbe80d5fa1 100644 --- 
a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -59,9 +59,12 @@ import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.util.Collections; import java.util.List; @@ -77,6 +80,24 @@ import static org.hamcrest.Matchers.instanceOf; public class SearchQueryThenFetchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } + public void testBottomFieldSort() throws Exception { testCase(false, false); } @@ -218,15 +239,17 @@ public void sendExecuteQuery( task, SearchResponse.Clusters.EMPTY, new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new SearchPhase("test") { @Override public void run() { + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java index 1cb336e18b12c..845543fbd9f57 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java @@ -125,7 +125,7 @@ protected void onPhaseStart(SearchPhaseContext context) {} protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - protected void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} }; } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java new file mode 100644 index 0000000000000..327371ebcaf0b --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +class SearchRequestOperationsListenerAssertingListener extends SearchRequestOperationsListener { + private volatile SearchPhase phase; + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assertThat(phase, is(nullValue())); + phase = context.getCurrentPhase(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + public void assertFinished() { + assertThat(phase, is(nullValue())); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java index a53c35a8401b3..990ed95f1aebc 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -40,7 +40,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) { + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) { searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java index 377ccebbfd418..fb9b26e3f3ad1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -36,7 +36,7 @@ public void testSearchRequestPhaseFailure() { when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); testRequestStats.onPhaseStart(ctx); assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); - testRequestStats.onPhaseFailure(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); } } @@ -156,7 +156,7 @@ public void testSearchRequestStatsOnPhaseFailureConcurrently() throws Interrupte threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); testRequestStats.onPhaseStart(ctx); - testRequestStats.onPhaseFailure(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); countDownLatch.countDown(); }); threads[i].start(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java index 8542ff53c6ff1..97283f561d6d4 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java @@ -279,6 +279,31 @@ public void testAllShardsMatchingPredicate() { ); } + public void testAllShardsMatchingPredicateWithSpecificIndices() { + MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); + Metadata metadata = Metadata.builder() + 
.put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(RoutingTable.builder().addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build()) + .build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + clusterState = allocation.reroute(clusterState, "reroute"); + + String[] indices = new String[] { "test1", "test2" }; + // Verifies against all primary shards on the node + assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(indices, ShardRouting::primary).size(), is(2)); + // Verifies against all replica shards on the node + assertThat( + clusterState.routingTable().allShardsSatisfyingPredicate(indices, shardRouting -> !shardRouting.primary()).size(), + is(2) + ); + } + public void testActivePrimaryShardsGrouped() { assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0)); assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java index 4229361aa7f46..753644dce81d5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; @@ -41,7 +40,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; +import org.opensearch.common.TriConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -57,6 +56,8 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { private static final String KEYWORD_FIELD = "keyword"; + private static final Consumer CONFIGURE_KEYWORD_FIELD = agg -> agg.field(KEYWORD_FIELD); + private static final List dataset; static { List d = new ArrayList<>(45); @@ -68,51 +69,63 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { dataset = d; } + private static final Consumer VERIFY_MATCH_ALL_DOCS = agg -> { + assertEquals(9, agg.getBuckets().size()); + for (int i = 0; i < 9; i++) { + StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); + assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); + assertThat(bucket.getDocCount(), equalTo(9L - i)); + } + }; + + private static final Consumer VERIFY_MATCH_NO_DOCS = agg -> { assertEquals(0, agg.getBuckets().size()); }; + + private static final Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); + 
+ private static final Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); + public void testMatchNoDocs() throws IOException { testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - null // without type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + null // without type hint ); testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - ValueType.STRING // with type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + ValueType.STRING // with type hint ); } public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - null // without type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + null // without type hint ); - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - ValueType.STRING // with type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + ValueType.STRING // with type hint ); } private void testSearchCase( + TriConsumer addField, Query query, List dataset, Consumer configure, @@ -123,7 +136,7 @@ private void testSearchCase( try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String value : dataset) { - document.add(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef(value))); + addField.apply(document, KEYWORD_FIELD, value); indexWriter.addDocument(document); document.clear(); } @@ -147,5 +160,4 @@ private void testSearchCase( } } } - } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 80744ecde4d69..6d105c27a692f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -44,6 +44,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -52,6 +54,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; 
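(Aside: the aggregator tests in this diff parameterize document construction through `org.opensearch.common.TriConsumer`, so the same test body can run against doc-values-only fields and doc-values-plus-indexed fields. A minimal sketch of that fixture style, mirroring the `ADD_SORTED_SET_FIELD_NOT_INDEXED`/`ADD_SORTED_SET_FIELD_INDEXED` constants added to `AggregatorTestCase` later in this diff; the constant names here are illustrative:)

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.util.BytesRef;
import org.opensearch.common.TriConsumer;

// Sketch of the TriConsumer fixture style: the test supplies "how a field is
// added", so one helper covers both indexing variants.
class AddFieldFixtures {
    // doc values only: collectors must visit every document to count terms
    static final TriConsumer<Document, String, String> ADD_DOC_VALUES_ONLY =
        (doc, field, value) -> doc.add(new SortedSetDocValuesField(field, new BytesRef(value)));

    // doc values plus an indexed StringField: term frequencies become readable
    // from the terms dictionary without visiting documents
    static final TriConsumer<Document, String, String> ADD_DOC_VALUES_AND_INDEXED =
        (doc, field, value) -> {
            doc.add(new SortedSetDocValuesField(field, new BytesRef(value)));
            doc.add(new StringField(field, value, Field.Store.NO));
        };
}
```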
import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.TriConsumer; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; @@ -120,6 +123,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -136,9 +140,6 @@ import static org.mockito.Mockito.when; public class TermsAggregatorTests extends AggregatorTestCase { - - private boolean randomizeAggregatorImpl = true; - // Constants for a script that returns a string private static final String STRING_SCRIPT_NAME = "string_script"; private static final String STRING_SCRIPT_OUTPUT = "Orange"; @@ -171,9 +172,22 @@ protected ScriptService getMockScriptService() { return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } + protected CountingAggregator createCountingAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, + MappedFieldType... fieldTypes + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldTypes) + ); + } + protected A createAggregator( AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, MappedFieldType... fieldTypes ) throws IOException { try { @@ -188,6 +202,14 @@ protected A createAggregator( } } + protected A createAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + MappedFieldType... fieldTypes + ) throws IOException { + return createAggregator(aggregationBuilder, indexSearcher, true, fieldTypes); + } + @Override protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { return new TermsAggregationBuilder("foo").field(fieldName); @@ -207,8 +229,7 @@ protected List getSupportedValuesSourceTypes() { } public void testUsesGlobalOrdinalsByDefault() throws Exception { - randomizeAggregatorImpl = false; - + boolean randomizeAggregatorImpl = false; Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); indexWriter.close(); @@ -220,35 +241,35 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { .field("string"); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); GlobalOrdinalsStringTermsAggregator globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); // Infers depth_first because the maxOrd is 0 which is less than the size aggregationBuilder.subAggregation(AggregationBuilders.cardinality("card").field("string")); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, 
equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.DEPTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.BREADTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); aggregationBuilder.order(BucketOrder.aggregation("card", true)); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); @@ -257,51 +278,145 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { directory.close(); } - public void testSimple() throws Exception { + /** + * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is false + */ + public void testSimpleAggregation() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_count field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + } + + /** + * This test case utilizes the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is true + */ + public void testSimpleAggregationLowCardinality() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // 
Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_count field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + } + + /** + * This test case utilizes the MapStringTermsAggregator. + */ + public void testSimpleMapStringAggregation() throws Exception { + testSimple( + ADD_SORTED_SET_FIELD_INDEXED, + randomBoolean(), + randomBoolean(), + randomBoolean(), + TermsAggregatorFactory.ExecutionMode.MAP, + 4 + ); + } + + /** + * Utility method for testing string terms aggregation + * @param addFieldConsumer a function that determines how a field is added to the document + * @param includeDeletedDocumentsInSegment whether to include deleted documents in the segment + * @param includeDocCountField whether to add a _doc_count field to one of the documents + * @param collectSegmentOrds whether to collect segment ords - set true to utilize the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator + * @param executionMode execution mode MAP or GLOBAL_ORDINALS + * @param expectedCollectCount expected number of documents visited as part of collect() invocation + */ + private void testSimple( + TriConsumer addFieldConsumer, + final boolean includeDeletedDocumentsInSegment, + final boolean includeDocCountField, + boolean collectSegmentOrds, + TermsAggregatorFactory.ExecutionMode executionMode, + final int expectedCollectCount + ) throws Exception { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try ( + RandomIndexWriter indexWriter = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { Document document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); + addFieldConsumer.apply(document, "string", "a"); + addFieldConsumer.apply(document, "string", "b"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); - document.add(new SortedSetDocValuesField("string", new BytesRef("c"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); + addFieldConsumer.apply(document, "string", ""); + addFieldConsumer.apply(document, "string", "c"); + addFieldConsumer.apply(document, "string", "a"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("d"))); + addFieldConsumer.apply(document, "string", "b"); + addFieldConsumer.apply(document, "string", "d"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); + addFieldConsumer.apply(document, "string", ""); + if (includeDocCountField) { + // Adding _doc_count to one document + 
document.add(new NumericDocValuesField("_doc_count", 10)); + } indexWriter.addDocument(document); + + if (includeDeletedDocumentsInSegment) { + document = new Document(); + ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); + indexWriter.addDocument(document); + indexWriter.deleteDocuments(new Term("string", "e")); + assertEquals(5, indexWriter.getDocStats().maxDoc); // deleted document still in segment + } + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { IndexSearcher indexSearcher = newIndexSearcher(indexReader); - for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { - TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( - ValueType.STRING - ).executionHint(executionMode.toString()).field("string").order(BucketOrder.key(true)); - MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); - aggregator.preCollection(); - indexSearcher.search(new MatchAllDocsQuery(), aggregator); - aggregator.postCollection(); - Terms result = reduce(aggregator); - assertEquals(5, result.getBuckets().size()); - assertEquals("", result.getBuckets().get(0).getKeyAsString()); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.STRING) + .executionHint(executionMode.toString()) + .field("string") + .order(BucketOrder.key(true)); + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); + + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = collectSegmentOrds; + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = false; + CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(5, result.getBuckets().size()); + assertEquals("", result.getBuckets().get(0).getKeyAsString()); + if (includeDocCountField) { + assertEquals(11L, result.getBuckets().get(0).getDocCount()); + } else { assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("a", result.getBuckets().get(1).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(1).getDocCount()); - assertEquals("b", result.getBuckets().get(2).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(2).getDocCount()); - assertEquals("c", result.getBuckets().get(3).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(3).getDocCount()); - assertEquals("d", result.getBuckets().get(4).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(4).getDocCount()); - assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); } + assertEquals("a", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("b", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("c", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertEquals("d", result.getBuckets().get(4).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); + + assertEquals(expectedCollectCount, aggregator.getCollectCount().get()); } } 
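(Aside: the `expectedCollectCount` assertions above hinge on a Lucene property: for an indexed field in a segment with no deletions and no `_doc_count` overrides, per-term document counts are already stored in the terms dictionary, so the aggregator can bucket without visiting any documents. An illustrative sketch of reading those counts directly, not OpenSearch's actual `LeafBucketCollector#termDocFreqCollector`:)

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

// Sketch: derive per-term bucket counts straight from the terms dictionary.
final class TermCounts {
    static Map<String, Integer> countsFromTermsDict(LeafReader reader, String field) throws IOException {
        Map<String, Integer> counts = new HashMap<>();
        Terms terms = reader.terms(field);
        if (terms == null) {
            return counts; // field absent in this segment
        }
        TermsEnum it = terms.iterator();
        while (it.next() != null) {
            // docFreq() is only a valid bucket count when the segment has no
            // deletions and no _doc_count field overriding per-document weight;
            // otherwise every document must be visited, as the tests assert.
            counts.put(it.term().utf8ToString(), it.docFreq());
        }
        return counts;
    }
}
```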
} @@ -1543,5 +1658,4 @@ private T reduce(Aggregator agg) throws IOExcept doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); return result; } - } diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index d0e01c5461c79..4bd4d406e4391 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -122,6 +122,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -437,10 +438,16 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + // Do not expect an exact match when terminate_after is used in conjunction with size = 0, as an optimization introduced by + // https://issues.apache.org/jira/browse/LUCENE-10620 can produce a total hit count >= terminate_after, because + // TotalHitCountCollector is used in this case as part of the Weight#count() optimization context.setSize(0); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } @@ -466,7 +473,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.parsedQuery(new ParsedQuery(bq)); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { @@ -486,9 +496,12 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(manager.getTotalHits(), equalTo(1)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numDocs))); } // tests with trackTotalHits and terminateAfter @@ -503,7 +516,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); } else { - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, 
equalTo((long) Math.min(trackTotalHits, 10))); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10L)), lessThanOrEqualTo((long) numDocs)) + ); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); // The concurrent search terminates the collection when the number of hits is reached by each // slice (as the unit of concurrency). To address that, we have to use the shared global state, // much as HitsThresholdChecker does. if (executor == null) { - assertThat(manager.getTotalHits(), equalTo(10)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10)), lessThanOrEqualTo(numDocs))); } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 7c50e961853b5..4b43b5f9c3ff5 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2310,7 +2310,8 @@ public void onFailure(final Exception e) { client ), NoopMetricsRegistry.INSTANCE, - searchRequestOperationsCompositeListenerFactory + searchRequestOperationsCompositeListenerFactory, + NoopTracer.INSTANCE ) ); actions.put( diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java index 75fc6761a60ef..4b763e4bd4454 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -21,6 +21,7 @@ import org.opensearch.http.HttpResponse; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; @@ -104,6 +105,16 @@ public void testTransportContext() { assertEquals(connection.getNode().getHostAddress(), attributes.getAttributesMap().get(AttributeNames.TRANSPORT_TARGET_HOST)); } + public void testParentSpan() { + String spanName = "test-name"; + SpanContext parentSpanContext = new SpanContext(NoopSpan.INSTANCE); + SpanCreationContext context = SpanBuilder.from(spanName, parentSpanContext); + Attributes attributes = context.getAttributes(); + assertNull(attributes); + assertEquals(spanName, context.getSpanName()); + assertEquals(parentSpanContext, context.getParent()); + } + private static Transport.Connection createTransportConnection() { return new Transport.Connection() { @Override diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index ac0447dbebf7e..4eb49ebb42241 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -34,11 +34,13 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import 
diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
index ac0447dbebf7e..4eb49ebb42241 100644
--- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
@@ -34,11 +34,13 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.document.LatLonDocValuesField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.CompositeReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -62,6 +64,7 @@ import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.CheckedConsumer;
+import org.opensearch.common.TriConsumer;
 import org.opensearch.common.TriFunction;
 import org.opensearch.common.lease.Releasable;
 import org.opensearch.common.lease.Releasables;
@@ -121,6 +124,7 @@ import org.opensearch.search.aggregations.AggregatorFactories.Builder;
 import org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
 import org.opensearch.search.aggregations.bucket.nested.NestedAggregationBuilder;
+import org.opensearch.search.aggregations.bucket.terms.TermsAggregator;
 import org.opensearch.search.aggregations.metrics.MetricsAggregator;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
@@ -147,6 +151,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Supplier;
@@ -178,6 +183,14 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
     // A list of field types that should not be tested, or are not currently supported
     private static List<String> TYPE_TEST_DENYLIST;
+
+    protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_NOT_INDEXED = (document, field, value) -> document
+        .add(new SortedSetDocValuesField(field, new BytesRef(value)));
+
+    protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_INDEXED = (document, field, value) -> {
+        document.add(new SortedSetDocValuesField(field, new BytesRef(value)));
+        document.add(new StringField(field, value, Field.Store.NO));
+    };
+
     static {
         List<String> denylist = new ArrayList<>();
         denylist.add(ObjectMapper.CONTENT_TYPE); // Cannot aggregate objects
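
The two TriConsumer helpers added above give tests a uniform way to index a keyword value either as doc values only (ADD_SORTED_SET_FIELD_NOT_INDEXED) or as doc values plus an indexed StringField (ADD_SORTED_SET_FIELD_INDEXED). Below is a hypothetical fragment showing how they are meant to be combined with the CountingAggregator wrapper defined further down in this diff; it assumes it lives in an AggregatorTestCase subclass (so random() and newDirectory() are inherited) and that Directory and RandomIndexWriter are imported from the Lucene test framework.

    // Hypothetical test fragment, not part of this patch.
    private void indexFruitTerms(Directory directory) throws IOException {
        try (RandomIndexWriter writer = new RandomIndexWriter(random(), directory)) {
            for (String term : new String[] { "apple", "banana", "banana", "cherry" }) {
                Document document = new Document();
                // adds both the SortedSetDocValuesField and an indexed StringField for "fruit"
                ADD_SORTED_SET_FIELD_INDEXED.apply(document, "fruit", term);
                writer.addDocument(document);
            }
        }
        // A test can then wrap its terms aggregator to measure how many documents were visited:
        //   CountingAggregator counting = new CountingAggregator(new AtomicInteger(), termsAggregator);
        //   ... run collection ...
        //   assertEquals(4, counting.getCollectCount().get());
    }
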
@@ -433,7 +446,6 @@ protected QueryShardContext queryShardContextMock(
         CircuitBreakerService circuitBreakerService,
         BigArrays bigArrays
     ) {
-
         return new QueryShardContext(
             0,
             indexSettings,
@@ -1096,6 +1108,89 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         }
     }
+
+    /**
+     * Wrapper around an Aggregator that counts how many times collect() is invoked,
+     * i.e. the number of documents actually visited during collection.
+     */
+    protected static class CountingAggregator extends Aggregator {
+        private final AtomicInteger collectCounter;
+        public final Aggregator delegate;
+
+        public CountingAggregator(AtomicInteger collectCounter, TermsAggregator delegate) {
+            this.collectCounter = collectCounter;
+            this.delegate = delegate;
+        }
+
+        public AtomicInteger getCollectCount() {
+            return collectCounter;
+        }
+
+        @Override
+        public void close() {
+            delegate.close();
+        }
+
+        @Override
+        public String name() {
+            return delegate.name();
+        }
+
+        @Override
+        public SearchContext context() {
+            return delegate.context();
+        }
+
+        @Override
+        public Aggregator parent() {
+            return delegate.parent();
+        }
+
+        @Override
+        public Aggregator subAggregator(String name) {
+            return delegate.subAggregator(name);
+        }
+
+        @Override
+        public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+            return delegate.buildAggregations(owningBucketOrds);
+        }
+
+        @Override
+        public InternalAggregation buildEmptyAggregation() {
+            return delegate.buildEmptyAggregation();
+        }
+
+        @Override
+        public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
+            return new LeafBucketCollector() {
+                @Override
+                public void collect(int doc, long bucket) throws IOException {
+                    // delegate the collection, then record that one more document was visited
+                    delegate.getLeafCollector(ctx).collect(doc, bucket);
+                    collectCounter.incrementAndGet();
+                }
+            };
+        }
+
+        @Override
+        public ScoreMode scoreMode() {
+            return delegate.scoreMode();
+        }
+
+        @Override
+        public void preCollection() throws IOException {
+            delegate.preCollection();
+        }
+
+        @Override
+        public void postCollection() throws IOException {
+            delegate.postCollection();
+        }
+
+        public void setWeight(Weight weight) {
+            this.delegate.setWeight(weight);
+        }
+    }
+
     public static class InternalAggCardinality extends InternalAggregation {
         private final CardinalityUpperBound cardinality;