Correctly calculate doc_count_error at the slice level for concurrent segment search. Change slice_size heuristic to be equal to shard_size.

Signed-off-by: Jay Deng <[email protected]>
jed326 authored and Jay Deng committed Jan 9, 2024
1 parent 0b30f30 commit 1500ef2
Showing 7 changed files with 393 additions and 13 deletions.
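
For context, the slice_size heuristic change described in the commit message can be sketched roughly as follows. This is a hypothetical illustration, not the actual OpenSearch source: the 1.5x-plus-10 over-fetch mirrors the shard-level over-fetch heuristic, and the method names are made up.

    // Before: each slice sized its top-buckets queue with a shard-style
    // over-fetch, which made slice-level doc_count_error hard to account for.
    static int sliceSizeBefore(int shardSize) {
        return (int) Math.min(Integer.MAX_VALUE, (long) (shardSize * 1.5 + 10));
    }

    // After: slice_size == shard_size, so the slice-level reduce can compute
    // doc_count_error the same way the coordinator does across shards.
    static int sliceSizeAfter(int shardSize) {
        return shardSize;
    }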
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -185,6 +185,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512))
- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562))
- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678))
- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732))

### Deprecated

@@ -42,6 +42,7 @@
import java.util.Map;

import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.aggregations.AggregationBuilders.terms;
import static org.hamcrest.Matchers.equalTo;

@@ -76,6 +77,10 @@ public void testNoShardSizeString() throws Exception {
}

public void testShardSizeEqualsSizeString() throws Exception {
assumeFalse(
"For concurrent segment search there will be additional error introduced during the slice level reduce and thus different buckets may be returned. See https://github.com/opensearch-project/OpenSearch/issues/11680",
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
);
createIdx("type=keyword");

indexData();
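
The assumeFalse guard above, repeated verbatim in the Long and Double variants below, skips the test (rather than failing it) whenever the cluster runs with concurrent segment search enabled, since the slice-level reduce can surface different buckets. A minimal sketch of the pattern, assuming JUnit 4's Assume and a hypothetical helper name inside the integration test class:

    import static org.junit.Assume.assumeFalse;

    // Hypothetical helper: throws AssumptionViolatedException when the boolean
    // cluster setting is true, which the test runner reports as skipped.
    private void skipIfConcurrentSearchEnabled() {
        assumeFalse(
            "Buckets differ under concurrent segment search, see issue #11680",
            internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
        );
    }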
@@ -211,6 +216,10 @@ public void testNoShardSizeLong() throws Exception {
}

public void testShardSizeEqualsSizeLong() throws Exception {
assumeFalse(
"For concurrent segment search there will be additional error introduced during the slice level reduce and thus different buckets may be returned. See https://github.com/opensearch-project/OpenSearch/issues/11680",
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
);
createIdx("type=long");

indexData();
@@ -345,6 +354,10 @@ public void testNoShardSizeDouble() throws Exception {
}

public void testShardSizeEqualsSizeDouble() throws Exception {
assumeFalse(
"For concurrent segment search there will be additional error introduced during the slice level reduce and thus different buckets may be returned. See https://github.com/opensearch-project/OpenSearch/issues/11680",
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
);
createIdx("type=double");

indexData();
@@ -1118,6 +1118,11 @@ public void testFixedDocs() throws Exception {
.get();
assertSearchResponse(response);

// Force merge each shard down to 1 segment to verify results are the same between concurrent and non-concurrent search paths
// Otherwise, for concurrent segment search, additional error is introduced during the slice level reduce and thus different
// buckets may be returned. See https://github.com/opensearch-project/OpenSearch/issues/11680
client().admin().indices().prepareForceMerge("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").get();

Terms terms = response.getAggregations().get("terms");
assertThat(terms, notNullValue());
assertThat(terms.getDocCountError(), equalTo(46L));
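
For reference, the 46L asserted here is the aggregation-level doc_count_error upper bound: each shard (or, after this commit, each slice) that truncated its bucket list could have hidden a term with at most as many documents as the last bucket it did return. A minimal sketch of that bound, assuming java.util.List inputs sorted by descending doc count (illustrative only, not the OpenSearch implementation):

    import java.util.List;

    // Sum the smallest returned doc count from every shard/slice response; a
    // term absent from a response can hide at most that many documents there.
    static long docCountErrorUpperBound(List<List<Long>> perShardBucketCounts) {
        long error = 0;
        for (List<Long> buckets : perShardBucketCounts) {
            if (!buckets.isEmpty()) {
                error += buckets.get(buckets.size() - 1); // last == smallest
            }
        }
        return error;
    }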
