From 2ab7bfd59d2ae617e8d828bcd2df74f27083e919 Mon Sep 17 00:00:00 2001
From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com>
Date: Tue, 5 Sep 2023 11:02:20 -0700
Subject: [PATCH 1/4] Add support for query profiler with concurrent aggregation (#9248)

* Add support for query profiler with concurrent aggregation (#9248)

Signed-off-by: Ticheng Lin

* Refactor and work on the PR comments

Signed-off-by: Ticheng Lin

* Update collectorToLeaves mapping for children breakdowns after profile metric collection and before creating the results

Signed-off-by: Sorabh Hamirwasia

* Refactor logic to compute the slice level breakdown stats and query level breakdown stats for the concurrent search case

Signed-off-by: Sorabh Hamirwasia

* Fix QueryProfilePhaseTests and QueryProfileTests, and parameterize QueryProfilerIT with concurrent search enabled

Signed-off-by: Ticheng Lin

* Handle the case when there are no leaf contexts by returning default profile stats for all breakdown types along with min/max/avg values. Replace queryStart and queryEnd time with queryNodeTime

Signed-off-by: Sorabh Hamirwasia

* Add UTs for ConcurrentQueryProfileBreakdown

Signed-off-by: Sorabh Hamirwasia

* Add concurrent search stats test into the QueryProfilerIT

Signed-off-by: Ticheng Lin

* Address review comments

Signed-off-by: Sorabh Hamirwasia

---------

Signed-off-by: Ticheng Lin
Signed-off-by: Sorabh Hamirwasia
Co-authored-by: Sorabh Hamirwasia
---
 CHANGELOG.md                                  |   1 +
 .../search/profile/query/QueryProfilerIT.java |  74 +++-
 .../search/internal/ContextIndexSearcher.java |   3 +
 .../profile/AbstractInternalProfileTree.java  |   4 +
 .../profile/AbstractProfileBreakdown.java     |   5 +-
 .../profile/ContextualProfileBreakdown.java   |  10 +
 .../AggregationProfileBreakdown.java          |  17 +-
 .../query/AbstractQueryProfileTree.java       |  70 ++++
 .../ConcurrentQueryProfileBreakdown.java      | 283 ++++++++++++++-
 .../query/ConcurrentQueryProfileTree.java     |  91 +++++
 .../query/InternalQueryProfileTree.java       |  61 +---
 .../search/profile/query/ProfileWeight.java   |   4 +
 .../search/profile/query/QueryProfiler.java   |   8 +-
 .../ConcurrentQueryProfileBreakdownTests.java | 323 ++++++++++++++++++
 .../profile/query/QueryProfilerTests.java     | 172 ++++++++--
 .../search/query/QueryProfilePhaseTests.java  |  96 ++++++
 16 files changed, 1114 insertions(+), 108 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
 create mode 100644 server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java
 create mode 100644 server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e326d255922ec..54e8da353f49c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670))
 - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853))
 - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247))
+- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248))

 ### Dependencies
 - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857
([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 5f794d2abf878..24f2b1bbdabfc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -40,20 +42,23 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -61,8 +66,32 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, @@ -229,6 +258,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +301,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List children = result.getProfiledChildren(); @@ -282,12 +313,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +363,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +409,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +450,7 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +491,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +531,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +585,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +618,35 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if 
(concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 188a45a1e2870..7e2e7de2643f6 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -299,6 +299,9 @@ private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collecto final LeafCollector leafCollector; try { cancellable.checkCancelled(); + if (weight instanceof ProfileWeight) { + ((ProfileWeight) weight).associateCollectorToLeaves(ctx, collector); + } weight = wrapWeight(weight); // See please https://github.com/apache/lucene/pull/964 collector.setWeight(weight); diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index 41b08681746f0..904b04b249b1b 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -180,6 +180,10 @@ private ProfileResult doGetTree(int token) { // calculating the same times over and over...but worth the effort? 
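            // Note: result construction below is delegated to the createProfileResult hook added by this change;
            // ConcurrentQueryProfileTree overrides it to attach the max/min/avg slice-level node times to each ProfileResult
            // when concurrent segment search is used.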
String type = getTypeFromElement(element); String description = getDescriptionFromElement(element); + return createProfileResult(type, description, breakdown, childrenProfileResults); + } + + protected ProfileResult createProfileResult(String type, String description, PB breakdown, List childrenProfileResults) { return new ProfileResult( type, description, diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java index 67ab062c0e3ca..4a1563e7cdce9 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java @@ -80,6 +80,7 @@ public Map toBreakdownMap() { for (T timingType : this.timingTypes) { map.put(timingType.toString(), this.timings[timingType.ordinal()].getApproximateTiming()); map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, this.timings[timingType.ordinal()].getCount()); + map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, this.timings[timingType.ordinal()].getEarliestTimerStartTime()); } return Collections.unmodifiableMap(map); } @@ -87,11 +88,11 @@ public Map toBreakdownMap() { /** * Fetch extra debugging information. */ - protected Map toDebugMap() { + public Map toDebugMap() { return emptyMap(); } - public final long toNodeTime() { + public long toNodeTime() { long total = 0; for (T timingType : timingTypes) { total += timings[timingType.ordinal()].getApproximateTiming(); diff --git a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java index 4f071f730cd45..3fe621321c8ad 100644 --- a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java @@ -8,6 +8,12 @@ package org.opensearch.search.profile; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; + +import java.util.List; +import java.util.Map; + /** * Provide contextual profile breakdowns which are associated with freestyle context. Used when concurrent * search over segments is activated and each collector needs own non-shareable profile breakdown instance. 
@@ -25,4 +31,8 @@ public ContextualProfileBreakdown(Class clazz) { * @return contextual profile breakdown instance */ public abstract AbstractProfileBreakdown context(Object context); + + public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {} + + public void associateCollectorsToLeaves(Map> collectorToLeaves) {} } diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java index d0c67915e6d8d..9fa628f107f7b 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java @@ -34,7 +34,6 @@ import org.opensearch.search.profile.AbstractProfileBreakdown; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -60,21 +59,7 @@ public void addDebugInfo(String key, Object value) { } @Override - protected Map toDebugMap() { + public Map toDebugMap() { return unmodifiableMap(extra); } - - /** - * Build a timing count startTime breakdown for aggregation timing types - */ - @Override - public Map toBreakdownMap() { - Map map = new HashMap<>(timings.length * 3); - for (AggregationTimingType timingType : timingTypes) { - map.put(timingType.toString(), timings[timingType.ordinal()].getApproximateTiming()); - map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, timings[timingType.ordinal()].getCount()); - map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, timings[timingType.ordinal()].getEarliestTimerStartTime()); - } - return Collections.unmodifiableMap(map); - } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java new file mode 100644 index 0000000000000..8e825def13f5d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Query; +import org.opensearch.search.profile.AbstractInternalProfileTree; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; + +/** + * This class tracks the dependency tree for queries (scoring and rewriting) and + * generates {@link QueryProfileBreakdown} for each node in the tree. 
It also finalizes the tree + * and returns a list of {@link ProfileResult} that can be serialized back to the client + * + * @opensearch.internal + */ +public abstract class AbstractQueryProfileTree extends AbstractInternalProfileTree, Query> { + + /** Rewrite time */ + private long rewriteTime; + private long rewriteScratch; + + @Override + protected String getTypeFromElement(Query query) { + // Anonymous classes won't have a name, + // we need to get the super class + if (query.getClass().getSimpleName().isEmpty()) { + return query.getClass().getSuperclass().getSimpleName(); + } + return query.getClass().getSimpleName(); + } + + @Override + protected String getDescriptionFromElement(Query query) { + return query.toString(); + } + + /** + * Begin timing a query for a specific Timing context + */ + public void startRewriteTime() { + assert rewriteScratch == 0; + rewriteScratch = System.nanoTime(); + } + + /** + * Halt the timing process and add the elapsed rewriting time. + * startRewriteTime() must be called for a particular context prior to calling + * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndAddRewriteTime() { + long time = Math.max(1, System.nanoTime() - rewriteScratch); + rewriteTime += time; + rewriteScratch = 0; + return time; + } + + public long getRewriteTime() { + return rewriteTime; + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index 6f0c78e8b307d..f37d53737d52b 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -8,10 +8,15 @@ package org.opensearch.search.profile.query; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -23,8 +28,22 @@ * @opensearch.internal */ public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown { + static final String SLICE_END_TIME_SUFFIX = "_slice_end_time"; + static final String SLICE_START_TIME_SUFFIX = "_slice_start_time"; + static final String MAX_PREFIX = "max_"; + static final String MIN_PREFIX = "min_"; + static final String AVG_PREFIX = "avg_"; + private long queryNodeTime = Long.MIN_VALUE; + private long maxSliceNodeTime = Long.MIN_VALUE; + private long minSliceNodeTime = Long.MAX_VALUE; + private long avgSliceNodeTime = 0L; + + // keep track of all breakdown timings per segment. package-private for testing private final Map> contexts = new ConcurrentHashMap<>(); + // represents slice to leaves mapping as for each slice a unique collector instance is created + private final Map> sliceCollectorsToLeaves = new ConcurrentHashMap<>(); + /** Sole constructor. 
     */
    public ConcurrentQueryProfileBreakdown() {
        super(QueryTimingType.class);
    }

@@ -44,14 +63,268 @@ public AbstractProfileBreakdown<QueryTimingType> context(Object context) {
     @Override
     public Map<String, Long> toBreakdownMap() {
-        final Map<String, Long> map = new HashMap<>(super.toBreakdownMap());
+        final Map<String, Long> topLevelBreakdownMapWithWeightTime = super.toBreakdownMap();
+        final long createWeightStartTime = topLevelBreakdownMapWithWeightTime.get(
+            QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_START_TIME_SUFFIX
+        );
+        final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString());
+
+        if (sliceCollectorsToLeaves.isEmpty() || contexts.isEmpty()) {
+            // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the
+            // create_weight time/count
+            queryNodeTime = createWeightTime;
+            maxSliceNodeTime = queryNodeTime;
+            minSliceNodeTime = queryNodeTime;
+            avgSliceNodeTime = queryNodeTime;
+            return buildDefaultQueryBreakdownMap(createWeightTime);
+        }
+
+        // first create the slice level breakdowns
+        final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown(createWeightStartTime);
+        return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime);
+    }
+
+    /**
+     * @param createWeightTime time for creating weight
+     * @return default breakdown map for a concurrent query which includes the create weight time, while all other timing type stats
+     * in the breakdown have a default value of 0. For the concurrent search case, the max/min/avg stats for each timing type will
+     * also be 0 in this default breakdown map.
+     */
+    private Map<String, Long> buildDefaultQueryBreakdownMap(long createWeightTime) {
+        final Map<String, Long> concurrentQueryBreakdownMap = new HashMap<>();
+        for (QueryTimingType timingType : QueryTimingType.values()) {
+            final String timingTypeKey = timingType.toString();
+            final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;
+
+            if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+                concurrentQueryBreakdownMap.put(timingTypeKey, createWeightTime);
+                concurrentQueryBreakdownMap.put(timingTypeCountKey, 1L);
+                continue;
+            }
+            final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey;
+            final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey;
+            final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey;
+            final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey;
+            final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey;
+            final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey;
+            // add time related stats
+            concurrentQueryBreakdownMap.put(timingTypeKey, 0L);
+            concurrentQueryBreakdownMap.put(maxBreakdownTypeTime, 0L);
+            concurrentQueryBreakdownMap.put(minBreakdownTypeTime, 0L);
+            concurrentQueryBreakdownMap.put(avgBreakdownTypeTime, 0L);
+            // add count related stats
+            concurrentQueryBreakdownMap.put(timingTypeCountKey, 0L);
+            concurrentQueryBreakdownMap.put(maxBreakdownTypeCount, 0L);
+            concurrentQueryBreakdownMap.put(minBreakdownTypeCount, 0L);
+            concurrentQueryBreakdownMap.put(avgBreakdownTypeCount, 0L);
+        }
+        return concurrentQueryBreakdownMap;
+    }
+
+    /**
+     * Computes the slice level breakdown map. It uses sliceCollectorsToLeaves to figure out all the leaves or segments that are
+     * part of a slice, then uses the breakdown timing stats for each of these leaves to calculate the breakdown stats at the
+     * slice level.
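+     * <p>For illustration, assume a slice with two leaves where the next_doc timer starts at t=10 and t=15 (nanos) and runs for
+     * 5 and 10 nanos respectively: the slice start time for next_doc is min(10, 15) = 10, the slice end time is
+     * max(10 + 5, 15 + 10) = 25, so the slice level next_doc time is 25 - 10 = 15 and the slice level count is the sum of the
+     * two leaf counts.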
+     *
+     * @param createWeightStartTime start time when createWeight is called
+     * @return map of collector (or slice) to breakdown map
+     */
+    Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStartTime) {
+        final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = new HashMap<>();
+        long totalSliceNodeTime = 0;
+        for (Map.Entry<Collector, List<LeafReaderContext>> slice : sliceCollectorsToLeaves.entrySet()) {
+            final Collector sliceCollector = slice.getKey();
+            // initialize each slice level breakdown
+            final Map<String, Long> currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>());
+            // max slice end time across all timing types
+            long sliceMaxEndTime = Long.MIN_VALUE;
+            for (QueryTimingType timingType : QueryTimingType.values()) {
+                if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+                    // do nothing for create weight as that is query level time and not slice level
+                    continue;
+                }
+
+                // for each timing type compute maxSliceEndTime and minSliceStartTime. Also add the counts of the timing type to
+                // compute the total count at the slice level
+                final String timingTypeCountKey = timingType + TIMING_TYPE_COUNT_SUFFIX;
+                final String timingTypeStartKey = timingType + TIMING_TYPE_START_TIME_SUFFIX;
+                final String timingTypeSliceStartTimeKey = timingType + SLICE_START_TIME_SUFFIX;
+                final String timingTypeSliceEndTimeKey = timingType + SLICE_END_TIME_SUFFIX;
+
+                for (LeafReaderContext sliceLeaf : slice.getValue()) {
+                    if (!contexts.containsKey(sliceLeaf)) {
+                        // In cases like early termination, the collector-to-leaf association will be added for a
+                        // leaf, but the leaf level breakdown will not be created in the contexts map.
+                        // This is because the query hits an EarlyTerminationException before updating the contexts map.
+                        // To handle such cases, we ignore any leaf that is not present.
+                        continue;
+                    }
+                    final Map<String, Long> currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap();
+                    // get the count for the current leaf timing type
+                    currentSliceBreakdown.compute(
+                        timingTypeCountKey,
+                        (key, value) -> (value == null)
+                            ? currentSliceLeafBreakdownMap.get(timingTypeCountKey)
+                            : value + currentSliceLeafBreakdownMap.get(timingTypeCountKey)
+                    );
+
+                    // compute the sliceEndTime for the timingType using the max of endTime across slice leaves
+                    final long sliceLeafTimingTypeEndTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey)
+                        + currentSliceLeafBreakdownMap.get(timingType.toString());
+                    currentSliceBreakdown.compute(
+                        timingTypeSliceEndTimeKey,
+                        (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime)
+                    );
+
+                    // compute the sliceStartTime for the timingType using the min of startTime across slice leaves
+                    final long sliceLeafTimingTypeStartTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey);
+                    currentSliceBreakdown.compute(
+                        timingTypeSliceStartTimeKey,
+                        (key, value) -> (value == null) ? sliceLeafTimingTypeStartTime : Math.min(value, sliceLeafTimingTypeStartTime)
+                    );
+                }
+                // compute sliceMaxEndTime as the max of sliceEndTime across all timing types
+                sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.get(timingTypeSliceEndTimeKey));
+                // compute the total time for each timing type at the slice level using sliceEndTime and sliceStartTime
+                currentSliceBreakdown.put(
+                    timingType.toString(),
+                    currentSliceBreakdown.get(timingTypeSliceEndTimeKey) - currentSliceBreakdown.get(timingTypeSliceStartTimeKey)
+                );
+            }
+            // currentSliceNodeTime includes the create weight time as well, which will be the same for all the slices
+            long currentSliceNodeTime = sliceMaxEndTime - createWeightStartTime;
+            // compute max/min slice times
+            maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime);
+            minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime);
+            // total time at the query level
+            totalSliceNodeTime += currentSliceNodeTime;
+        }
+        avgSliceNodeTime = totalSliceNodeTime / sliceCollectorsToLeaves.size();
+        return sliceLevelBreakdowns;
+    }
+
+    /**
+     * Computes the query level breakdown map using the breakdown maps of all the slices. The query level breakdown map has the
+     * time/count stats for each breakdown type. The total time per breakdown type at the query level is computed by subtracting
+     * the min of the slice start times from the max of the slice end times for that type. The count for each breakdown type at
+     * the query level is the sum of the counts of that type across slices. In addition, there are max/min/avg stats across slices
+     * for each breakdown type.
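+     * <p>For example, with two slices where next_doc runs from t=10 to t=25 in one slice (time 15, count 2) and from t=30 to
+     * t=55 in the other (time 25, count 4), the query level next_doc time is max(25, 55) - min(10, 30) = 45 with a count of 6,
+     * while max_next_doc is 25, min_next_doc is 15, and avg_next_doc is (15 + 25) / 2 = 20.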
+     *
+     * @param sliceLevelBreakdowns breakdown map for all the slices
+     * @param createWeightTime time for create weight
+     * @param createWeightStartTime start time for create weight
+     * @return breakdown map for the entire query
+     */
+    public Map<String, Long> buildQueryBreakdownMap(
+        Map<Collector, Map<String, Long>> sliceLevelBreakdowns,
+        long createWeightTime,
+        long createWeightStartTime
+    ) {
+        final Map<String, Long> queryBreakdownMap = new HashMap<>();
+        long queryEndTime = Long.MIN_VALUE;
+        for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+            final String timingTypeKey = queryTimingType.toString();
+            final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;
+            final String sliceEndTimeForTimingType = timingTypeKey + SLICE_END_TIME_SUFFIX;
+            final String sliceStartTimeForTimingType = timingTypeKey + SLICE_START_TIME_SUFFIX;
+
+            final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey;
+            final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey;
+            final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey;
+            final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey;
+            final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey;
+            final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey;
+
+            long queryTimingTypeEndTime = Long.MIN_VALUE;
+            long queryTimingTypeStartTime = Long.MAX_VALUE;
+            long queryTimingTypeCount = 0L;
-        for (final AbstractProfileBreakdown<QueryTimingType> context : contexts.values()) {
-            for (final Map.Entry<String, Long> entry : context.toBreakdownMap().entrySet()) {
-                map.merge(entry.getKey(), entry.getValue(), Long::sum);
+            // create weight is called only once per query, so its time is computed at the query level
+            if (queryTimingType == QueryTimingType.CREATE_WEIGHT) {
+                queryBreakdownMap.put(timingTypeCountKey, 1L);
+                queryBreakdownMap.put(timingTypeKey, createWeightTime);
+                continue;
             }
+
+            // for all other timing types, we will compute min/max/avg/total across slices
+            for (Map.Entry<Collector, Map<String, Long>> sliceBreakdown :
+                sliceLevelBreakdowns.entrySet()) {
+                Long sliceBreakdownTypeTime = sliceBreakdown.getValue().get(timingTypeKey);
+                Long sliceBreakdownTypeCount = sliceBreakdown.getValue().get(timingTypeCountKey);
+                // compute max/min/avg TimingType time across slices
+                queryBreakdownMap.compute(
+                    maxBreakdownTypeTime,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.max(sliceBreakdownTypeTime, value)
+                );
+                queryBreakdownMap.compute(
+                    minBreakdownTypeTime,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.min(sliceBreakdownTypeTime, value)
+                );
+                queryBreakdownMap.compute(
+                    avgBreakdownTypeTime,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeTime : sliceBreakdownTypeTime + value
+                );
+
+                // compute max/min/avg TimingType count across slices
+                queryBreakdownMap.compute(
+                    maxBreakdownTypeCount,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.max(sliceBreakdownTypeCount, value)
+                );
+                queryBreakdownMap.compute(
+                    minBreakdownTypeCount,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.min(sliceBreakdownTypeCount, value)
+                );
+                queryBreakdownMap.compute(
+                    avgBreakdownTypeCount,
+                    (key, value) -> (value == null) ? sliceBreakdownTypeCount : sliceBreakdownTypeCount + value
+                );
+
+                // the query start/end time for a TimingType is the min/max of the start/end time across slices for that TimingType
+                queryTimingTypeEndTime = Math.max(queryTimingTypeEndTime, sliceBreakdown.getValue().get(sliceEndTimeForTimingType));
+                queryTimingTypeStartTime = Math.min(queryTimingTypeStartTime, sliceBreakdown.getValue().get(sliceStartTimeForTimingType));
+                queryTimingTypeCount += sliceBreakdownTypeCount;
+            }
+            queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime);
+            queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount);
+            queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size());
+            queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size());
+            // compute the query end time using the max of the query end time across all timing types
+            queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime);
         }
+        queryNodeTime = queryEndTime - createWeightStartTime;
+        return queryBreakdownMap;
+    }
+
+    @Override
+    public long toNodeTime() {
+        return queryNodeTime;
+    }
+
+    @Override
+    public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {
+        // Each slice (or collector) is executed by a single thread, so the list for a key will only ever be updated by one thread
+        sliceCollectorsToLeaves.computeIfAbsent(collector, k -> new ArrayList<>()).add(leaf);
+    }
+
+    @Override
+    public void associateCollectorsToLeaves(Map<Collector, List<LeafReaderContext>> collectorsToLeaves) {
+        sliceCollectorsToLeaves.putAll(collectorsToLeaves);
+    }
+
+    Map<Collector, List<LeafReaderContext>> getSliceCollectorsToLeaves() {
+        return Collections.unmodifiableMap(sliceCollectorsToLeaves);
+    }
+
+    // used by tests
+    Map<Object, AbstractProfileBreakdown<QueryTimingType>> getContexts() {
+        return contexts;
+    }
+
+    long getMaxSliceNodeTime() {
+        return maxSliceNodeTime;
+    }
+
+    long getMinSliceNodeTime() {
+        return minSliceNodeTime;
+    }
-        return map;
+    long getAvgSliceNodeTime() {
+        return avgSliceNodeTime;
     }
 }
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java
new file mode 100644
index 0000000000000..4e54178c3b4fb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java
@@ -0,0 +1,91 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the concurrent execution.
+ *
+ * @opensearch.internal
+ */
+public class ConcurrentQueryProfileTree extends AbstractQueryProfileTree {
+
+    @Override
+    protected ContextualProfileBreakdown<QueryTimingType> createProfileBreakdown() {
+        return new ConcurrentQueryProfileBreakdown();
+    }
+
+    @Override
+    protected ProfileResult createProfileResult(
+        String type,
+        String description,
+        ContextualProfileBreakdown<QueryTimingType> breakdown,
+        List<ProfileResult> childrenProfileResults
+    ) {
+        assert breakdown instanceof ConcurrentQueryProfileBreakdown;
+        final ConcurrentQueryProfileBreakdown concurrentBreakdown = (ConcurrentQueryProfileBreakdown) breakdown;
+        return new ProfileResult(
+            type,
+            description,
+            concurrentBreakdown.toBreakdownMap(),
+            concurrentBreakdown.toDebugMap(),
+            concurrentBreakdown.toNodeTime(),
+            childrenProfileResults,
+            concurrentBreakdown.getMaxSliceNodeTime(),
+            concurrentBreakdown.getMinSliceNodeTime(),
+            concurrentBreakdown.getAvgSliceNodeTime()
+        );
+    }
+
+    /**
+     * For the concurrent query case, when there are nested queries (with children), the {@link ConcurrentQueryProfileBreakdown}
+     * created for the child query weights doesn't have the association of collector to leaves. This is because child query weights
+     * are not exposed by the {@link org.apache.lucene.search.Weight} interface. So after all the collection has happened and before
+     * the result tree is created, we need to pass the association from the parent to the child breakdowns. This will then be used
+     * to create the breakdown map at the slice level for the child queries as well.
+     *
+     * @return a hierarchical representation of the profiled query tree
+     */
+    @Override
+    public List<ProfileResult> getTree() {
+        for (Integer root : roots) {
+            final ContextualProfileBreakdown<QueryTimingType> parentBreakdown = breakdowns.get(root);
+            assert parentBreakdown instanceof ConcurrentQueryProfileBreakdown;
+            final Map<Collector, List<LeafReaderContext>> parentCollectorToLeaves = ((ConcurrentQueryProfileBreakdown) parentBreakdown)
+                .getSliceCollectorsToLeaves();
+            // update all the children with the parent collectorToLeaves association
+            updateCollectorToLeavesForChildBreakdowns(root, parentCollectorToLeaves);
+        }
+        // once the collector to leaves mapping is updated, get the result
+        return super.getTree();
+    }
+
+    /**
+     * Updates the children with the collector to leaves mapping as recorded by the parent breakdown
+     * @param parentToken parent token number in the tree
+     * @param collectorToLeaves collector to leaves mapping recorded by the parent
+     */
+    private void updateCollectorToLeavesForChildBreakdowns(Integer parentToken, Map<Collector, List<LeafReaderContext>> collectorToLeaves) {
+        final List<Integer> children = tree.get(parentToken);
+        if (children != null) {
+            for (Integer currentChild : children) {
+                final ContextualProfileBreakdown<QueryTimingType> currentChildBreakdown = breakdowns.get(currentChild);
+                currentChildBreakdown.associateCollectorsToLeaves(collectorToLeaves);
+                updateCollectorToLeavesForChildBreakdowns(currentChild, collectorToLeaves);
+            }
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java
index 40c6a29384491..1ed367f094fb7 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java
@@ -32,73 +32,18 @@

 package org.opensearch.search.profile.query;

-import org.apache.lucene.search.Query;
-import org.opensearch.search.profile.AbstractInternalProfileTree;
 import org.opensearch.search.profile.ContextualProfileBreakdown;
 import org.opensearch.search.profile.ProfileResult;

 /**
- * This class tracks the dependency tree for queries (scoring and rewriting) and
- * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree
- * and returns a list of {@link ProfileResult} that can be serialized back to the client
+ * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the non-concurrent execution.
  *
  * @opensearch.internal
  */
-final class InternalQueryProfileTree extends AbstractInternalProfileTree<ContextualProfileBreakdown<QueryTimingType>, Query> {
-
-    /** Rewrite time */
-    private long rewriteTime;
-    private long rewriteScratch;
-    private final boolean concurrent;
-
-    InternalQueryProfileTree(boolean concurrent) {
-        this.concurrent = concurrent;
-    }
+public class InternalQueryProfileTree extends AbstractQueryProfileTree {

     @Override
     protected ContextualProfileBreakdown<QueryTimingType> createProfileBreakdown() {
-        return (concurrent) ?
new ConcurrentQueryProfileBreakdown() : new QueryProfileBreakdown(); - } - - @Override - protected String getTypeFromElement(Query query) { - // Anonymous classes won't have a name, - // we need to get the super class - if (query.getClass().getSimpleName().isEmpty()) { - return query.getClass().getSuperclass().getSimpleName(); - } - return query.getClass().getSimpleName(); - } - - @Override - protected String getDescriptionFromElement(Query query) { - return query.toString(); - } - - /** - * Begin timing a query for a specific Timing context - */ - public void startRewriteTime() { - assert rewriteScratch == 0; - rewriteScratch = System.nanoTime(); - } - - /** - * Halt the timing process and add the elapsed rewriting time. - * startRewriteTime() must be called for a particular context prior to calling - * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and - * nonsensical - * - * @return The elapsed time - */ - public long stopAndAddRewriteTime() { - long time = Math.max(1, System.nanoTime() - rewriteScratch); - rewriteTime += time; - rewriteScratch = 0; - return time; - } - - public long getRewriteTime() { - return rewriteTime; + return new QueryProfileBreakdown(); } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java index 12f9a7184d84a..c7e70d8d88007 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; @@ -137,4 +138,7 @@ public boolean isCacheable(LeafReaderContext ctx) { return false; } + public void associateCollectorToLeaves(LeafReaderContext leaf, Collector collector) { + profile.associateCollectorToLeaves(collector, leaf); + } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index 64194b4aff207..a80ce1c658081 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -59,7 +59,7 @@ public final class QueryProfiler extends AbstractProfiler queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertTrue(createWeightTime > 0); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || 
queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + // verify total/min/max/avg node time is same as weight time + assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); + assertEquals(createWeightTime, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(createWeightTime, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(createWeightTime, testQueryProfileBreakdown.getAvgSliceNodeTime()); + continue; + } + assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + } + + public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); + final Collector sliceCollector = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); + testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( + createWeightEarliestStartTime + ); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(1, sliceBreakdownMap.size()); + assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); + + final Map sliceBreakdown = sliceBreakdownMap.entrySet().iterator().next().getValue(); + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no entry for create weight at slice level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for other timing type we will have all the value and will be same as leaf breakdown as there is single slice and single leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + 
SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(20, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( + createWeightEarliestStartTime + ); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(2, sliceBreakdownMap.size()); + + for (Map.Entry> sliceBreakdowns : sliceBreakdownMap.entrySet()) { + Map sliceBreakdown = sliceBreakdowns.getValue(); + Map leafProfileBreakdownMap; + if (sliceBreakdowns.getKey().equals(sliceCollector_1)) { + leafProfileBreakdownMap = leafProfileBreakdownMap_1; + } else { + leafProfileBreakdownMap = leafProfileBreakdownMap_2; + } + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no entry for create weight at slice level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for other timing type we will have all the value and will be same as leaf breakdown as there is single slice and single + // leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + 
SLICE_END_TIME_SUFFIX) + ); + } + } + + assertEquals(50, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(35, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 20, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + + Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(50, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(20, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(15, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(2, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + 
assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(60, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(40, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + private Map getLeafBreakdownMap(long startTime, long timeTaken, long count) { + Map leafBreakDownMap = new HashMap<>(); + for (QueryTimingType timingType : QueryTimingType.values()) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // don't add anything + continue; + } + String timingTypeKey = timingType.toString(); + leafBreakDownMap.put(timingTypeKey, timeTaken); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_COUNT_SUFFIX, count); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX, startTime); + } + return leafBreakDownMap; + } + + private DirectoryReader getDirectoryReader(int numLeaves) throws Exception { + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); + + for (int i = 0; i < numLeaves; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value" + i, Field.Store.NO)); + document.add(new StringField("field2", "value" + i, Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + } + iw.deleteDocuments(new Term("field1", "value3")); + iw.close(); + return DirectoryReader.open(directory); + } + + private static class TestQueryProfileBreakdown extends AbstractProfileBreakdown { + private Map breakdownMap; + + public TestQueryProfileBreakdown(Class clazz, Map breakdownMap) { + super(clazz); + this.breakdownMap = breakdownMap; + } + + @Override + public Map toBreakdownMap() { + return breakdownMap; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java index a278e7004cc93..64a440b85eb10 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -81,6 +81,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -89,6 +92,10 @@ public class QueryProfilerTests extends OpenSearchTestCase { private IndexReader reader; private ContextIndexSearcher searcher; private ExecutorService executor; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; @ParametersFactory public static Collection concurrency() { @@ -160,7 +167,8 @@ public void testBasic() throws IOException { searcher.search(query, 1); List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); 
assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -168,12 +176,52 @@ public void testBasic() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + 
TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); @@ -186,7 +234,8 @@ public void testNoScoring() throws IOException { searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -194,12 +243,52 @@ public void testNoScoring() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + 
assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); @@ -226,7 +315,8 @@ public void testApproximations() throws IOException { searcher.count(query); List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); 
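// A hypothetical helper (not part of this patch) that would collapse the repeated
// max_/min_/avg_ assertions in these concurrent-search branches into one call per
// timing type; the method name and the expectNonZero flag are illustrative assumptions.
private static void assertSliceStats(Map<String, Long> breakdown, String timingType, boolean expectNonZero) {
    for (String prefix : new String[] { "max_", "min_", "avg_" }) {
        Long time = breakdown.get(prefix + timingType);
        Long count = breakdown.get(prefix + timingType + "_count");
        assertNotNull(time);
        assertNotNull(count);
        if (expectNonZero) {
            // phases that ran (e.g. build_scorer, next_doc) must report positive slice stats
            assertTrue(time > 0L);
            assertTrue(count > 0L);
        } else {
            // phases that never ran (e.g. advance or match in some tests) must report zeros
            assertEquals(0L, (long) time);
            assertEquals(0L, (long) count);
        }
    }
}
// e.g. assertSliceStats(breakdown, QueryTimingType.BUILD_SCORER.toString(), true);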
assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -234,12 +324,52 @@ public void testApproximations() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), 
greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 739ba6fc15c76..fc77c1b356124 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -338,6 +338,14 @@ public void testMinScoreDisablesCountOptimization() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -477,6 +485,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), 
greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -547,6 +563,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -585,6 +609,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -709,6 +741,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); @@ -716,6 +756,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { 
assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); assertThat(collector.getTime(), greaterThan(0L)); @@ -1054,6 +1102,14 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -1133,6 +1189,14 @@ public void testMinScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(10L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1210,6 +1274,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + 
assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1245,6 +1317,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1315,6 +1395,14 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); @@ -1342,6 +1430,14 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); From fae8e556c629bbf5664ca186e417f9ae825389f0 Mon Sep 17 00:00:00 2001 From: Sorabh Date: Wed, 20 Sep 2023 20:32:00 -0700 Subject: 
[PATCH 2/4] Fix NPE in ConcurrentQueryProfile while computing the breakdown map for slices (#10111)

* Fix NPE in ConcurrentQueryProfile while computing the breakdown map for slices.

There can be cases where one or more slices may not have timing-related information for their leaves in the contexts map. When creating the slice-level and query-level breakdown maps, such cases need to be handled by using default values correctly. Also update the min/max/avg sliceNodeTime to exclude the time spent creating the weight and the time slice threads spend waiting, so that it reflects the min/max/avg execution time of each slice, whereas totalNodeTime reflects the total query time.

Signed-off-by: Sorabh Hamirwasia

* Address review comments

Signed-off-by: Sorabh Hamirwasia

---------

Signed-off-by: Sorabh Hamirwasia
---
 .../ConcurrentQueryProfileBreakdown.java      |  84 +++++++++++---
 .../ConcurrentQueryProfileBreakdownTests.java | 103 ++++++++++++++----
 2 files changed, 147 insertions(+), 40 deletions(-)

diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index f37d53737d52b..e567fdd2d436c 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -10,6 +10,7 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Collector;
+import org.opensearch.OpenSearchException;
 import org.opensearch.search.profile.AbstractProfileBreakdown;
 import org.opensearch.search.profile.ContextualProfileBreakdown;
@@ -73,14 +74,14 @@ public Map<String, Long> toBreakdownMap() {
 // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the
 // create_weight time/count
 queryNodeTime = createWeightTime;
- maxSliceNodeTime = queryNodeTime;
- minSliceNodeTime = queryNodeTime;
- avgSliceNodeTime = queryNodeTime;
+ maxSliceNodeTime = 0L;
+ minSliceNodeTime = 0L;
+ avgSliceNodeTime = 0L;
 return buildDefaultQueryBreakdownMap(createWeightTime);
 }
 // first create the slice level breakdowns
- final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown(createWeightStartTime);
+ final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown();
 return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime);
 }
@@ -124,18 +125,19 @@ private Map<String, Long> buildDefaultQueryBreakdownMap(long createWeightTime) {
 /**
 * Computes the slice level breakdownMap. It uses sliceCollectorsToLeaves to figure out all the leaves or segments that are part of a slice.
 * It then uses the breakdown timing stats for each of these leaves to calculate the breakdown stats at the slice level.
- * @param createWeightStartTime start time when createWeight is called
+ *
 * @return map of collector (or slice) to breakdown map
 */
- Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStartTime) {
+ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown() {
 final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = new HashMap<>();
- long totalSliceNodeTime = 0;
+ long totalSliceNodeTime = 0L;
 for (Map.Entry<Collector, List<LeafReaderContext>> slice : sliceCollectorsToLeaves.entrySet()) {
 final Collector sliceCollector = slice.getKey();
 // initialize each slice level breakdown
 final Map<String, Long> currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>());
 // max slice end time across all timing types
 long sliceMaxEndTime = Long.MIN_VALUE;
+ long sliceMinStartTime = Long.MAX_VALUE;
 for (QueryTimingType timingType : QueryTimingType.values()) {
 if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
 // do nothing for create weight as that is query level time and not slice level
@@ -155,6 +157,12 @@ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStar
 // leaf, but the leaf level breakdown will not be created in the contexts map.
 // This is because before updating the contexts map, the query hits earlyTerminationException.
 // To handle such a case, we ignore the leaf that is not present.
+ //
+ // Other than early termination, this can also happen in other cases. For example, consider a must boolean query
+ // with 2 boolean clauses. If, while creating the scorer for the first clause, no docs are found for the field in a
+ // leaf context, a null scorer is returned. Since it is a must query, no scorer will be created for the second
+ // clause's weight for that leaf context either. As a result, the leaf context ends up missing from the contexts map
+ // for the second clause's weight.
 continue;
 }
 final Map<String, Long> currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap();
@@ -182,15 +190,36 @@ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStar
 );
 }
 // compute sliceMaxEndTime as max of sliceEndTime across all timing types
- sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.get(timingTypeSliceEndTimeKey));
+ sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE));
+ sliceMinStartTime = Math.min(
+ sliceMinStartTime,
+ currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE)
+ );
 // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime
 currentSliceBreakdown.put(
 timingType.toString(),
- currentSliceBreakdown.get(timingTypeSliceEndTimeKey) - currentSliceBreakdown.get(timingTypeSliceStartTimeKey)
+ currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault(
+ timingTypeSliceStartTimeKey,
+ 0L
+ )
 );
 }
- // currentSliceNodeTime includes the create weight time as well which will be same for all the slices
- long currentSliceNodeTime = sliceMaxEndTime - createWeightStartTime;
+ // currentSliceNodeTime does not include the create weight time, as that is computed in the non-concurrent part
+ long currentSliceNodeTime;
+ if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) {
+ currentSliceNodeTime = 0L;
+ } else if (sliceMinStartTime == Long.MAX_VALUE || sliceMaxEndTime == Long.MIN_VALUE) {
+ throw new OpenSearchException(
+ "Unexpected value of sliceMinStartTime ["
+ + sliceMinStartTime
+ + "] or sliceMaxEndTime ["
+ + sliceMaxEndTime
+ + "] while computing the slice level timing profile breakdowns"
+ );
+ } else {
+ currentSliceNodeTime = sliceMaxEndTime -
sliceMinStartTime; + } + // compute max/min slice times maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime); minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime); @@ -245,8 +274,8 @@ public Map buildQueryBreakdownMap( // for all other timing types, we will compute min/max/avg/total across slices for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { - Long sliceBreakdownTypeTime = sliceBreakdown.getValue().get(timingTypeKey); - Long sliceBreakdownTypeCount = sliceBreakdown.getValue().get(timingTypeCountKey); + long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L); + long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L); // compute max/min/avg TimingType time across slices queryBreakdownMap.compute( maxBreakdownTypeTime, @@ -276,17 +305,38 @@ public Map buildQueryBreakdownMap( ); // query start/end time for a TimingType is min/max of start/end time across slices for that TimingType - queryTimingTypeEndTime = Math.max(queryTimingTypeEndTime, sliceBreakdown.getValue().get(sliceEndTimeForTimingType)); - queryTimingTypeStartTime = Math.min(queryTimingTypeStartTime, sliceBreakdown.getValue().get(sliceStartTimeForTimingType)); + queryTimingTypeEndTime = Math.max( + queryTimingTypeEndTime, + sliceBreakdown.getValue().getOrDefault(sliceEndTimeForTimingType, Long.MIN_VALUE) + ); + queryTimingTypeStartTime = Math.min( + queryTimingTypeStartTime, + sliceBreakdown.getValue().getOrDefault(sliceStartTimeForTimingType, Long.MAX_VALUE) + ); queryTimingTypeCount += sliceBreakdownTypeCount; } + + if (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE) { + throw new OpenSearchException( + "Unexpected timing type [" + + timingTypeKey + + "] start [" + + queryTimingTypeStartTime + + "] or end time [" + + queryTimingTypeEndTime + + "] computed across slices for profile results" + ); + } queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime); queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount); - queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); - queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 
0L : value / sliceLevelBreakdowns.size()); // compute query end time using max of query end time across all timing types queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime); } + if (queryEndTime == Long.MIN_VALUE) { + throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result"); + } queryNodeTime = queryEndTime - createWeightStartTime; return queryBreakdownMap; } diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index 065c90b262e11..f29ba3b0cea07 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -81,9 +81,9 @@ public void testBreakdownMapWithNoLeafContext() throws Exception { ); // verify total/min/max/avg node time is same as weight time assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); continue; } assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); @@ -103,16 +103,15 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); final Collector sliceCollector = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap ); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); - final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( - createWeightEarliestStartTime - ); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); assertEquals(1, sliceBreakdownMap.size()); assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); @@ -141,9 +140,9 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) ); } - assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); 
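// The 20-to-10 change just above follows from the new slice timing arithmetic: a slice's
// node time now spans only from its earliest timing-type start to its latest timing-type
// end, so create_weight no longer inflates it. A standalone sketch of that arithmetic
// (not the actual implementation; the "_start"/"_end" key suffixes are simplified stand-ins):
import java.util.List;
import java.util.Map;

public class SliceNodeTimeSketch {
    static long sliceNodeTime(Map<String, Long> sliceBreakdown, List<String> timingTypes) {
        long minStart = Long.MAX_VALUE;
        long maxEnd = Long.MIN_VALUE;
        for (String type : timingTypes) {
            minStart = Math.min(minStart, sliceBreakdown.getOrDefault(type + "_start", Long.MAX_VALUE));
            maxEnd = Math.max(maxEnd, sliceBreakdown.getOrDefault(type + "_end", Long.MIN_VALUE));
        }
        // a slice with no timed leaves contributes zero instead of tripping an NPE
        if (minStart == Long.MAX_VALUE && maxEnd == Long.MIN_VALUE) {
            return 0L;
        }
        return maxEnd - minStart;
    }

    public static void main(String[] args) {
        // one timing type running from t=40 to t=50 yields a slice node time of 10,
        // matching the updated expectations in the test above
        Map<String, Long> slice = Map.of("next_doc_start", 40L, "next_doc_end", 50L);
        System.out.println(sliceNodeTime(slice, List.of("next_doc", "advance")));
    }
}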
directory.close(); } @@ -154,8 +153,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { final Collector sliceCollector_1 = mock(Collector.class); final Collector sliceCollector_2 = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); - final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap_1 @@ -168,9 +168,7 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); - final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( - createWeightEarliestStartTime - ); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); assertEquals(2, sliceBreakdownMap.size()); @@ -208,9 +206,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { } } - assertEquals(50, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(35, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } @@ -221,8 +219,9 @@ public void testBreakDownMapWithMultipleSlices() throws Exception { final Collector sliceCollector_1 = mock(Collector.class); final Collector sliceCollector_2 = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); - final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 20, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1); final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap_1 @@ -269,9 +268,67 @@ public void testBreakDownMapWithMultipleSlices() throws Exception { assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); } - assertEquals(60, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - 
assertEquals(40, testQueryProfileBreakdown.getAvgSliceNodeTime());
+ assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime());
+ assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime());
+ assertEquals(15, testQueryProfileBreakdown.getAvgSliceNodeTime());
+ directoryReader.close();
+ directory.close();
+ }
+
+ public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() throws Exception {
+ final DirectoryReader directoryReader = getDirectoryReader(2);
+ final Directory directory = directoryReader.directory();
+ final Collector sliceCollector_1 = mock(Collector.class);
+ final Collector sliceCollector_2 = mock(Collector.class);
+ final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime();
+ final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming();
+ final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1);
+ final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown(
+ QueryTimingType.class,
+ leafProfileBreakdownMap_1
+ );
+ testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0));
+ testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1));
+ testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1);
+ // the profile breakdown for leaf2 is intentionally not added to the contexts map
+
+ Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap();
+ assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty());
+ assertEquals(66, queryBreakDownMap.size());
+
+ for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+ String timingTypeKey = queryTimingType.toString();
+ String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX;
+
+ if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+ final long createWeightTime = queryBreakDownMap.get(timingTypeKey);
+ assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime);
+ assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey));
+ // verify there are no min/max/avg stats for the create_weight timing type
+ assertFalse(
+ queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)
+ || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey)
+ || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)
+ || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)
+ || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey)
+ || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)
+ );
+ continue;
+ }
+ assertEquals(10, (long) queryBreakDownMap.get(timingTypeKey));
+ assertEquals(10, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey));
+ assertEquals(5, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey));
+ assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey));
+ assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey));
+ assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey));
+ // a min of 0 means one of the slices didn't work on any leaf context
+ assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey));
+ assertEquals(0, (long)
queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(5, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } From 131fbb0e73e76b192ad5e92a5e2c177dc0cdb2ec Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Wed, 11 Oct 2023 07:31:18 -0700 Subject: [PATCH 3/4] Fix flaky query profile phase tests with concurrent search enabled (#10547) (#10547) Signed-off-by: Ticheng Lin --- .../search/query/QueryProfilePhaseTests.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index fc77c1b356124..28af8a63cfba8 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -1104,10 +1104,10 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); if (executor != null) { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1278,7 +1278,7 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L)); assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); } @@ -1400,7 +1400,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); @@ -1435,7 +1435,7 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("avg_score"), 
greaterThan(0L)); assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); From b7bec57021c0f9c39ff79ffdb4ea94262bd696d0 Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Thu, 19 Oct 2023 19:58:13 -0700 Subject: [PATCH 4/4] Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight (#10352) * Fix timer race condition in profile rewrite and create weight for concurrent segment search (#10352) Signed-off-by: Ticheng Lin * Refactor and work on the PR comments (#10352) Signed-off-by: Ticheng Lin --------- Signed-off-by: Ticheng Lin --- CHANGELOG.md | 1 + .../search/profile/query/QueryProfilerIT.java | 83 +++++++++++ .../opensearch/search/profile/Profilers.java | 7 +- .../org/opensearch/search/profile/Timer.java | 12 ++ .../query/AbstractQueryProfileTree.java | 5 +- .../ConcurrentQueryProfileBreakdown.java | 27 +++- .../query/ConcurrentQueryProfiler.java | 134 ++++++++++++++++++ .../search/profile/query/QueryProfiler.java | 14 +- .../ConcurrentQueryProfileBreakdownTests.java | 52 +++++++ .../query/ConcurrentQueryProfilerTests.java | 36 +++++ .../profile/query/QueryProfilerTests.java | 16 ++- 11 files changed, 366 insertions(+), 21 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java create mode 100644 server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 54e8da353f49c..bb1e6a33dd02c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 24f2b1bbdabfc..ef73438114079 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -460,6 +460,89 @@ public void testBoosting() throws Exception { } } + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int 
numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + ).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + public void testDisMaxRange() throws Exception { createIndex("test"); ensureGreen(); diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 8e87c7ff4acd4..68cf05c988b5b 100644 --- 
diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java
index 8e87c7ff4acd4..68cf05c988b5b 100644
--- a/server/src/main/java/org/opensearch/search/profile/Profilers.java
+++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java
@@ -35,6 +35,9 @@
 import org.opensearch.search.internal.ContextIndexSearcher;
 import org.opensearch.search.profile.aggregation.AggregationProfiler;
 import org.opensearch.search.profile.aggregation.ConcurrentAggregationProfiler;
+import org.opensearch.search.profile.query.ConcurrentQueryProfileTree;
+import org.opensearch.search.profile.query.ConcurrentQueryProfiler;
+import org.opensearch.search.profile.query.InternalQueryProfileTree;
 import org.opensearch.search.profile.query.QueryProfiler;

 import java.util.ArrayList;
@@ -64,7 +67,9 @@ public Profilers(ContextIndexSearcher searcher, boolean isConcurrentSegmentSearc

     /** Switch to a new profile. */
     public QueryProfiler addQueryProfiler() {
-        QueryProfiler profiler = new QueryProfiler(isConcurrentSegmentSearchEnabled);
+        QueryProfiler profiler = isConcurrentSegmentSearchEnabled
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         queryProfilers.add(profiler);
         return profiler;

diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java
index 172762cabeb6a..864c689cf7fa0 100644
--- a/server/src/main/java/org/opensearch/search/profile/Timer.java
+++ b/server/src/main/java/org/opensearch/search/profile/Timer.java
@@ -53,6 +53,18 @@ public class Timer {
     private boolean doTiming;
     private long timing, count, lastCount, start, earliestTimerStartTime;

+    public Timer() {
+        this(0, 0, 0, 0, 0);
+    }
+
+    public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) {
+        this.timing = timing;
+        this.count = count;
+        this.lastCount = lastCount;
+        this.start = start;
+        this.earliestTimerStartTime = earliestTimerStartTime;
+    }
+
     /** pkg-private for testing */
     long nanoTime() {
         return System.nanoTime();

diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
index 8e825def13f5d..2f5d632ee2d87 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
@@ -54,14 +54,11 @@ public void startRewriteTime() {
      * startRewriteTime() must be called for a particular context prior to calling
      * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and
      * nonsensical
-     *
-     * @return The elapsed time
      */
-    public long stopAndAddRewriteTime() {
+    public void stopAndAddRewriteTime() {
         long time = Math.max(1, System.nanoTime() - rewriteScratch);
         rewriteTime += time;
         rewriteScratch = 0;
-        return time;
     }

     public long getRewriteTime() {
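The new Timer constructor exists so tests can construct timers with known field values, as ConcurrentQueryProfilerTests does later in this patch. For orientation, a minimal stand-in for the start/stop accounting such a timer performs; this sketch is not the OpenSearch Timer class itself:

    // Minimal sketch of profiler-timer accounting: remember the earliest start,
    // accumulate elapsed nanos, and count invocations.
    public class SimpleTimer {
        private long timing, count, start, earliestTimerStartTime;

        public void start() {
            start = System.nanoTime();
            if (count == 0) {
                earliestTimerStartTime = start; // first time this timer ever ran
            }
        }

        public void stop() {
            timing += System.nanoTime() - start;
            count++;
            start = 0;
        }

        public long getApproximateTiming() { return timing; }
        public long getCount() { return count; }
        public long getEarliestTimerStartTime() { return earliestTimerStartTime; }

        public static void main(String[] args) throws InterruptedException {
            SimpleTimer t = new SimpleTimer();
            t.start();
            Thread.sleep(5); // stand-in for a query rewrite being measured
            t.stop();
            System.out.println(t.getApproximateTiming() + "ns over " + t.getCount() + " call(s)");
        }
    }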
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index e567fdd2d436c..59ef01f9f947a 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -70,7 +70,7 @@ public Map<String, Long> toBreakdownMap() {
         );
         final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString());

-        if (sliceCollectorsToLeaves.isEmpty() || contexts.isEmpty()) {
+        if (contexts.isEmpty()) {
             // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the
             // create_weight time/count
             queryNodeTime = createWeightTime;
@@ -78,6 +78,21 @@ public Map<String, Long> toBreakdownMap() {
             minSliceNodeTime = 0L;
             avgSliceNodeTime = 0L;
             return buildDefaultQueryBreakdownMap(createWeightTime);
+        } else if (sliceCollectorsToLeaves.isEmpty()) {
+            // This happens when each slice executes searchLeaf for its leaves and the query is rewritten for the leaf being
+            // searched. A new weight and breakdown map is created for each rewritten query, and that new breakdown map captures
+            // the timing information for the rewritten query. sliceCollectorsToLeaves is empty because the breakdown for the
+            // rewritten query gets created later, in the search-leaf path, which has no collector. That is fine, since this
+            // breakdown is per leaf and no concurrency is involved. An empty sliceCollectorsToLeaves can also occur in the case
+            // of early termination.
+            AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
+            queryNodeTime = breakdown.toNodeTime() + createWeightTime;
+            maxSliceNodeTime = 0L;
+            minSliceNodeTime = 0L;
+            avgSliceNodeTime = 0L;
+            Map<String, Long> queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap());
+            queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime);
+            queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L);
+            return queryBreakdownMap;
         }

         // first create the slice level breakdowns
@@ -191,10 +206,12 @@ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown() {
                 }
                 // compute sliceMaxEndTime as max of sliceEndTime across all timing types
                 sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE));
-                sliceMinStartTime = Math.min(
-                    sliceMinStartTime,
-                    currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE)
-                );
+                long currentSliceStartTime = currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE);
+                if (currentSliceStartTime == 0L) {
+                    // The timer for the current timing type never started, so skip it
+                    continue;
+                }
+                sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime);
                 // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime
                 currentSliceBreakdown.put(
                     timingType.toString(),
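The new else-if branch reduces to copying the single leaf's breakdown and folding the query-level create_weight timing into it. A simplified sketch of that merge, using hypothetical timing values but the real key names:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the rewritten-query branch above: one leaf context, no concurrency,
    // so the query-level map is just the leaf breakdown plus the create_weight entries.
    public class LeafBreakdownMerge {
        public static void main(String[] args) {
            Map<String, Long> leafBreakdown = new HashMap<>();
            leafBreakdown.put("score", 1200L);        // hypothetical leaf timings (nanos)
            leafBreakdown.put("score_count", 3L);

            long createWeightTime = 500L;             // measured once at the query level

            Map<String, Long> queryBreakdown = new HashMap<>(leafBreakdown);
            queryBreakdown.put("create_weight", createWeightTime);
            queryBreakdown.put("create_weight_count", 1L); // create_weight runs exactly once

            System.out.println(queryBreakdown);
        }
    }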
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
new file mode 100644
index 0000000000000..42bf23bb13fbe
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Query;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.search.profile.Timer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class acts as thread-local storage for profiling a query with concurrent execution.
+ *
+ * @opensearch.internal
+ */
+public final class ConcurrentQueryProfiler extends QueryProfiler {
+
+    private final Map<Long, ConcurrentQueryProfileTree> threadToProfileTree;
+    // The LinkedList does not need to be thread-safe, as the map associates thread IDs with LinkedLists, and only
+    // one thread will access a given LinkedList at a time.
+    private final Map<Long, LinkedList<Timer>> threadToRewriteTimers;
+
+    public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) {
+        super(profileTree);
+        long threadId = getCurrentThreadId();
+        // We utilize LinkedHashMap to preserve the insertion order of the profiled queries
+        threadToProfileTree = Collections.synchronizedMap(new LinkedHashMap<>());
+        threadToProfileTree.put(threadId, (ConcurrentQueryProfileTree) profileTree);
+        threadToRewriteTimers = new ConcurrentHashMap<>();
+        threadToRewriteTimers.put(threadId, new LinkedList<>());
+    }
+
+    @Override
+    public ContextualProfileBreakdown<QueryTimingType> getQueryBreakdown(Query query) {
+        ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent(
+            getCurrentThreadId(),
+            k -> new ConcurrentQueryProfileTree()
+        );
+        return profileTree.getProfileBreakdown(query);
+    }
+
+    /**
+     * Removes the last (i.e. most recent) element from the ConcurrentQueryProfileTree stack.
+     */
+    @Override
+    public void pollLastElement() {
+        ConcurrentQueryProfileTree concurrentProfileTree = threadToProfileTree.get(getCurrentThreadId());
+        if (concurrentProfileTree != null) {
+            concurrentProfileTree.pollLast();
+        }
+    }
+
+    /**
+     * @return a hierarchical representation of the profiled tree
+     */
+    @Override
+    public List<ProfileResult> getTree() {
+        List<ProfileResult> profileResults = new ArrayList<>();
+        for (Map.Entry<Long, ConcurrentQueryProfileTree> profile : threadToProfileTree.entrySet()) {
+            profileResults.addAll(profile.getValue().getTree());
+        }
+        return profileResults;
+    }
+
+    /**
+     * Begin timing the rewrite phase of a request
+     */
+    @Override
+    public void startRewriteTime() {
+        Timer rewriteTimer = new Timer();
+        threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer);
+        rewriteTimer.start();
+    }
+
+    /**
+     * Stop recording the current rewrite timer
+     */
+    @Override
+    public void stopAndAddRewriteTime() {
+        Timer rewriteTimer = threadToRewriteTimers.get(getCurrentThreadId()).getLast();
+        rewriteTimer.stop();
+    }
+
+    /**
+     * @return total time taken to rewrite all queries in this concurrent query profiler
+     */
+    @Override
+    public long getRewriteTime() {
+        long totalRewriteTime = 0L;
+        List<Timer> rewriteTimers = new LinkedList<>();
+        threadToRewriteTimers.values().forEach(rewriteTimers::addAll);
+        LinkedList<long[]> mergedIntervals = mergeRewriteTimeIntervals(rewriteTimers);
+        for (long[] interval : mergedIntervals) {
+            totalRewriteTime += interval[1] - interval[0];
+        }
+        return totalRewriteTime;
+    }
+
+    // package private for unit testing
+    LinkedList<long[]> mergeRewriteTimeIntervals(List<Timer> timers) {
+        LinkedList<long[]> mergedIntervals = new LinkedList<>();
+        timers.sort(Comparator.comparingLong(Timer::getEarliestTimerStartTime));
+        for (Timer timer : timers) {
+            long startTime = timer.getEarliestTimerStartTime();
+            long endTime = startTime + timer.getApproximateTiming();
+            if (mergedIntervals.isEmpty() || mergedIntervals.getLast()[1] < startTime) {
+                long[] interval = new long[2];
+                interval[0] = startTime;
+                interval[1] = endTime;
+                mergedIntervals.add(interval);
+            } else {
+                mergedIntervals.getLast()[1] = Math.max(mergedIntervals.getLast()[1], endTime);
+            }
+        }
+        return mergedIntervals;
+    }
+
+    private long getCurrentThreadId() {
+        return Thread.currentThread().getId();
+    }
+}
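Summing raw per-thread timer values would double count rewrite work that overlaps in time, so getRewriteTime() merges the [start, end] intervals first and sums the merged lengths. A self-contained sketch of the same merge with toy numbers:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.LinkedList;

    // Sketch of the interval merge used by getRewriteTime(): sort intervals by start,
    // extend the last merged interval when the next one overlaps it, then sum lengths.
    // Toy values: [5,10] and [8,12] overlap and merge to [5,12]; [20,25] stands alone.
    public class MergeIntervals {
        public static void main(String[] args) {
            long[][] intervals = { { 20, 25 }, { 5, 10 }, { 8, 12 } };
            Arrays.sort(intervals, Comparator.comparingLong(i -> i[0]));
            LinkedList<long[]> merged = new LinkedList<>();
            for (long[] interval : intervals) {
                if (merged.isEmpty() || merged.getLast()[1] < interval[0]) {
                    merged.add(interval);
                } else {
                    merged.getLast()[1] = Math.max(merged.getLast()[1], interval[1]);
                }
            }
            long total = 0;
            for (long[] interval : merged) {
                total += interval[1] - interval[0];
            }
            System.out.println(total); // (12 - 5) + (25 - 20) = 12
        }
    }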
diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java
index a80ce1c658081..332c4b3551450 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java
@@ -51,15 +51,15 @@
  *
  * @opensearch.internal
  */
-public final class QueryProfiler extends AbstractProfiler<ContextualProfileBreakdown<QueryTimingType>, Query> {
+public class QueryProfiler extends AbstractProfiler<ContextualProfileBreakdown<QueryTimingType>, Query> {

     /**
      * The root Collector used in the search
      */
     private InternalProfileComponent collector;

-    public QueryProfiler(boolean concurrent) {
-        super(concurrent ? new ConcurrentQueryProfileTree() : new InternalQueryProfileTree());
+    public QueryProfiler(AbstractQueryProfileTree profileTree) {
+        super(profileTree);
     }

     /** Set the collector that is associated with this profiler. */
@@ -81,14 +81,14 @@ public void startRewriteTime() {

     /**
      * Stop recording the current rewrite and add its time to the total tally, returning the
      * cumulative time so far.
-     *
-     * @return cumulative rewrite time
      */
-    public long stopAndAddRewriteTime() {
-        return ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime();
+    public void stopAndAddRewriteTime() {
+        ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime();
     }

     /**
+     * The rewriting process is complex and hard to display because queries can undergo significant changes.
+     * Instead of showing intermediate results, we display the cumulative time for the non-concurrent search case.
      * @return total time taken to rewrite all queries in this profile
      */
     public long getRewriteTime() {
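With the return value removed, callers time a rewrite purely through the start/stop pair and read the cumulative value back at the end. A usage sketch (the profileRewrite helper and its Runnable argument are hypothetical; the profiler methods are the ones shown above):

    import org.opensearch.search.profile.query.QueryProfiler;

    // Usage sketch compiling against the classes in this patch: time one rewrite and
    // read the cumulative value back. stopAndAddRewriteTime() no longer returns the
    // elapsed time, so callers rely on getRewriteTime() for the total.
    public class RewriteTimingUsage {
        static void profileRewrite(QueryProfiler profiler, Runnable rewrite) {
            profiler.startRewriteTime();
            try {
                rewrite.run();                    // the query rewrite being measured
            } finally {
                profiler.stopAndAddRewriteTime(); // close the interval just opened
            }
            System.out.println("cumulative rewrite time: " + profiler.getRewriteTime() + "ns");
        }
    }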
diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java
index f29ba3b0cea07..db14eb90ef839 100644
--- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java
+++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java
@@ -333,6 +333,58 @@ public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() thr
         directory.close();
     }

+    public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exception {
+        final DirectoryReader directoryReader = getDirectoryReader(1);
+        final Directory directory = directoryReader.directory();
+        final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime();
+        final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming();
+        final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1);
+        final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown(
+            QueryTimingType.class,
+            leafProfileBreakdownMap_1
+        );
+        testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1);
+        final Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap();
+        assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty());
+        assertEquals(26, queryBreakDownMap.size());
+        for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+            String timingTypeKey = queryTimingType.toString();
+            String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX;
+
+            if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+                final long createWeightTime = queryBreakDownMap.get(timingTypeKey);
+                assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime);
+                assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey));
+                // verify there is no min/max/avg for weight type stats
+                assertFalse(
+                    queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)
+                        || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey)
+                        || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)
+                );
+                continue;
+            }
+            assertNotNull(queryBreakDownMap.get(timingTypeKey));
+            assertNotNull(queryBreakDownMap.get(timingTypeCountKey));
+            // verify there is no min/max/avg for current breakdown type stats
+            assertFalse(
+                queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)
+                    || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey)
+                    || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)
+                    || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)
+                    || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey)
+                    || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)
+            );
+        }
+        assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime());
+        assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime());
+        assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime());
+        directoryReader.close();
+        directory.close();
+    }
+
     private Map<String, Long> getLeafBreakdownMap(long startTime, long timeTaken, long count) {
         Map<String, Long> leafBreakDownMap = new HashMap<>();
         for (QueryTimingType timingType : QueryTimingType.values()) {

diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java
new file mode 100644
index 0000000000000..736bbcdd9e8dd
--- /dev/null
+++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.opensearch.search.profile.Timer;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ConcurrentQueryProfilerTests extends OpenSearchTestCase {
+
+    public void testMergeRewriteTimeIntervals() {
+        ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree());
+        List<Timer> timers = new LinkedList<>();
+        timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L));
+        timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L));
+        timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L));
+        LinkedList<long[]> mergedIntervals = profiler.mergeRewriteTimeIntervals(timers);
+        assertThat(mergedIntervals.size(), equalTo(2));
+        long[] interval = mergedIntervals.get(0);
+        assertThat(interval[0], equalTo(553074509287335L));
+        assertThat(interval[1], equalTo(553074509516290L));
+        interval = mergedIntervals.get(1);
+        assertThat(interval[0], equalTo(553074511206907L));
+        assertThat(interval[1], equalTo(553074511424041L));
+    }
+}
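ConcurrentQueryProfiler keys its bookkeeping by thread id so each slice thread only ever mutates its own tree and timer list. A standalone sketch of that pattern, independent of the OpenSearch classes:

    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Standalone sketch of the per-thread bookkeeping used by ConcurrentQueryProfiler:
    // a ConcurrentHashMap keyed by thread id, where each value is only ever touched by
    // its owning thread, so the values themselves need no synchronization.
    public class PerThreadLists {
        private final Map<Long, List<String>> perThread = new ConcurrentHashMap<>();

        public void record(String event) {
            perThread.computeIfAbsent(Thread.currentThread().getId(), k -> new LinkedList<>()).add(event);
        }

        public static void main(String[] args) throws InterruptedException {
            PerThreadLists lists = new PerThreadLists();
            Runnable task = () -> lists.record("rewrite");
            Thread t1 = new Thread(task);
            Thread t2 = new Thread(task);
            t1.start();
            t2.start();
            t1.join();
            t2.join();
            System.out.println(lists.perThread.size()); // one entry per thread: 2
        }
    }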
diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
index 64a440b85eb10..481a224f2ff0e 100644
--- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
+++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
@@ -161,7 +161,9 @@ public void tearDown() throws Exception {
     }

     public void testBasic() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.search(query, 1);
@@ -228,7 +230,9 @@ public void testBasic() throws IOException {
     }

     public void testNoScoring() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
@@ -295,7 +299,9 @@ public void testNoScoring() throws IOException {
     }

     public void testUseIndexStats() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new TermQuery(new Term("foo", "bar"));
         searcher.count(query); // will use index stats
@@ -309,7 +315,9 @@ public void testUseIndexStats() throws IOException {
     }

     public void testApproximations() throws IOException {
-        QueryProfiler profiler = new QueryProfiler(executor != null);
+        QueryProfiler profiler = executor != null
+            ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree())
+            : new QueryProfiler(new InternalQueryProfileTree());
         searcher.setProfiler(profiler);
         Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
         searcher.count(query);