From 1c1cec3aa52bfb7c33528fe45cc78d5f36683bda Mon Sep 17 00:00:00 2001
From: Ticheng Lin <ticheng@amazon.com>
Date: Tue, 7 Nov 2023 21:36:08 -0800
Subject: [PATCH] Fix slice collectors to leaves association with profile
 enabled (#11134)

Signed-off-by: Ticheng Lin <ticheng@amazon.com>
---
 .../query/ConcurrentQueryProfileBreakdown.java    | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index 59ef01f9f947a..7be04998dcea0 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -83,12 +83,26 @@ public Map<String, Long> toBreakdownMap() {
             // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for
             // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later
             // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no
-            // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination.
+            // concurrency involved.
             AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
             queryNodeTime = breakdown.toNodeTime() + createWeightTime;
             maxSliceNodeTime = 0L;
             minSliceNodeTime = 0L;
             avgSliceNodeTime = 0L;
+            if (contexts.size() > 1) {
+                // If the leaf counts in contexts map don't match those in sliceCollectorsToLeaves, this means that the current
+                // node is outside the concurrent search path, which occurred before the contextIndexSearcher::searchLeaf.
+                // For example, in the post filter query, we may create a profile node during filtered collector context creation.
+                // In this case, as there is no concurrent implementation, we can simply return the accumulated breakdown result
+                // instead of the statistical result.
+                final Map<String, Long> queryBreakdownMap = new HashMap<>(topLevelBreakdownMapWithWeightTime);
+                for (final AbstractProfileBreakdown<QueryTimingType> context : contexts.values()) {
+                    for (final Map.Entry<String, Long> entry : context.toBreakdownMap().entrySet()) {
+                        queryBreakdownMap.merge(entry.getKey(), entry.getValue(), Long::sum);
+                    }
+                }
+                return queryBreakdownMap;
+            }
             Map<String, Long> queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap());
             queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime);
             queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L);