diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index 59ef01f9f947a..8a3510e99ac0c 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -65,6 +65,24 @@ public AbstractProfileBreakdown<QueryTimingType> context(Object context) {
     @Override
     public Map<String, Long> toBreakdownMap() {
         final Map<String, Long> topLevelBreakdownMapWithWeightTime = super.toBreakdownMap();
+        int totalLeafCount = 0;
+        for (List<LeafReaderContext> leaves : sliceCollectorsToLeaves.values()) {
+            totalLeafCount += leaves.size();
+        }
+        if (contexts.size() != totalLeafCount) {
+            // If the leaf counts in contexts map don't match those in sliceCollectorsToLeaves, this means that the current
+            // node is outside the concurrent search path, which occurred before the contextIndexSearcher::searchLeaf.
+            // For example, in the post filter query, we may create a profile node during filtered collector context creation.
+            // In this case, as there is no concurrent implementation, we can simply return the accumulated breakdown result
+            // instead of the statistical result.
+            final Map<String, Long> map = new HashMap<>(topLevelBreakdownMapWithWeightTime);
+            for (final AbstractProfileBreakdown<QueryTimingType> context : contexts.values()) {
+                for (final Map.Entry<String, Long> entry : context.toBreakdownMap().entrySet()) {
+                    map.merge(entry.getKey(), entry.getValue(), Long::sum);
+                }
+            }
+            return map;
+        }
         final long createWeightStartTime = topLevelBreakdownMapWithWeightTime.get(
             QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_START_TIME_SUFFIX
         );
@@ -83,7 +101,7 @@ public Map<String, Long> toBreakdownMap() {
         // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for
         // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later
         // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no
-        // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination.
+        // concurrency involved.
         AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
         queryNodeTime = breakdown.toNodeTime() + createWeightTime;
         maxSliceNodeTime = 0L;