From af6e8125f7cb71df6a0e4ebe4bf0917f964a0e52 Mon Sep 17 00:00:00 2001
From: Michael Oviedo
Date: Tue, 29 Oct 2024 21:45:46 +0000
Subject: [PATCH] change min/max to overall_min/overall_max + update comparison results publisher

Signed-off-by: Michael Oviedo
---
 osbenchmark/aggregator.py        | 19 +++++++++++++------
 osbenchmark/results_publisher.py |  8 ++++----
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/osbenchmark/aggregator.py b/osbenchmark/aggregator.py
index f5c57eae..c94527c5 100644
--- a/osbenchmark/aggregator.py
+++ b/osbenchmark/aggregator.py
@@ -202,15 +202,22 @@ def calculate_weighted_average(self, task_metrics: Dict[str, List[Any]], iterati
                         weighted_metrics[metric][item_key] = values[0][item_key]
                     else:
                         item_values = [value.get(item_key, 0) for value in values]
-                        weighted_sum = sum(value * iterations for value in item_values)
-                        total_iterations = iterations * len(item_values)
-                        weighted_avg = weighted_sum / total_iterations
-                        weighted_metrics[metric][item_key] = weighted_avg
+                        if item_key == 'min':
+                            weighted_metrics[metric]['overall_min'] = min(item_values)
+                        elif item_key == 'max':
+                            weighted_metrics[metric]['overall_max'] = max(item_values)
+                        elif item_key == 'median':
+                            weighted_sum = sum(value * iterations for value in item_values)
+                            total_iterations = iterations * len(item_values)
+                            weighted_metrics[metric][item_key] = weighted_sum / total_iterations
+                        else:
+                            weighted_sum = sum(value * iterations for value in item_values)
+                            total_iterations = iterations * len(item_values)
+                            weighted_metrics[metric][item_key] = weighted_sum / total_iterations
             else:
                 weighted_sum = sum(value * iterations for value in values)
                 total_iterations = iterations * len(values)
-                weighted_avg = weighted_sum / total_iterations
-                weighted_metrics[metric] = weighted_avg
+                weighted_metrics[metric] = weighted_sum / total_iterations
 
         return weighted_metrics
 
diff --git a/osbenchmark/results_publisher.py b/osbenchmark/results_publisher.py
index 4b3ebebd..e21b710b 100644
--- a/osbenchmark/results_publisher.py
+++ b/osbenchmark/results_publisher.py
@@ -464,16 +464,16 @@ def _write_results(self, metrics_table, metrics_table_console):
                       data_plain=metrics_table, data_rich=metrics_table_console)
 
     def _publish_throughput(self, baseline_stats, contender_stats, task):
-        b_min = baseline_stats.metrics(task)["throughput"]["min"]
+        b_min = baseline_stats.metrics(task)["throughput"].get("overall_min") or baseline_stats.metrics(task)["throughput"]["min"]
         b_mean = baseline_stats.metrics(task)["throughput"]["mean"]
         b_median = baseline_stats.metrics(task)["throughput"]["median"]
-        b_max = baseline_stats.metrics(task)["throughput"]["max"]
+        b_max = baseline_stats.metrics(task)["throughput"].get("overall_max") or baseline_stats.metrics(task)["throughput"]["max"]
         b_unit = baseline_stats.metrics(task)["throughput"]["unit"]
 
-        c_min = contender_stats.metrics(task)["throughput"]["min"]
+        c_min = contender_stats.metrics(task)["throughput"].get("overall_min") or contender_stats.metrics(task)["throughput"]["min"]
         c_mean = contender_stats.metrics(task)["throughput"]["mean"]
         c_median = contender_stats.metrics(task)["throughput"]["median"]
-        c_max = contender_stats.metrics(task)["throughput"]["max"]
+        c_max = contender_stats.metrics(task)["throughput"].get("overall_max") or contender_stats.metrics(task)["throughput"]["max"]
 
         return self._join(
             self._line("Min Throughput", b_min, c_min, task, b_unit, treat_increase_as_improvement=True),
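
Note on the aggregator change: the sketch below is a minimal standalone illustration of the new branching, not code from the patch; the helper name combine_throughput_stats and the numbers are made up. 'min' and 'max' now collapse to the true extremes across test executions under 'overall_min'/'overall_max', while other numeric keys keep the iteration-weighted average (which, with equal iteration counts per run, reduces to a plain mean).

    # Standalone sketch: combine per-execution throughput stats the way the
    # patched calculate_weighted_average branches on item_key.
    def combine_throughput_stats(per_run_stats, iterations):
        combined = {}
        for key in per_run_stats[0]:
            values = [stats[key] for stats in per_run_stats]
            if key == 'min':
                combined['overall_min'] = min(values)   # true minimum across runs
            elif key == 'max':
                combined['overall_max'] = max(values)   # true maximum across runs
            else:
                weighted_sum = sum(value * iterations for value in values)
                combined[key] = weighted_sum / (iterations * len(values))
        return combined

    # Two test executions, each with the same iteration count:
    runs = [
        {'min': 95.0, 'mean': 100.0, 'median': 99.0, 'max': 110.0},
        {'min': 90.0, 'mean': 104.0, 'median': 103.0, 'max': 120.0},
    ]
    print(combine_throughput_stats(runs, iterations=5))
    # {'overall_min': 90.0, 'mean': 102.0, 'median': 101.0, 'overall_max': 120.0}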
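
Note on the publisher change: another standalone sketch (hypothetical helper name, toy dicts) of the fallback lookup. Aggregated results carry 'overall_min'/'overall_max' while a single test execution still carries plain 'min'/'max', so the comparison publisher prefers the new key and falls back to the old one.

    # Standalone sketch of the .get("overall_min") or ["min"] pattern used in
    # _publish_throughput. `or` tests truthiness, so a missing key (None) falls
    # through to the old key.
    def min_throughput(throughput_stats):
        return throughput_stats.get("overall_min") or throughput_stats["min"]

    aggregated = {"overall_min": 90.0, "mean": 102.0, "median": 101.0,
                  "overall_max": 120.0, "unit": "ops/s"}
    single_run = {"min": 95.0, "mean": 100.0, "median": 99.0,
                  "max": 110.0, "unit": "ops/s"}

    print(min_throughput(aggregated))  # 90.0 -- new key wins when present
    print(min_throughput(single_run))  # 95.0 -- non-aggregated results still publish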