
Commit

Allow users to specify percentiles for compare subcommand (opensearch-project#503)

Signed-off-by: Ian Hoang <[email protected]>
Ian Hoang committed Apr 4, 2024
1 parent 5f19515 commit bee133d
Showing 2 changed files with 9 additions and 2 deletions.
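
The change makes the comparison percentiles user-configurable instead of fixed. Assuming the usual opensearch-benchmark entry point, an invocation of the new flag might look like this (the test execution IDs are placeholders, not values from this commit):

    opensearch-benchmark compare --baseline=<baseline-id> --contender=<contender-id> --percentiles=50,90,99.9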
6 changes: 6 additions & 0 deletions osbenchmark/benchmark.py
@@ -197,6 +197,11 @@ def add_workload_source(subparser):
"--contender",
required=True,
help=f"TestExecution ID of the contender (see {PROGRAM_NAME} list test_executions).")
compare_parser.add_argument(
"--percentiles",
help=f"A comma-separated list of percentiles to report for latency and service time"
f"(default: {metrics.GlobalStatsCalculator.DEFAULT_LATENCY_PERCENTILES}).",
default=metrics.GlobalStatsCalculator.DEFAULT_LATENCY_PERCENTILES)
compare_parser.add_argument(
"--results-format",
help="Define the output format for the command line results (default: markdown).",
@@ -820,6 +825,7 @@ def configure_results_publishing_params(args, cfg):
    cfg.add(config.Scope.applicationOverride, "results_publishing", "values", args.show_in_results)
    cfg.add(config.Scope.applicationOverride, "results_publishing", "output.path", args.results_file)
    cfg.add(config.Scope.applicationOverride, "results_publishing", "numbers.align", args.results_numbers_align)
+   cfg.add(config.Scope.applicationOverride, "results_publishing", "percentiles", args.percentiles)


def print_test_execution_id(args):
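The hunk above stores args.percentiles under the "results_publishing" config section, from which ComparisonResultsPublisher (next file) reads it back with config.opts. Below is a minimal sketch of that store-and-read round trip using a simplified stand-in Config class; the real implementation layers scopes such as config.Scope.applicationOverride, which is elided here, so treat this interface as an assumption rather than the project's actual code.

    # Hypothetical, simplified stand-in for the project's config object.
    class Config:
        def __init__(self):
            self._store = {}

        def add(self, scope, section, key, value):
            # Scope layering elided; last write wins.
            self._store[(section, key)] = value

        def opts(self, section, key, mandatory=True, default_value=None):
            # Mirrors the mandatory/default_value signature seen in
            # results_publisher.py below.
            if (section, key) in self._store:
                return self._store[(section, key)]
            if mandatory:
                raise KeyError(f"missing config value: {section}::{key}")
            return default_value

    cfg = Config()
    cfg.add("applicationOverride", "results_publishing", "percentiles", "50,90,99.9")
    print(cfg.opts("results_publishing", "percentiles", mandatory=False))  # -> 50,90,99.9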
5 changes: 3 additions & 2 deletions osbenchmark/results_publisher.py
@@ -337,14 +337,15 @@ def _line(self, k, task, v, unit, converter=lambda x: x, force=False):

class ComparisonResultsPublisher:
    def __init__(self, config):
        self.logger = logging.getLogger(__name__)
        self.results_file = config.opts("results_publishing", "output.path")
        self.results_format = config.opts("results_publishing", "format")
        self.numbers_align = config.opts("results_publishing", "numbers.align",
                                         mandatory=False, default_value="right")
        self.cwd = config.opts("node", "benchmark.cwd")
        self.show_processing_time = convert.to_bool(config.opts("results_publishing", "output.processingtime",
                                                                mandatory=False, default_value=False))
-       self.latency_percentiles = comma_separated_string_to_number_list(config.opts("workload", "latency.percentiles", mandatory=False))
+       self.percentiles = comma_separated_string_to_number_list(config.opts("results_publishing", "percentiles", mandatory=False))
        self.plain = False

    def publish(self, r1, r2):
@@ -442,7 +443,7 @@ def _publish_processing_time(self, baseline_stats, contender_stats, task):

    def _publish_percentiles(self, name, task, baseline_values, contender_values):
        lines = []
-       for percentile in metrics.percentiles_for_sample_size(sys.maxsize, percentiles_list=self.latency_percentiles):
+       for percentile in metrics.percentiles_for_sample_size(sys.maxsize, percentiles_list=self.percentiles):
            baseline_value = baseline_values.get(metrics.encode_float_key(percentile))
            contender_value = contender_values.get(metrics.encode_float_key(percentile))
            self._append_non_empty(lines, self._line("%sth percentile %s" % (percentile, name),
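In the publisher, the raw comma-separated string is converted to numbers by comma_separated_string_to_number_list before being passed to metrics.percentiles_for_sample_size (handing it sys.maxsize presumably lifts any sample-size restriction on which percentiles can be reported). The helper's implementation is not part of this diff; a plausible sketch of its behavior, assuming it maps "50,90,99.9" to [50, 90, 99.9] and returns None for empty input so callers can fall back to defaults:

    def comma_separated_string_to_number_list(string_value):
        # Hypothetical sketch; the real helper lives elsewhere in the
        # osbenchmark codebase and may differ in detail.
        if string_value is None or str(string_value).strip() == "":
            return None  # lets callers fall back to their own defaults
        numbers = []
        for part in str(string_value).split(","):
            value = float(part.strip())
            # Render whole numbers as ints (50, not 50.0) for cleaner labels.
            numbers.append(int(value) if value.is_integer() else value)
        return numbers

    assert comma_separated_string_to_number_list("50,90,99.9") == [50, 90, 99.9]
    assert comma_separated_string_to_number_list(None) is None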
