
Commit

Fix argument hierarchy in benchmark test workflow. (#4924)
Signed-off-by: Rishabh Singh <[email protected]>
rishabh6788 authored Aug 7, 2024
1 parent 5e70982 commit 8f4592d
Showing 4 changed files with 50 additions and 45 deletions.
34 changes: 17 additions & 17 deletions src/test_workflow/benchmark_test/benchmark_args.py
@@ -60,11 +60,18 @@ class BenchmarkArgs:

def __init__(self) -> None:
parser = argparse.ArgumentParser(description="Test an OpenSearch Bundle or compare two tests")
subparsers = parser.add_subparsers(dest="command", required=True, help="Please provide either 'execute-test' to run benchmark or 'compare' command to compare benchmark runs.")
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument("--suffix", dest="suffix",
help="Suffix to be added to stack name for performance test")

parent_parser.add_argument("--benchmark-config", dest="benchmark_config",
help="absolute filepath to custom benchmark.ini config")

subparsers = parser.add_subparsers(dest="command", required=True,
help="Please provide either 'execute-test' to run benchmark or 'compare' command to compare benchmark runs.")
# command to run a benchmark test
execute_test_parser = subparsers.add_parser("execute-test",
help="Execute benchmark test")
help="Execute benchmark test", parents=[parent_parser])
execute_test_parser.add_argument("--bundle-manifest", type=argparse.FileType("r"),
help="Bundle Manifest file.")
execute_test_parser.add_argument("--distribution-url", dest="distribution_url",
@@ -77,8 +84,6 @@ def __init__(self) -> None:
help="Username for the cluster")
execute_test_parser.add_argument("--password", dest="password",
help="Password for the cluster")
execute_test_parser.add_argument("--suffix", dest="suffix",
help="Suffix to be added to stack name for performance test")
execute_test_parser.add_argument("--component", dest="component", default="OpenSearch",
help="Component name that needs to be performance tested")
execute_test_parser.add_argument("--config", type=argparse.FileType("r"),
@@ -115,21 +120,19 @@ def __init__(self) -> None:
help="EC2 instance type for data node, defaults to r5.xlarge.")
execute_test_parser.add_argument("--workload", dest="workload", required=True,
help="Name of the workload that OpenSearch Benchmark should run")
execute_test_parser.add_argument("--benchmark-config", dest="benchmark_config",
help="absolute filepath to custom benchmark.ini config")
execute_test_parser.add_argument("--user-tag", dest="user_tag",
help="Attach arbitrary text to the meta-data of each metric record")
execute_test_parser.add_argument("--workload-params", dest="workload_params",
help="With this parameter you can inject variables into workloads. Parameters differs "
'for each workload type. e.g., --workload-params "number_of_replicas:1,number_of_shards:5"')
'for each workload type. e.g., --workload-params "number_of_replicas:1,number_of_shards:5"')
execute_test_parser.add_argument("--test-procedure", dest="test_procedure",
help="Defines a test procedure to use. You can find a list of test procedures by using "
'opensearch-benchmark list test-procedures. E.g. --test-procedure="ingest-only"')
'opensearch-benchmark list test-procedures. E.g. --test-procedure="ingest-only"')
execute_test_parser.add_argument("--exclude-tasks", dest="exclude_tasks",
help='Defines a comma-separated list of test procedure tasks not to run. E.g. --exclude-tasks="index-append"')
execute_test_parser.add_argument("--include-tasks", dest="include_tasks",
help="Defines a comma-separated list of test procedure tasks to run. By default, all tasks listed in a test procedure array are run."
' E.g. --include-tasks="scroll"',)
' E.g. --include-tasks="scroll"',)
execute_test_parser.add_argument("--capture-node-stat", dest="telemetry", action="append_const", const="node-stats",
help="Enable opensearch-benchmark to capture node stat metrics such as cpu, mem, jvm etc as well.")
execute_test_parser.add_argument("--capture-segment-replication-stat", dest="telemetry", action="append_const", const="segment-replication-stats",
@@ -142,7 +145,8 @@ def __init__(self) -> None:
help="User provided ml-node ebs block storage size defaults to 100Gb")

# command to run comparison
compare_parser = subparsers.add_parser("compare", help="Compare two tests using their test execution IDs")
compare_parser = subparsers.add_parser("compare", parents=[parent_parser],
help="Compare two tests using their test execution IDs")
compare_parser.add_argument("baseline", type=str, help="The baseline ID to compare", nargs='?')
compare_parser.add_argument("contender", type=str, help="The contender ID to compare", nargs='?')
compare_parser.add_argument("--results-format", default="markdown", type=str,
@@ -153,18 +157,17 @@ def __init__(self) -> None:
help="Determines whether to include the comparison in the results file")
compare_parser.add_argument("-v", "--verbose", action="store_const", default=logging.INFO, const=logging.DEBUG, dest="logging_level",
help="Show more verbose output.")
compare_parser.add_argument("--benchmark-config", dest="benchmark_config",
help="absolute filepath to custom benchmark.ini config")
compare_parser.add_argument("--suffix", dest="suffix", help="Suffix to be added to stack name for performance comparison")

args = parser.parse_args()
self.command = args.command
self.stack_suffix = args.suffix if args.suffix else None
self.benchmark_config = args.benchmark_config if args.benchmark_config else None

if args.command == "execute-test":
self.bundle_manifest = args.bundle_manifest if args.bundle_manifest else None
self.distribution_url = args.distribution_url if args.distribution_url else None
self.cluster_endpoint = args.cluster_endpoint if args.cluster_endpoint else None
self.distribution_version = args.distribution_version if args.distribution_version else None
self.stack_suffix = args.suffix if args.suffix else None
self.config = args.config
self.keep = args.keep
self.single_node = args.single_node
@@ -188,7 +191,6 @@ def __init__(self) -> None:
self.test_procedure = args.test_procedure if args.test_procedure else None
self.exclude_tasks = args.exclude_tasks if args.exclude_tasks else None
self.include_tasks = args.include_tasks if args.include_tasks else None
self.benchmark_config = args.benchmark_config if args.benchmark_config else None
self.user_tag = args.user_tag if args.user_tag else None
self.additional_config = json.dumps(args.additional_config) if args.additional_config is not None else None
self.use_50_percent_heap = args.use_50_percent_heap
@@ -209,9 +211,7 @@ def __init__(self) -> None:
self.results_format = args.results_format if hasattr(args, "results_format") else None
self.results_numbers_align = args.results_numbers_align if hasattr(args, "results_numbers_align") else None
self.show_in_results = args.show_in_results if hasattr(args, "show_in_results") else None
self.stack_suffix = args.suffix if args.suffix else None
self.logging_level = args.logging_level
self.benchmark_config = args.benchmark_config if args.benchmark_config else None

else:
logging.error("Invalid command: %s" % args.command)
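
For readers skimming the diff: the fix relies on argparse's parents mechanism, so --suffix and --benchmark-config are declared once on a help-less parent parser and inherited by both subcommands instead of being duplicated on each. A minimal, self-contained sketch of that pattern, independent of this repository (the "benchmark-cli" program name and the parse_args input are made-up examples):

import argparse

# Shared options live on a parent parser created with add_help=False so its
# -h/--help does not clash with the child parsers' own help options.
parent = argparse.ArgumentParser(add_help=False)
parent.add_argument("--suffix", dest="suffix")
parent.add_argument("--benchmark-config", dest="benchmark_config")

parser = argparse.ArgumentParser(prog="benchmark-cli")
subparsers = parser.add_subparsers(dest="command", required=True)

# Every subparser created with parents=[parent] inherits --suffix and
# --benchmark-config, so both subcommands accept them after the command name.
subparsers.add_parser("execute-test", parents=[parent])
subparsers.add_parser("compare", parents=[parent])

args = parser.parse_args(["compare", "--suffix", "nightly"])
print(args.command, args.suffix)  # -> compare nightly
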
18 changes: 9 additions & 9 deletions src/test_workflow/benchmark_test/benchmark_test_suite_compare.py
@@ -27,23 +27,23 @@ def execute(self) -> None:
self.cleanup()

def form_command(self) -> str:
command = f'docker run --name docker-container-{self.args.stack_suffix} ' \
"-v ~/.benchmark/benchmark.ini:/opensearch-benchmark/.benchmark/benchmark.ini " \
f"opensearchproject/opensearch-benchmark:1.6.0 " \
f"compare --baseline={self.args.baseline} --contender={self.args.contender} "
self.command = f'docker run --name docker-container-{self.args.stack_suffix} '
if self.args.benchmark_config:
self.command += f" -v {self.args.benchmark_config}:/opensearch-benchmark/.benchmark/benchmark.ini "
self.command += f"opensearchproject/opensearch-benchmark:1.6.0 " \
f"compare --baseline={self.args.baseline} --contender={self.args.contender} "

if self.args.results_format:
command += f"--results-format={self.args.results_format} "
self.command += f"--results-format={self.args.results_format} "

if self.args.results_numbers_align:
command += f"--results-numbers-align={self.args.results_numbers_align} "
self.command += f"--results-numbers-align={self.args.results_numbers_align} "

command += "--results-file=final_result.md "
self.command += "--results-file=final_result.md "

if self.args.show_in_results:
command += f"--show-in-results={self.args.show_in_results} "
self.command += f"--show-in-results={self.args.show_in_results} "

self.command = command
return self.command

def copy_comparison_results_to_local(self) -> None:
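
The reworked form_command now stores the command on self and only mounts a benchmark.ini into the container when --benchmark-config was supplied; otherwise the image's bundled configuration applies. A minimal sketch of that conditional construction (a standalone function with placeholder values, not the class's actual method):

def form_compare_command(stack_suffix, benchmark_config, baseline, contender):
    # The container name carries the stack suffix so parallel runs do not collide.
    command = f"docker run --name docker-container-{stack_suffix} "
    if benchmark_config:
        # Mount the custom benchmark.ini only when a path was provided.
        command += f" -v {benchmark_config}:/opensearch-benchmark/.benchmark/benchmark.ini "
    command += (
        "opensearchproject/opensearch-benchmark:1.6.0 "
        f"compare --baseline={baseline} --contender={contender} "
    )
    return command

# Without a custom config the volume mount is omitted entirely:
print(form_compare_command("test-suffix", None, "12345", "54321"))
# With one, the file is mapped over the container's default benchmark.ini:
print(form_compare_command("test-suffix", "/some/path/benchmark.ini", "12345", "54321"))
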
3 changes: 1 addition & 2 deletions tests/test_run_compare.py
Expand Up @@ -17,7 +17,7 @@
from test_workflow.benchmark_test.benchmark_test_suite_runners import BenchmarkTestSuiteRunners


class TestRunBenchmarkTest(unittest.TestCase):
class TestRunBenchmarkTestCompare(unittest.TestCase):
@pytest.fixture(autouse=True)
def _capfd(self, capfd: Any) -> None:
self.capfd = capfd
@@ -111,7 +111,6 @@ def test_form_compare_command(self, *mocks: Any) -> None:
# define the expected command
expected_command = (
f"docker run --name docker-container-{args.stack_suffix} "
"-v ~/.benchmark/benchmark.ini:/opensearch-benchmark/.benchmark/benchmark.ini "
"opensearchproject/opensearch-benchmark:1.6.0 "
"compare --baseline=12345 --contender=54321 "
"--results-format=markdown "
@@ -24,31 +24,37 @@ def setUp(self) -> None:
self.args.results_numbers_align = "right"
self.args.show_in_results = "all"

def test_form_command(self) -> None:
expected_command = (
f'docker run --name docker-container-{self.args.stack_suffix} '
'-v ~/.benchmark/benchmark.ini:/opensearch-benchmark/.benchmark/benchmark.ini '
'opensearchproject/opensearch-benchmark:1.6.0 '
f'compare --baseline={self.args.baseline} --contender={self.args.contender} '
f'--results-format={self.args.results_format} '
f'--results-numbers-align={self.args.results_numbers_align} '
'--results-file=final_result.md '
f'--show-in-results={self.args.show_in_results} '
)
@patch('subprocess.check_call')
@patch('test_workflow.benchmark_test.benchmark_test_suite_compare.BenchmarkTestSuiteCompare.copy_comparison_results_to_local')
@patch('test_workflow.benchmark_test.benchmark_test_suite_compare.BenchmarkTestSuiteCompare.cleanup')
def test_execute(self, mock_cleanup: Mock, mock_copy_results: Mock, mock_check_call: Mock) -> None:
self.args.benchmark_config = None
compare = BenchmarkTestSuiteCompare(self.args)
compare.execute()
mock_check_call.assert_called_once()

with patch('test_workflow.benchmark_test.benchmark_test_suite_compare.BenchmarkTestSuiteCompare.form_command') as mock_form_command:
mock_form_command.return_value = expected_command
compare = BenchmarkTestSuiteCompare(self.args)
command = compare.form_command()
self.assertEqual(command, expected_command)
self.assertEqual(compare.command, 'docker run --name docker-container-test-suffix '
'opensearchproject/opensearch-benchmark:1.6.0 compare '
'--baseline=baseline-id --contender=contender-id --results-format=markdown '
'--results-numbers-align=right --results-file=final_result.md '
'--show-in-results=all ')
mock_copy_results.assert_called_once()
mock_cleanup.assert_called_once()

@patch('subprocess.check_call')
@patch('test_workflow.benchmark_test.benchmark_test_suite_compare.BenchmarkTestSuiteCompare.copy_comparison_results_to_local')
@patch('test_workflow.benchmark_test.benchmark_test_suite_compare.BenchmarkTestSuiteCompare.cleanup')
def test_execute(self, mock_cleanup: Mock, mock_copy_results: Mock, mock_check_call: Mock) -> None:
def test_execute_with_benchmark_config(self, mock_cleanup: Mock, mock_copy_results: Mock, mock_check_call: Mock) -> None:
self.args.benchmark_config = '/some/path/benchmark.ini'
compare = BenchmarkTestSuiteCompare(self.args)
compare.execute()
mock_check_call.assert_called_once()
self.assertEqual(compare.command, 'docker run --name docker-container-test-suffix -v '
'/some/path/benchmark.ini:/opensearch-benchmark/.benchmark/benchmark.ini '
'opensearchproject/opensearch-benchmark:1.6.0 compare '
'--baseline=baseline-id --contender=contender-id --results-format=markdown '
'--results-numbers-align=right --results-file=final_result.md '
'--show-in-results=all ')
mock_copy_results.assert_called_once()
mock_cleanup.assert_called_once()

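The two tests above exercise both branches of the new conditional (no custom config versus a mounted one) while patching subprocess.check_call so no container is actually started. For reference, a minimal, self-contained sketch of the same patching pattern; the Runner class below is a made-up stand-in, not the repository's BenchmarkTestSuiteCompare:

import subprocess
import unittest
from typing import Optional
from unittest.mock import Mock, patch


class Runner:
    # Toy stand-in: build a command string, then hand it to subprocess.
    def __init__(self, benchmark_config: Optional[str] = None) -> None:
        self.benchmark_config = benchmark_config
        self.command = ""

    def form_command(self) -> str:
        self.command = "docker run example "
        if self.benchmark_config:
            self.command += f"-v {self.benchmark_config}:/target/benchmark.ini "
        return self.command

    def execute(self) -> None:
        subprocess.check_call(self.form_command(), shell=True)


class TestRunner(unittest.TestCase):
    @patch("subprocess.check_call")
    def test_execute_without_config(self, mock_check_call: Mock) -> None:
        runner = Runner()
        runner.execute()
        mock_check_call.assert_called_once()     # nothing actually ran
        self.assertNotIn("-v ", runner.command)  # no volume mount built

    @patch("subprocess.check_call")
    def test_execute_with_config(self, mock_check_call: Mock) -> None:
        runner = Runner("/some/path/benchmark.ini")
        runner.execute()
        mock_check_call.assert_called_once()
        self.assertIn("-v /some/path/benchmark.ini", runner.command)


if __name__ == "__main__":
    unittest.main()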
