diff --git a/pyproject.toml b/pyproject.toml
index cd92d96dc4f..51a824874c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -152,7 +152,6 @@ markers = [
     "requires_eclipse",
     "requires_lsf",
     "requires_window_manager",
-    "scheduler",
     "script",
     "slow",
     "unstable",
diff --git a/tests/conftest.py b/tests/conftest.py
index df9d1f67f56..9eaeaaf2b48 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -288,21 +288,15 @@ def excepthook(cls, exc, tb):
         pytest.param(True, id="using_scheduler"),
     ]
 )
-def try_queue_and_scheduler(request, monkeypatch):
+def using_scheduler(request, monkeypatch):
     should_enable_scheduler = request.param
-    scheduler_mark = request.node.get_closest_marker("scheduler")
-    assert scheduler_mark
-    if scheduler_mark.kwargs.get("skip") and should_enable_scheduler:
-        pytest.skip("Skipping running test with scheduler enabled")
     if should_enable_scheduler:
         # Flaky - the new scheduler needs an event loop, which might not be initialized yet.
         # This might be a bug in python 3.8, but it does not occur locally.
         _ = get_event_loop()
     monkeypatch.setenv("ERT_FEATURE_SCHEDULER", "1" if should_enable_scheduler else "0")
-    monkeypatch.setattr(
-        FeatureToggling._conf["scheduler"], "_value", should_enable_scheduler
-    )
+    yield should_enable_scheduler


 def pytest_collection_modifyitems(config, items):
@@ -361,10 +355,8 @@ def _run_snake_oil(source_root):
             "snake_oil.ert",
         ],
     )
-    FeatureToggling.update_from_args(parsed)
     run_cli(parsed)
-    FeatureToggling.reset()


 @pytest.fixture
diff --git a/tests/integration_tests/analysis/test_adaptive_localization.py b/tests/integration_tests/analysis/test_adaptive_localization.py
index 8d0268b6211..31204831900 100644
--- a/tests/integration_tests/analysis/test_adaptive_localization.py
+++ b/tests/integration_tests/analysis/test_adaptive_localization.py
@@ -36,9 +36,8 @@ def run_cli_ES_with_case(poly_config):
     return prior_sample, posterior_sample


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior():
     set_adaptive_localization_1 = dedent(
         """
@@ -60,9 +59,8 @@ def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior():
     assert np.allclose(posterior_sample, prior_sample)


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate():
     """
     Note that "RANDOM_SEED" in both ert configs needs to be the same to obtain
@@ -98,9 +96,8 @@ def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate():
     assert np.allclose(posterior_sample_loc0, posterior_sample_noloc)


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_posterior_generalized_variance_increases_in_cutoff():
     rng = np.random.default_rng(42)
     cutoff1 = rng.uniform(0, 1)
diff --git a/tests/integration_tests/analysis/test_es_update.py b/tests/integration_tests/analysis/test_es_update.py
index c08815a0500..e8fd1c2469b 100644
--- a/tests/integration_tests/analysis/test_es_update.py
+++ b/tests/integration_tests/analysis/test_es_update.py
@@ -61,9 +61,8 @@ def obs():
     )


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_posterior_has_lower_variance_than_prior():
     run_cli(
         ENSEMBLE_SMOOTHER_MODE,
@@ -91,9 +90,8 @@ def test_that_posterior_has_lower_variance_than_prior():
     )


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
 def test_that_surfaces_retain_their_order_when_loaded_and_saved_by_ert():
     """This is a regression test to make sure ert does not use the wrong
     order (row-major / column-major) when working with surfaces.
@@ -171,9 +169,8 @@ def sample_prior(nx, ny):
     )


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
 def test_update_multiple_param():
     """
     Note that this is now a snapshot test, so there is no guarantee that the
diff --git a/tests/integration_tests/job_queue/test_lsf_driver.py b/tests/integration_tests/job_queue/test_lsf_driver.py
index 2e5855192bc..05b66c459df 100644
--- a/tests/integration_tests/job_queue/test_lsf_driver.py
+++ b/tests/integration_tests/job_queue/test_lsf_driver.py
@@ -174,13 +174,10 @@ def copy_lsf_poly_case(copy_poly_case, tmp_path):
     "mock_bsub",
     "mock_bjobs",
     "mock_start_server",
-    "try_queue_and_scheduler",
-    "monkeypatch",
 )
-@pytest.mark.scheduler()
 @pytest.mark.integration_test
-def test_run_mocked_lsf_queue(request):
-    if "fail" in request.node.name and "scheduler" in request.node.name:
+def test_run_mocked_lsf_queue(request, using_scheduler):
+    if "fail" in request.node.name and using_scheduler:
         pytest.skip(
             "Python LSF driver does not support general resubmission on bsub errors"
         )
diff --git a/tests/integration_tests/scheduler/test_integration_local_driver.py b/tests/integration_tests/scheduler/test_integration_local_driver.py
index 0627b781ab7..563b5bc2d3f 100644
--- a/tests/integration_tests/scheduler/test_integration_local_driver.py
+++ b/tests/integration_tests/scheduler/test_integration_local_driver.py
@@ -37,9 +37,9 @@ def create_ert_config(path: Path):
     )


-@pytest.mark.scheduler
+@pytest.mark.usefixtures("using_scheduler")
 @pytest.mark.integration_test
-async def test_subprocesses_live_on_after_ert_dies(tmp_path, try_queue_and_scheduler):
+async def test_subprocesses_live_on_after_ert_dies(tmp_path):
     # Have ERT run a forward model that writes in PID to a file, then sleeps
     # Forcefully terminate ERT and assert that the child process is not terminated
     create_ert_config(tmp_path)
diff --git a/tests/integration_tests/shared/share/test_shell.py b/tests/integration_tests/shared/share/test_shell.py
index 4768266d470..0c04d0f1369 100644
--- a/tests/integration_tests/shared/share/test_shell.py
+++ b/tests/integration_tests/shared/share/test_shell.py
@@ -6,9 +6,9 @@
 from tests.integration_tests.run_cli import run_cli


-@pytest.mark.scheduler
+@pytest.mark.usefixtures("using_scheduler")
 @pytest.mark.integration_test
-def test_shell_scripts_integration(tmpdir, try_queue_and_scheduler, monkeypatch):
+def test_shell_scripts_integration(tmpdir):
     """
     The following test is a regression test that checks
     that the scripts under src/ert/shared/share/ert/shell_scripts
diff --git a/tests/integration_tests/status/test_tracking_integration.py b/tests/integration_tests/status/test_tracking_integration.py
index 98a43e07364..e02b49a9fc0 100644
--- a/tests/integration_tests/status/test_tracking_integration.py
+++ b/tests/integration_tests/status/test_tracking_integration.py
@@ -29,7 +29,6 @@
     FORWARD_MODEL_STATE_START,
     REALIZATION_STATE_FINISHED,
 )
-from ert.shared.feature_toggling import FeatureToggling


 def check_expression(original, path_expression, expected, msg_start):
@@ -45,9 +44,8 @@ def check_expression(original, path_expression, expected, msg_start):
     assert match_found, f"{msg_start} Nothing matched {path_expression}"


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 @pytest.mark.parametrize(
     (
         "extra_config, extra_poly_eval, cmd_line_arguments,"
@@ -169,7 +167,6 @@ def test_tracking(
         parser,
         cmd_line_arguments,
     )
-    FeatureToggling.update_from_args(parsed)

     ert_config = ErtConfig.from_file(parsed.config)
     os.chdir(ert_config.config_path)
@@ -239,12 +236,9 @@ def test_tracking(
     )

     thread.join()
-    FeatureToggling.reset()
-

-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 @pytest.mark.parametrize(
     ("mode, cmd_line_arguments"),
     [
@@ -277,7 +271,6 @@ def test_setting_env_context_during_run(
         parser,
         cmd_line_arguments,
     )
-    FeatureToggling.update_from_args(parsed)

     ert_config = ErtConfig.from_file(parsed.config)
     os.chdir(ert_config.config_path)
@@ -323,8 +316,6 @@ def test_setting_env_context_during_run(
     for key in expected:
         assert key not in os.environ

-    FeatureToggling.reset()
-

 def run_sim(start_date):
     """
@@ -337,9 +328,8 @@ def run_sim(start_date):
     summary.fwrite()


-@pytest.mark.scheduler()
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("try_queue_and_scheduler")
+@pytest.mark.usefixtures("using_scheduler")
 def test_tracking_missing_ecl(tmpdir, caplog, storage):
     with tmpdir.as_cwd():
         config = dedent(
@@ -365,7 +355,6 @@ def test_tracking_missing_ecl(tmpdir, caplog, storage):
             "config.ert",
         ],
     )
-    FeatureToggling.update_from_args(parsed)

     ert_config = ErtConfig.from_file(parsed.config)
     os.chdir(ert_config.config_path)
@@ -422,4 +411,3 @@ def test_tracking_missing_ecl(tmpdir, caplog, storage):
         ) in failures[0].failed_msg

     thread.join()
-    FeatureToggling.reset()
diff --git a/tests/integration_tests/test_cli.py b/tests/integration_tests/test_cli.py
index 4056e1d5290..880345b5d0a 100644
--- a/tests/integration_tests/test_cli.py
+++ b/tests/integration_tests/test_cli.py
@@ -92,8 +92,7 @@ def test_field_init_file_not_readable(monkeypatch):
     assert "Permission denied:" in str(err)


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
 def test_surface_init_fails_during_forward_model_callback():
     rng = np.random.default_rng()

@@ -157,7 +156,7 @@ def test_unopenable_observation_config_fails_gracefully():
         pytest.param(ES_MDA_MODE),
     ],
 )
-@pytest.mark.usefixtures("copy_poly_case")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_the_model_raises_exception_if_active_less_than_minimum_realizations(mode):
     """
     Verify that the run model checks that active realizations 20 is less than 100
@@ -186,8 +185,7 @@ def test_that_the_model_raises_exception_if_active_less_than_minimum_realization
     )


-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
-@pytest.mark.scheduler
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_the_model_warns_when_active_realizations_less_min_realizations():
     """
     Verify that the run model checks that active realizations is equal or higher than
@@ -263,16 +261,14 @@ def setenv_config(tmp_path):
 }


-@pytest.mark.usefixtures("set_site_config", "try_queue_and_scheduler")
-@pytest.mark.scheduler
+@pytest.mark.usefixtures("set_site_config", "using_scheduler")
 def test_that_setenv_config_is_parsed_correctly(setenv_config):
     config = ErtConfig.from_file(str(setenv_config))
     # then res config should read the SETENV as is
     assert config.env_vars == expected_vars


-@pytest.mark.usefixtures("set_site_config", "try_queue_and_scheduler")
-@pytest.mark.scheduler
+@pytest.mark.usefixtures("set_site_config", "using_scheduler")
 def test_that_setenv_sets_environment_variables_in_jobs(setenv_config):
     # When running the jobs
     run_cli(
@@ -303,7 +299,7 @@ def test_that_setenv_sets_environment_variables_in_jobs(setenv_config):
     assert lines[3].strip() == "fourth:foo"


-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 @pytest.mark.parametrize(
     ("job_src", "script_name", "script_src", "expect_stopped"),
     [
@@ -450,7 +446,6 @@ def run(self):
         ),
     ],
 )
-@pytest.mark.scheduler
 def test_that_stop_on_fail_workflow_jobs_stop_ert(
     job_src,
     script_name,
@@ -497,9 +492,8 @@ def fixture_mock_cli_run(monkeypatch):
     yield mocked_monitor, mocked_thread_join, mocked_thread_start


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_ensemble_evaluator():
     run_cli(
         ENSEMBLE_SMOOTHER_MODE,
@@ -511,8 +505,7 @@ def test_ensemble_evaluator():
     )


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 @pytest.mark.integration_test
 def test_es_mda(snapshot):
     with fileinput.input("poly.ert", inplace=True) as fin:
@@ -572,9 +565,8 @@ def remove_linestartswith(file_name: str, startswith: str):
     run_cli(mode, "--target-case", target, "poly.ert")


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_ensemble_evaluator_disable_monitoring():
     run_cli(
         ENSEMBLE_SMOOTHER_MODE,
@@ -587,9 +579,8 @@ def test_ensemble_evaluator_disable_monitoring():
     )


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_cli_test_run(mock_cli_run):
     run_cli(TEST_RUN_MODE, "poly.ert")

@@ -599,9 +590,8 @@ def test_cli_test_run(mock_cli_run):
     thread_start_mock.assert_has_calls([[call(), call()]])


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_ies():
     run_cli(
         ITERATIVE_ENSEMBLE_SMOOTHER_MODE,
@@ -613,9 +603,8 @@ def test_ies():
     )


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_that_running_ies_with_different_steplength_produces_different_result():
     """This is a regression test to make sure that different step-lengths
     give different results when running SIES.
@@ -681,9 +670,8 @@ def _run(target):
     assert not np.isclose(result_1.loc["iter-1"], result_2.loc["iter-1"]).all()


-@pytest.mark.scheduler
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 @pytest.mark.parametrize(
     "prior_mask,reals_rerun_option,should_resample",
     [
@@ -743,9 +731,8 @@ def test_that_prior_is_not_overwritten_in_ensemble_experiment(
     np.testing.assert_array_equal(parameter_values, prior_values)


-@pytest.mark.scheduler()
 @pytest.mark.integration_test
-@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
 def test_failing_job_cli_error_message():
     # modify poly_eval.py
     with open("poly_eval.py", mode="a", encoding="utf-8") as poly_script:
diff --git a/tests/unit_tests/ensemble_evaluator/test_async_queue_execution.py b/tests/unit_tests/ensemble_evaluator/test_async_queue_execution.py
index 3b97b49abdb..81894d2d6e9 100644
--- a/tests/unit_tests/ensemble_evaluator/test_async_queue_execution.py
+++ b/tests/unit_tests/ensemble_evaluator/test_async_queue_execution.py
@@ -9,7 +9,6 @@
 from ert.ensemble_evaluator._wait_for_evaluator import wait_for_evaluator
 from ert.job_queue import JobQueue
 from ert.scheduler import Scheduler, create_driver
-from ert.shared.feature_toggling import FeatureToggling


 async def mock_ws(host, port, done):
@@ -34,16 +33,14 @@ async def _handler(websocket, path):

 @pytest.mark.asyncio
 @pytest.mark.timeout(60)
-@pytest.mark.scheduler
 async def test_happy_path(
     tmpdir,
     unused_tcp_port,
     event_loop,
     make_ensemble_builder,
     queue_config,
-    caplog,
     monkeypatch,
-    try_queue_and_scheduler,
+    using_scheduler,
 ):
     asyncio.set_event_loop(event_loop)
     host = "localhost"
@@ -55,7 +52,7 @@ async def test_happy_path(

     ensemble = make_ensemble_builder(monkeypatch, tmpdir, 1, 1).build()

-    if FeatureToggling.is_enabled("scheduler"):
+    if using_scheduler:
         queue = Scheduler(
             create_driver(queue_config), ensemble.reals, ee_uri=url, ens_id="ee_0"
         )
@@ -72,10 +69,7 @@ async def test_happy_path(

     assert mock_ws_task.done()

-    if FeatureToggling.is_enabled("scheduler"):
-        first_expected_queue_event_type = "SUBMITTED"
-    else:
-        first_expected_queue_event_type = "WAITING"
+    first_expected_queue_event_type = "SUBMITTED" if using_scheduler else "WAITING"

     for received_event, expected_type, expected_queue_event_type in zip(
         [mock_ws_task.result()[0], mock_ws_task.result()[-1]],
diff --git a/tests/unit_tests/ensemble_evaluator/test_ensemble_builder.py b/tests/unit_tests/ensemble_evaluator/test_ensemble_builder.py
index ccd1515aab7..9ae11d90312 100644
--- a/tests/unit_tests/ensemble_evaluator/test_ensemble_builder.py
+++ b/tests/unit_tests/ensemble_evaluator/test_ensemble_builder.py
@@ -10,8 +10,8 @@

 @pytest.mark.parametrize("active_real", [True, False])
-@pytest.mark.scheduler
-def test_build_ensemble(active_real, monkeypatch, try_queue_and_scheduler):
+@pytest.mark.usefixtures("using_scheduler")
+def test_build_ensemble(active_real):
     ensemble = (
         EnsembleBuilder()
         .set_legacy_dependencies(QueueConfig(queue_system=QueueSystem.LOCAL), False, 0)
diff --git a/tests/unit_tests/ensemble_evaluator/test_ensemble_legacy.py b/tests/unit_tests/ensemble_evaluator/test_ensemble_legacy.py
index 387a3559fb7..1ecae53d855 100644
--- a/tests/unit_tests/ensemble_evaluator/test_ensemble_legacy.py
+++ b/tests/unit_tests/ensemble_evaluator/test_ensemble_legacy.py
@@ -16,7 +16,7 @@

 @pytest.mark.timeout(60)
-@pytest.mark.scheduler
+@pytest.mark.usefixtures("using_scheduler")
 def test_run_legacy_ensemble(tmpdir, make_ensemble_builder, monkeypatch):
     num_reals = 2
     custom_port_range = range(1024, 65535)
@@ -49,10 +49,8 @@ def test_run_legacy_ensemble(tmpdir, make_ensemble_builder, monkeypatch):


 @pytest.mark.timeout(60)
-@pytest.mark.scheduler
-def test_run_and_cancel_legacy_ensemble(
-    tmpdir, make_ensemble_builder, monkeypatch, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_run_and_cancel_legacy_ensemble(tmpdir, make_ensemble_builder, monkeypatch):
     num_reals = 2
     custom_port_range = range(1024, 65535)
     with tmpdir.as_cwd():
diff --git a/tests/unit_tests/gui/simulation/test_run_dialog.py b/tests/unit_tests/gui/simulation/test_run_dialog.py
index 4d2abb0282e..afb894ec403 100644
--- a/tests/unit_tests/gui/simulation/test_run_dialog.py
+++ b/tests/unit_tests/gui/simulation/test_run_dialog.py
@@ -335,11 +335,8 @@ def test_run_dialog(events, tab_widget_count, runmodel, qtbot: QtBot, mock_track
     qtbot.waitUntil(widget.done_button.isVisible, timeout=5000)


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("copy_poly_case")
-def test_that_run_dialog_can_be_closed_while_file_plot_is_open(
-    qtbot: QtBot, storage, source_root, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
+def test_that_run_dialog_can_be_closed_while_file_plot_is_open(qtbot: QtBot, storage):
     """
     This is a regression test for a crash happening when
     closing the RunDialog with a file open.
@@ -523,11 +520,8 @@ def test_run_dialog_memory_usage_showing(
     assert max_memory_value == "60000"


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("use_tmpdir", "set_site_config")
-def test_that_gui_runs_a_minimal_example(
-    qtbot: QtBot, storage, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("use_tmpdir", "set_site_config", "using_scheduler")
+def test_that_gui_runs_a_minimal_example(qtbot: QtBot, storage):
     """
     This is a regression test for a crash happening when
     clicking show details when running a minimal example.
diff --git a/tests/unit_tests/gui/simulation/test_run_path_dialog.py b/tests/unit_tests/gui/simulation/test_run_path_dialog.py
index 23f3a157a3a..98bc72bf40f 100644
--- a/tests/unit_tests/gui/simulation/test_run_path_dialog.py
+++ b/tests/unit_tests/gui/simulation/test_run_path_dialog.py
@@ -32,10 +32,8 @@ def handle_run_path_dialog(gui: ErtMainWindow, qtbot: QtBot, delete_run_path: bo
         qtbot.mouseClick(mb.buttons()[0], Qt.LeftButton)


-@pytest.mark.scheduler
-def test_run_path_is_deleted(
-    snake_oil_case_storage: ErtConfig, qtbot: QtBot, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_run_path_is_deleted(snake_oil_case_storage: ErtConfig, qtbot: QtBot):
     snake_oil_case = snake_oil_case_storage
     args_mock = Mock()
     args_mock.config = "snake_oil.ert"
@@ -88,10 +86,8 @@ def handle_dialog():
     assert not os.path.exists(run_path / dummy_file.name)


-@pytest.mark.scheduler
-def test_run_path_is_not_deleted(
-    snake_oil_case_storage: ErtConfig, qtbot: QtBot, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_run_path_is_not_deleted(snake_oil_case_storage: ErtConfig, qtbot: QtBot):
     snake_oil_case = snake_oil_case_storage
     args_mock = Mock()
     args_mock.config = "snake_oil.ert"
diff --git a/tests/unit_tests/gui/test_main_window.py b/tests/unit_tests/gui/test_main_window.py
index beb81d4e214..4a31c3e9757 100644
--- a/tests/unit_tests/gui/test_main_window.py
+++ b/tests/unit_tests/gui/test_main_window.py
@@ -211,11 +211,8 @@ def test_gui_shows_a_warning_and_disables_update_when_parameters_are_missing(
     assert gui.windowTitle() == "ERT - poly-no-gen-kw.ert"


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("use_tmpdir", "set_site_config")
-def test_that_run_dialog_can_be_closed_after_used_to_open_plots(
-    qtbot, storage, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("use_tmpdir", "set_site_config", "using_scheduler")
+def test_that_run_dialog_can_be_closed_after_used_to_open_plots(qtbot, storage):
     """
     This is a regression test for a bug where the plot window opened from run
     dialog would have run dialog as parent. Because of that it would be destroyed when
@@ -413,10 +410,9 @@ def test_that_ert_changes_to_config_directory(qtbot):
     assert gui.windowTitle() == "ERT - snake_oil_surface.ert"


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("use_tmpdir")
+@pytest.mark.usefixtures("esmda_has_run", "use_tmpdir", "using_scheduler")
 def test_that_the_plot_window_contains_the_expected_elements(
-    esmda_has_run, opened_main_window: ErtMainWindow, qtbot, try_queue_and_scheduler
+    opened_main_window: ErtMainWindow, qtbot
 ):
     gui = opened_main_window
     expected_cases = [
@@ -689,10 +685,9 @@ def test_that_load_results_manually_can_be_run_after_esmda(
     load_results_manually(qtbot, opened_main_window)


-@pytest.mark.scheduler
-@pytest.mark.usefixtures("use_tmpdir")
+@pytest.mark.usefixtures("use_tmpdir", "using_scheduler")
 def test_that_a_failing_job_shows_error_message_with_context(
-    opened_main_window_clean, qtbot, try_queue_and_scheduler
+    opened_main_window_clean, qtbot
 ):
     gui = opened_main_window_clean

diff --git a/tests/unit_tests/gui/test_restart_ensemble_experiment.py b/tests/unit_tests/gui/test_restart_ensemble_experiment.py
index 38630339db5..ef73ae12e3b 100644
--- a/tests/unit_tests/gui/test_restart_ensemble_experiment.py
+++ b/tests/unit_tests/gui/test_restart_ensemble_experiment.py
@@ -14,10 +14,8 @@
 from .conftest import wait_for_child


-@pytest.mark.scheduler
-def test_restart_failed_realizations(
-    opened_main_window_clean, qtbot, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_restart_failed_realizations(opened_main_window_clean, qtbot):
     """This runs an ensemble experiment with some failing realizations, and then
     does a restart, checking that only the failed realizations are started.
     """
diff --git a/tests/unit_tests/simulator/test_batch_sim.py b/tests/unit_tests/simulator/test_batch_sim.py
index 99e4cf4678a..b96b0d26009 100644
--- a/tests/unit_tests/simulator/test_batch_sim.py
+++ b/tests/unit_tests/simulator/test_batch_sim.py
@@ -146,10 +146,8 @@ def test_that_starting_with_invalid_key_raises_key_error(
         batch_simulator.start("case", _input, storage)


-@pytest.mark.scheduler()
-def test_batch_simulation(
-    batch_simulator, storage, monkeypatch, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_batch_simulation(batch_simulator, storage):
     # Starting a simulation which should actually run through.
     case_data = [
         (
@@ -286,11 +284,8 @@ def test_that_batch_simulator_handles_invalid_suffixes_at_start(
         rsim.start("case", inp, storage)


-@pytest.mark.usefixtures("use_tmpdir")
-@pytest.mark.scheduler()
-def test_batch_simulation_suffixes(
-    batch_sim_example, storage, monkeypatch, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("use_tmpdir", "using_scheduler")
+def test_batch_simulation_suffixes(batch_sim_example, storage):
     ert_config = batch_sim_example
     monitor = MockMonitor()
     rsim = BatchSimulator(
@@ -355,8 +350,8 @@ def test_batch_simulation_suffixes(
     assert act == pytest.approx(exp)


-@pytest.mark.scheduler()
-def test_stop_sim(copy_case, storage, monkeypatch, try_queue_and_scheduler):
+@pytest.mark.usefixtures("using_scheduler")
+def test_stop_sim(copy_case, storage):
     copy_case("batch_sim")
     with open("sleepy_time.ert", "a", encoding="utf-8") as f:
         f.write(
@@ -437,10 +432,8 @@ def assertContextStatusOddFailures(batch_ctx, final_state_only=False):
             assert status == JobStatus.FAILED


-@pytest.mark.scheduler()
-def test_batch_ctx_status_failing_jobs(
-    setup_case, storage, monkeypatch, try_queue_and_scheduler
-):
+@pytest.mark.usefixtures("using_scheduler")
+def test_batch_ctx_status_failing_jobs(setup_case, storage):
     ert_config = setup_case("batch_sim", "batch_sim_sleep_and_fail.ert")

     external_parameters = {
diff --git a/tests/unit_tests/simulator/test_simulation_context.py b/tests/unit_tests/simulator/test_simulation_context.py
index 930b14f1ae9..ac56f54140a 100644
--- a/tests/unit_tests/simulator/test_simulation_context.py
+++ b/tests/unit_tests/simulator/test_simulation_context.py
@@ -5,8 +5,8 @@
 from tests.utils import wait_until


-@pytest.mark.scheduler()
-def test_simulation_context(setup_case, storage, monkeypatch, try_queue_and_scheduler):
+@pytest.mark.usefixtures("using_scheduler")
+def test_simulation_context(setup_case, storage):
     ert_config = setup_case("batch_sim", "sleepy_time.ert")
     ert = EnKFMain(ert_config)
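Usage note: the reworked `using_scheduler` fixture in `tests/conftest.py` is parametrized over both queue implementations, sets `ERT_FEATURE_SCHEDULER` to "0" or "1", and yields the chosen boolean, which is why the old `scheduler` marker and the `FeatureToggling` plumbing can be dropped everywhere above. A minimal sketch of the two consumption patterns (illustrative test names only, not part of this patch):

    import pytest

    # Pattern 1: apply the fixture for its side effect alone; the test is
    # collected once per fixture parameter, with ERT_FEATURE_SCHEDULER set
    # to "0" or "1" before the test body runs.
    @pytest.mark.usefixtures("using_scheduler")
    def test_runs_against_both_queue_implementations():
        ...

    # Pattern 2: request the fixture by name when the test must branch on
    # the active implementation, as test_run_mocked_lsf_queue and
    # test_happy_path do above.
    def test_branches_on_implementation(using_scheduler):
        if using_scheduler:
            ...  # new scheduler enabled (ERT_FEATURE_SCHEDULER="1")
        else:
            ...  # legacy job queue (ERT_FEATURE_SCHEDULER="0")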