Skip to content

Commit

Permalink
Remove scheduler mark in favour of a fixture
Browse files Browse the repository at this point in the history
Use `using_scheduler` fixture instead of `@pytest.mark.scheduler` mark.
Pytest seems to behave weirdly with parameterised fixtures and modifying
the test items list at startup, so we end up having to both mark tests
with the marker and add a fixture. Instead, we should just use the
fixture directly with no marker.

This also refactors the fixture slightly so that it returns whether we
intend to run using JobQueue or Scheduler for tests that have to
distinguish between the two.
  • Loading branch information
pinkwah committed Feb 26, 2024
1 parent f5a9f48 commit a1eee46
Show file tree
Hide file tree
Showing 18 changed files with 65 additions and 140 deletions.
1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,6 @@ markers = [
"requires_eclipse",
"requires_lsf",
"requires_window_manager",
"scheduler",
"script",
"slow",
"unstable",
Expand Down
12 changes: 2 additions & 10 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,21 +288,15 @@ def excepthook(cls, exc, tb):
pytest.param(True, id="using_scheduler"),
]
)
def try_queue_and_scheduler(request, monkeypatch):
def using_scheduler(request, monkeypatch):
should_enable_scheduler = request.param
scheduler_mark = request.node.get_closest_marker("scheduler")
assert scheduler_mark
if scheduler_mark.kwargs.get("skip") and should_enable_scheduler:
pytest.skip("Skipping running test with scheduler enabled")
if should_enable_scheduler:
# Flaky - the new scheduler needs an event loop, which might not be initialized yet.
# This might be a bug in python 3.8, but it does not occur locally.
_ = get_event_loop()

monkeypatch.setenv("ERT_FEATURE_SCHEDULER", "1" if should_enable_scheduler else "0")
monkeypatch.setattr(
FeatureToggling._conf["scheduler"], "_value", should_enable_scheduler
)
yield should_enable_scheduler


def pytest_collection_modifyitems(config, items):
Expand Down Expand Up @@ -361,10 +355,8 @@ def _run_snake_oil(source_root):
"snake_oil.ert",
],
)
FeatureToggling.update_from_args(parsed)

run_cli(parsed)
FeatureToggling.reset()


@pytest.fixture
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,8 @@ def run_cli_ES_with_case(poly_config):
return prior_sample, posterior_sample


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior():
set_adaptive_localization_1 = dedent(
"""
Expand All @@ -60,9 +59,8 @@ def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior():
assert np.allclose(posterior_sample, prior_sample)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate():
"""
Note that "RANDOM_SEED" in both ert configs needs to be the same to obtain
Expand Down Expand Up @@ -98,9 +96,8 @@ def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate():
assert np.allclose(posterior_sample_loc0, posterior_sample_noloc)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_posterior_generalized_variance_increases_in_cutoff():
rng = np.random.default_rng(42)
cutoff1 = rng.uniform(0, 1)
Expand Down
9 changes: 3 additions & 6 deletions tests/integration_tests/analysis/test_es_update.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,8 @@ def obs():
)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_posterior_has_lower_variance_than_prior():
run_cli(
ENSEMBLE_SMOOTHER_MODE,
Expand Down Expand Up @@ -91,9 +90,8 @@ def test_that_posterior_has_lower_variance_than_prior():
)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
def test_that_surfaces_retain_their_order_when_loaded_and_saved_by_ert():
"""This is a regression test to make sure ert does not use the wrong order
(row-major / column-major) when working with surfaces.
Expand Down Expand Up @@ -171,9 +169,8 @@ def sample_prior(nx, ny):
)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
def test_update_multiple_param():
"""
Note that this is now a snapshot test, so there is no guarantee that the
Expand Down
7 changes: 2 additions & 5 deletions tests/integration_tests/job_queue/test_lsf_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,13 +174,10 @@ def copy_lsf_poly_case(copy_poly_case, tmp_path):
"mock_bsub",
"mock_bjobs",
"mock_start_server",
"try_queue_and_scheduler",
"monkeypatch",
)
@pytest.mark.scheduler()
@pytest.mark.integration_test
def test_run_mocked_lsf_queue(request):
if "fail" in request.node.name and "scheduler" in request.node.name:
def test_run_mocked_lsf_queue(request, using_scheduler):
if "fail" in request.node.name and using_scheduler:
pytest.skip(
"Python LSF driver does not support general resubmission on bsub errors"
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,9 @@ def create_ert_config(path: Path):
)


@pytest.mark.scheduler
@pytest.mark.usefixtures("using_scheduler")
@pytest.mark.integration_test
async def test_subprocesses_live_on_after_ert_dies(tmp_path, try_queue_and_scheduler):
async def test_subprocesses_live_on_after_ert_dies(tmp_path):
# Have ERT run a forward model that writes in PID to a file, then sleeps
# Forcefully terminate ERT and assert that the child process is not terminated
create_ert_config(tmp_path)
Expand Down
4 changes: 2 additions & 2 deletions tests/integration_tests/shared/share/test_shell.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
from tests.integration_tests.run_cli import run_cli


@pytest.mark.scheduler
@pytest.mark.usefixtures("using_scheduler")
@pytest.mark.integration_test
def test_shell_scripts_integration(tmpdir, try_queue_and_scheduler, monkeypatch):
def test_shell_scripts_integration(tmpdir):
"""
The following test is a regression test that
checks that the scripts under src/ert/shared/share/ert/shell_scripts
Expand Down
18 changes: 3 additions & 15 deletions tests/integration_tests/status/test_tracking_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
FORWARD_MODEL_STATE_START,
REALIZATION_STATE_FINISHED,
)
from ert.shared.feature_toggling import FeatureToggling


def check_expression(original, path_expression, expected, msg_start):
Expand All @@ -45,9 +44,8 @@ def check_expression(original, path_expression, expected, msg_start):
assert match_found, f"{msg_start} Nothing matched {path_expression}"


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
@pytest.mark.parametrize(
(
"extra_config, extra_poly_eval, cmd_line_arguments,"
Expand Down Expand Up @@ -169,7 +167,6 @@ def test_tracking(
parser,
cmd_line_arguments,
)
FeatureToggling.update_from_args(parsed)

ert_config = ErtConfig.from_file(parsed.config)
os.chdir(ert_config.config_path)
Expand Down Expand Up @@ -239,12 +236,9 @@ def test_tracking(
)
thread.join()

FeatureToggling.reset()


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
@pytest.mark.parametrize(
("mode, cmd_line_arguments"),
[
Expand Down Expand Up @@ -277,7 +271,6 @@ def test_setting_env_context_during_run(
parser,
cmd_line_arguments,
)
FeatureToggling.update_from_args(parsed)

ert_config = ErtConfig.from_file(parsed.config)
os.chdir(ert_config.config_path)
Expand Down Expand Up @@ -323,8 +316,6 @@ def test_setting_env_context_during_run(
for key in expected:
assert key not in os.environ

FeatureToggling.reset()


def run_sim(start_date):
"""
Expand All @@ -337,9 +328,8 @@ def run_sim(start_date):
summary.fwrite()


@pytest.mark.scheduler()
@pytest.mark.integration_test
@pytest.mark.usefixtures("try_queue_and_scheduler")
@pytest.mark.usefixtures("using_scheduler")
def test_tracking_missing_ecl(tmpdir, caplog, storage):
with tmpdir.as_cwd():
config = dedent(
Expand All @@ -365,7 +355,6 @@ def test_tracking_missing_ecl(tmpdir, caplog, storage):
"config.ert",
],
)
FeatureToggling.update_from_args(parsed)

ert_config = ErtConfig.from_file(parsed.config)
os.chdir(ert_config.config_path)
Expand Down Expand Up @@ -422,4 +411,3 @@ def test_tracking_missing_ecl(tmpdir, caplog, storage):
) in failures[0].failed_msg

thread.join()
FeatureToggling.reset()
41 changes: 14 additions & 27 deletions tests/integration_tests/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,7 @@ def test_field_init_file_not_readable(monkeypatch):
assert "Permission denied:" in str(err)


@pytest.mark.scheduler
@pytest.mark.usefixtures("copy_snake_oil_field", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_snake_oil_field", "using_scheduler")
def test_surface_init_fails_during_forward_model_callback():
rng = np.random.default_rng()

Expand Down Expand Up @@ -157,7 +156,7 @@ def test_unopenable_observation_config_fails_gracefully():
pytest.param(ES_MDA_MODE),
],
)
@pytest.mark.usefixtures("copy_poly_case")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_the_model_raises_exception_if_active_less_than_minimum_realizations(mode):
"""
Verify that the run model checks that active realizations 20 is less than 100
Expand Down Expand Up @@ -186,8 +185,7 @@ def test_that_the_model_raises_exception_if_active_less_than_minimum_realization
)


@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.scheduler
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_the_model_warns_when_active_realizations_less_min_realizations():
"""
Verify that the run model checks that active realizations is equal or higher than
Expand Down Expand Up @@ -263,16 +261,14 @@ def setenv_config(tmp_path):
}


@pytest.mark.usefixtures("set_site_config", "try_queue_and_scheduler")
@pytest.mark.scheduler
@pytest.mark.usefixtures("set_site_config", "using_scheduler")
def test_that_setenv_config_is_parsed_correctly(setenv_config):
config = ErtConfig.from_file(str(setenv_config))
# then res config should read the SETENV as is
assert config.env_vars == expected_vars


@pytest.mark.usefixtures("set_site_config", "try_queue_and_scheduler")
@pytest.mark.scheduler
@pytest.mark.usefixtures("set_site_config", "using_scheduler")
def test_that_setenv_sets_environment_variables_in_jobs(setenv_config):
# When running the jobs
run_cli(
Expand Down Expand Up @@ -303,7 +299,7 @@ def test_that_setenv_sets_environment_variables_in_jobs(setenv_config):
assert lines[3].strip() == "fourth:foo"


@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
@pytest.mark.parametrize(
("job_src", "script_name", "script_src", "expect_stopped"),
[
Expand Down Expand Up @@ -450,7 +446,6 @@ def run(self):
),
],
)
@pytest.mark.scheduler
def test_that_stop_on_fail_workflow_jobs_stop_ert(
job_src,
script_name,
Expand Down Expand Up @@ -497,9 +492,8 @@ def fixture_mock_cli_run(monkeypatch):
yield mocked_monitor, mocked_thread_join, mocked_thread_start


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_ensemble_evaluator():
run_cli(
ENSEMBLE_SMOOTHER_MODE,
Expand All @@ -511,8 +505,7 @@ def test_ensemble_evaluator():
)


@pytest.mark.scheduler
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
@pytest.mark.integration_test
def test_es_mda(snapshot):
with fileinput.input("poly.ert", inplace=True) as fin:
Expand Down Expand Up @@ -572,9 +565,8 @@ def remove_linestartswith(file_name: str, startswith: str):
run_cli(mode, "--target-case", target, "poly.ert")


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_ensemble_evaluator_disable_monitoring():
run_cli(
ENSEMBLE_SMOOTHER_MODE,
Expand All @@ -587,9 +579,8 @@ def test_ensemble_evaluator_disable_monitoring():
)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_cli_test_run(mock_cli_run):
run_cli(TEST_RUN_MODE, "poly.ert")

Expand All @@ -599,9 +590,8 @@ def test_cli_test_run(mock_cli_run):
thread_start_mock.assert_has_calls([[call(), call()]])


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_ies():
run_cli(
ITERATIVE_ENSEMBLE_SMOOTHER_MODE,
Expand All @@ -613,9 +603,8 @@ def test_ies():
)


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_that_running_ies_with_different_steplength_produces_different_result():
"""This is a regression test to make sure that different step-lengths
give different results when running SIES.
Expand Down Expand Up @@ -681,9 +670,8 @@ def _run(target):
assert not np.isclose(result_1.loc["iter-1"], result_2.loc["iter-1"]).all()


@pytest.mark.scheduler
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
@pytest.mark.parametrize(
"prior_mask,reals_rerun_option,should_resample",
[
Expand Down Expand Up @@ -743,9 +731,8 @@ def test_that_prior_is_not_overwritten_in_ensemble_experiment(
np.testing.assert_array_equal(parameter_values, prior_values)


@pytest.mark.scheduler()
@pytest.mark.integration_test
@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler")
@pytest.mark.usefixtures("copy_poly_case", "using_scheduler")
def test_failing_job_cli_error_message():
# modify poly_eval.py
with open("poly_eval.py", mode="a", encoding="utf-8") as poly_script:
Expand Down
Loading

0 comments on commit a1eee46

Please sign in to comment.