diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index cd6d72f9388b..48631597179a 100644 --- a/lib/galaxy_test/base/populators.py +++ b/lib/galaxy_test/base/populators.py @@ -1022,6 +1022,28 @@ def run_collection_creates_list(self, history_id: str, hdca_id: str) -> Response self.wait_for_history(history_id, assert_ok=True) return self.run_tool("collection_creates_list", inputs, history_id) + def new_error_dataset(self, history_id: str) -> str: + payload = self.run_tool_payload( + tool_id="test_data_source", + inputs={ + "URL": f"file://{os.path.join(os.getcwd(), 'README.rst')}", + "URL_method": "get", + "data_type": "bed", + }, + history_id=history_id, + ) + create_response = self._post("tools", data=payload) + api_asserts.assert_status_code_is(create_response, 200) + create_object = create_response.json() + api_asserts.assert_has_keys(create_object, "outputs") + assert len(create_object["outputs"]) == 1 + output = create_object["outputs"][0] + self.wait_for_history(history_id, assert_ok=False) + # wait=False to allow errors + output_details = self.get_history_dataset_details(history_id, dataset=output, wait=False) + assert output_details["state"] == "error", output_details + return output_details["id"] + def run_exit_code_from_file(self, history_id: str, hdca_id: str) -> dict: exit_code_inputs = { "input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]}, diff --git a/scripts/cleanup_datasets/pgcleanup.py b/scripts/cleanup_datasets/pgcleanup.py index ff9486304583..0c7f01bd9bee 100755 --- a/scripts/cleanup_datasets/pgcleanup.py +++ b/scripts/cleanup_datasets/pgcleanup.py @@ -111,6 +111,10 @@ def __init__(self, app): self._debug = app.args.debug self._update_time = app.args.update_time self._force_retry = app.args.force_retry + if app.args.object_store_id: + self._object_store_id_sql = f" AND dataset.object_store_id = '{app.args.object_store_id}'" + else: + self._object_store_id_sql = "" self._epoch_time = str(int(time.time())) self._days = app.args.days self._config = app.config @@ -200,6 +204,7 @@ def sql(self): update_time_sql=self._update_time_sql, force_retry_sql=self._force_retry_sql, epoch_time=self._epoch_time, + object_store_id_sql=self._object_store_id_sql, ) @property @@ -359,6 +364,7 @@ def sql(self): update_time_sql=self._update_time_sql, force_retry_sql=self._force_retry_sql, epoch_time=self._epoch_time, + object_store_id_sql=self._object_store_id_sql, ) @@ -844,17 +850,68 @@ class PurgeDeletedHDAs(PurgesHDAs, RemovesMetadataFiles, RequiresDiskUsageRecalc ) +class PurgeOldHDAs(PurgesHDAs, RemovesMetadataFiles, RequiresDiskUsageRecalculation, Action): + """ + - Mark purged all HistoryDatasetAssociations that are older than the specified number of days. + - Mark deleted all MetadataFiles whose hda_id is purged in this step. + - Mark deleted all ImplicitlyConvertedDatasetAssociations whose hda_parent_id is purged in this + step. + - Mark purged all HistoryDatasetAssociations for which an ImplicitlyConvertedDatasetAssociation + with matching hda_id is deleted in this step. + """ + + force_retry_sql = " AND NOT history_dataset_association.purged" + _action_sql = """ + WITH purged_hda_ids + AS ( UPDATE history_dataset_association + SET purged = true, deleted = true{update_time_sql} + FROM dataset + WHERE history_dataset_association.dataset_id = dataset.id AND + dataset.create_time < (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') + {force_retry_sql} {object_store_id_sql} + RETURNING history_dataset_association.id, + history_id), + hda_events + AS (INSERT INTO cleanup_event_hda_association + (create_time, cleanup_event_id, hda_id) + SELECT NOW() AT TIME ZONE 'utc', %(event_id)s, id + FROM purged_hda_ids), + {purge_hda_dependencies_sql} + SELECT purged_hda_ids.id AS purged_hda_id, + history.user_id AS recalculate_disk_usage_user_id, + deleted_metadata_file_ids.id AS deleted_metadata_file_id, + deleted_metadata_file_ids.uuid AS deleted_metadata_file_uuid, + deleted_metadata_file_ids.object_store_id AS object_store_id, + deleted_icda_ids.id AS deleted_icda_id, + deleted_icda_ids.hda_id AS deleted_icda_hda_id + FROM purged_hda_ids + LEFT OUTER JOIN history + ON purged_hda_ids.history_id = history.id + LEFT OUTER JOIN deleted_metadata_file_ids + ON deleted_metadata_file_ids.hda_id = purged_hda_ids.id + LEFT OUTER JOIN deleted_icda_ids + ON deleted_icda_ids.hda_parent_id = purged_hda_ids.id + ORDER BY purged_hda_ids.id + """ + causals = ( + ("purged_hda_id", "deleted_metadata_file_id", "object_store_id"), + ("purged_hda_id", "deleted_icda_id", "deleted_icda_hda_id"), + ) + + class PurgeHistorylessHDAs(PurgesHDAs, RemovesMetadataFiles, RequiresDiskUsageRecalculation, Action): """ - Mark purged all HistoryDatasetAssociations whose history_id is null. """ + force_retry_sql = " AND NOT history_dataset_association.purged" _action_sql = """ WITH purged_hda_ids AS ( UPDATE history_dataset_association SET purged = true, deleted = true{update_time_sql} - WHERE history_id IS NULL{force_retry_sql} - AND update_time < (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') + FROM dataset + WHERE history_id IS NULL{force_retry_sql}{object_store_id_sql} + AND history_dataset_association.update_time < (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') RETURNING id), hda_events AS (INSERT INTO cleanup_event_hda_association @@ -893,7 +950,7 @@ class PurgeErrorHDAs(PurgesHDAs, RemovesMetadataFiles, RequiresDiskUsageRecalcul AS ( UPDATE history_dataset_association SET purged = true, deleted = true{update_time_sql} FROM dataset - WHERE history_dataset_association.dataset_id = dataset.id{force_retry_sql} + WHERE history_dataset_association.dataset_id = dataset.id{force_retry_sql}{object_store_id_sql} AND dataset.state = 'error' AND history_dataset_association.update_time < (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') RETURNING history_dataset_association.id as id, @@ -1037,7 +1094,7 @@ class DeleteExportedHistories(Action): SET deleted = true{update_time_sql} FROM job_export_history_archive WHERE job_export_history_archive.dataset_id = dataset.id - AND NOT deleted + AND NOT deleted {object_store_id_sql} AND dataset.update_time <= (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') RETURNING dataset.id), dataset_events @@ -1063,7 +1120,7 @@ class DeleteDatasets(Action): WITH deleted_dataset_ids AS ( UPDATE dataset SET deleted = true{update_time_sql} - WHERE NOT deleted + WHERE NOT deleted {object_store_id_sql} AND NOT EXISTS (SELECT true FROM library_dataset_dataset_association @@ -1097,7 +1154,7 @@ class PurgeDatasets(RemovesDatasets, Action): WITH purged_dataset_ids AS ( UPDATE dataset SET purged = true{update_time_sql} - WHERE deleted{force_retry_sql} + WHERE deleted{force_retry_sql}{object_store_id_sql} AND update_time < (NOW() AT TIME ZONE 'utc' - interval '%(days)s days') RETURNING id, uuid, @@ -1182,6 +1239,13 @@ def __parse_args(self): default=14, help="Only perform action(s) on objects that have not been updated since the specified number of days", ) + parser.add_argument( + "--object-store-id", + dest="object_store_id", + type=str, + default=None, + help="Only perform action(s) on objects stored in the target object store (for dataset operations - ignored by user/history centric operations)", + ) parser.add_argument( "-U", "--no-update-time", diff --git a/test/integration/test_scripts.py b/test/integration/test_scripts.py index 832a4e9bb8c4..d19bbe1b714e 100644 --- a/test/integration/test_scripts.py +++ b/test/integration/test_scripts.py @@ -16,7 +16,9 @@ from galaxy_test.driver import integration_util -class TestScriptsIntegration(integration_util.IntegrationTestCase): +class BaseScriptsIntegrationTestCase(integration_util.IntegrationTestCase): + dataset_populator: DatasetPopulator + def setUp(self): super().setUp() self.dataset_populator = DatasetPopulator(self.galaxy_interactor) @@ -26,6 +28,39 @@ def setUp(self): def handle_galaxy_config_kwds(cls, config): cls._raw_config = config + def _scripts_check_argparse_help(self, script): + # Test imports and argparse response to --help with 0 exit code. + output = self._scripts_check_output(script, ["--help"]) + # Test -h, --help in printed output message. + assert "-h, --help" in output + + def _scripts_check_output(self, script, args): + cwd = galaxy_directory() + cmd = ["python", os.path.join(cwd, "scripts", script)] + args + clean_env = { + "PATH": os.environ.get("PATH", None), + } # Don't let testing environment variables interfere with config. + try: + return unicodify(subprocess.check_output(cmd, cwd=cwd, env=clean_env)) + except Exception as e: + if isinstance(e, subprocess.CalledProcessError): + raise Exception(f"{unicodify(e)}\nOutput was:\n{unicodify(e.output)}") + raise + + def write_config_file(self): + config_dir = self.config_dir + path = os.path.join(config_dir, "galaxy.yml") + self._test_driver.temp_directories.extend([config_dir]) + config = self._raw_config + # Update config dict with database_connection, which might be set through env variables + config["database_connection"] = self._app.config.database_connection + with open(path, "w") as f: + yaml.dump({"galaxy": config}, f) + + return path + + +class TestScriptsIntegration(BaseScriptsIntegrationTestCase): def test_helper(self): script = "helper.py" self._scripts_check_argparse_help(script) @@ -52,25 +87,10 @@ def test_cleanup(self): assert history_response.status_code == 200 assert history_response.json()["purged"] is True, history_response.json() - def test_pgcleanup(self): - self._skip_unless_postgres() - - script = "cleanup_datasets/pgcleanup.py" + def test_admin_cleanup_datasets(self): + script = "cleanup_datasets/admin_cleanup_datasets.py" self._scripts_check_argparse_help(script) - history_id = self.dataset_populator.new_history() - delete_response = self.dataset_populator._delete(f"histories/{history_id}") - assert delete_response.status_code == 200 - assert delete_response.json()["purged"] is False - config_file = self.write_config_file() - output = self._scripts_check_output( - script, ["-c", config_file, "--older-than", "0", "--sequence", "purge_deleted_histories"] - ) - print(output) - history_response = self.dataset_populator._get(f"histories/{history_id}") - assert history_response.status_code == 200 - assert history_response.json()["purged"] is True, history_response.json() - def test_set_user_disk_usage(self): script = "set_user_disk_usage.py" self._scripts_check_argparse_help(script) @@ -123,9 +143,6 @@ def test_grt_export(self): export = json.load(f) assert export["version"] == 3 - def test_admin_cleanup_datasets(self): - self._scripts_check_argparse_help("cleanup_datasets/admin_cleanup_datasets.py") - def test_secret_decoder_ring(self): script = "secret_decoder_ring.py" self._scripts_check_argparse_help(script) @@ -143,34 +160,3 @@ def test_galaxy_main(self): def test_runtime_stats(self): self._skip_unless_postgres() self._scripts_check_argparse_help("runtime_stats.py") - - def _scripts_check_argparse_help(self, script): - # Test imports and argparse response to --help with 0 exit code. - output = self._scripts_check_output(script, ["--help"]) - # Test -h, --help in printed output message. - assert "-h, --help" in output - - def _scripts_check_output(self, script, args): - cwd = galaxy_directory() - cmd = ["python", os.path.join(cwd, "scripts", script)] + args - clean_env = { - "PATH": os.environ.get("PATH", None), - } # Don't let testing environment variables interfere with config. - try: - return unicodify(subprocess.check_output(cmd, cwd=cwd, env=clean_env)) - except Exception as e: - if isinstance(e, subprocess.CalledProcessError): - raise Exception(f"{unicodify(e)}\nOutput was:\n{unicodify(e.output)}") - raise - - def write_config_file(self): - config_dir = self.config_dir - path = os.path.join(config_dir, "galaxy.yml") - self._test_driver.temp_directories.extend([config_dir]) - config = self._raw_config - # Update config dict with database_connection, which might be set through env variables - config["database_connection"] = self._app.config.database_connection - with open(path, "w") as f: - yaml.dump({"galaxy": config}, f) - - return path diff --git a/test/integration/test_scripts_pgcleanup.py b/test/integration/test_scripts_pgcleanup.py new file mode 100644 index 000000000000..4ecdc9b18abf --- /dev/null +++ b/test/integration/test_scripts_pgcleanup.py @@ -0,0 +1,234 @@ +from typing import List + +from galaxy_test.base.populators import skip_without_tool +from .test_scripts import BaseScriptsIntegrationTestCase + +SCRIPT = "cleanup_datasets/pgcleanup.py" + + +class TestScriptsPgCleanupIntegration(BaseScriptsIntegrationTestCase): + def test_help(self): + self._skip_unless_postgres() + self._scripts_check_argparse_help(SCRIPT) + + def test_purge_deleted_histories(self): + self._skip_unless_postgres() + + history_id = self.dataset_populator.new_history() + delete_response = self.dataset_populator._delete(f"histories/{history_id}") + assert delete_response.status_code == 200 + assert delete_response.json()["purged"] is False + self._pgcleanup_check_output(["--older-than", "0", "--sequence", "purge_deleted_histories"]) + history_response = self.dataset_populator._get(f"histories/{history_id}") + assert history_response.status_code == 200 + assert history_response.json()["purged"] is True, history_response.json() + + def test_purge_old_hdas(self): + self._skip_unless_postgres() + + history_id = self.dataset_populator.new_history() + hda = self.dataset_populator.new_dataset(history_id, wait=True) + assert not self.is_purged(history_id, hda) + + # filtering on a date too old - shouldn't purge the dataset + self._pgcleanup_check_output( + [ + "--older-than", + "1", + "--sequence", + "purge_old_hdas", + ] + ) + assert not self.is_purged(history_id, hda) + + # filtering on invalid object store - shouldn't purge the dataset + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--object-store-id", + "myfakeobjectstore", + "--sequence", + "purge_old_hdas", + ] + ) + assert not self.is_purged(history_id, hda) + + self._pgcleanup_check_output(["--older-than", "0", "--sequence", "purge_old_hdas"]) + + assert self.is_purged(history_id, hda) + + @skip_without_tool("test_data_source") + def test_purge_errored_hdas(self): + history_id = self.dataset_populator.new_history() + error_dataset = self.dataset_populator.new_error_dataset(history_id) + assert not self.is_purged(history_id, error_dataset) + + # dataset not old enough, shouldn't be purged + self._pgcleanup_check_output( + [ + "--older-than", + "1", + "--sequence", + "purge_error_hdas", + ] + ) + assert not self.is_purged(history_id, error_dataset) + + # dataset not in target object store, shouldn't be purged + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--object-store-id", + "myfakeobjectstore", + "--sequence", + "purge_error_hdas", + ] + ) + assert not self.is_purged(history_id, error_dataset) + + # okay though, this should purge the dataset + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--sequence", + "purge_error_hdas", + ] + ) + assert self.is_purged(history_id, error_dataset) + + def test_purge_datasets(self): + self._skip_unless_postgres() + + history_id = self.dataset_populator.new_history() + hda = self.dataset_populator.new_dataset(history_id, wait=True) + self.dataset_populator.delete_dataset(history_id, hda["id"]) + assert not self.is_purged(history_id, hda) + + self._pgcleanup_check_output( + [ + "--older-than", + "1", + "--sequence", + "purge_datasets", + ] + ) + assert not self.is_purged(history_id, hda) + + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--object-store-id", + "myfakeobjectstore", + "--sequence", + "purge_datasets", + ] + ) + assert not self.is_purged(history_id, hda) + + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--sequence", + "purge_datasets", + ] + ) + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--sequence", + "purge_datasets", + ] + ) + + # why is this not purged? + # test or functionality seem broken but better to run through it and ensure + # it isn't breaking anything and everything is syntactically correct than not + # assert self.is_purged(history_id, hda) + + def test_delete_datasets(self): + # this walks through the code to ensure no SQL or Python errors but + # I think we would need to talk to the model layer from the test directly + # to actually produce datasets of the target type for purging and to verify + # they were purged (certainly a possibility) + self._skip_unless_postgres() + + history_id = self.dataset_populator.new_history() + hda = self.dataset_populator.new_dataset(history_id, wait=True) + + assert not self.is_purged(history_id, hda) + + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--sequence", + "delete_datasets", + ] + ) + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--object-store-id", + "myfakeobjectstore", + "--sequence", + "delete_datasets", + ] + ) + + assert not self.is_purged(history_id, hda) + + def test_purge_historyless_hdas(self): + # same as above - this is just a negative test for things being broken + # we could access the model layer to write a test to verify the positive + # behavior actually occurs + self._skip_unless_postgres() + + history_id = self.dataset_populator.new_history() + hda = self.dataset_populator.new_dataset(history_id, wait=True) + + assert not self.is_purged(history_id, hda) + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--sequence", + "purge_historyless_hdas", + ] + ) + self._pgcleanup_check_output( + [ + "--older-than", + "0", + "--object-store-id", + "myfakeobjectstore", + "--sequence", + "purge_historyless_hdas", + ] + ) + + assert not self.is_purged(history_id, hda) + + def is_purged(self, history_id: str, dataset) -> bool: + # set wait=False to prevent errored dataset from erroring out + if isinstance(dataset, str): + details_response = self.dataset_populator.get_history_dataset_details( + history_id, dataset_id=dataset, wait=False + ) + else: + details_response = self.dataset_populator.get_history_dataset_details( + history_id, dataset=dataset, wait=False + ) + return details_response["purged"] + + def _pgcleanup_check_output(self, extra_args: List[str]) -> str: + config_file = self.write_config_file() + output = self._scripts_check_output(SCRIPT, ["-c", config_file] + extra_args) + print(output) + return output