Commit
Fix up tests
mvdbeek committed Jun 10, 2024
1 parent 3c7730c commit 2a74f31
Showing 3 changed files with 40 additions and 11 deletions.
34 changes: 24 additions & 10 deletions test/integration/objectstore/_purged_handling.py
@@ -4,7 +4,7 @@
 from galaxy_test.base.populators import DatasetPopulator
 
 
-def purge_while_job_running(dataset_populator: DatasetPopulator, sleep_before_purge=4):
+def purge_while_job_running(dataset_populator: DatasetPopulator, extra_sleep=0):
     with dataset_populator.test_history() as history_id:
         response = dataset_populator.run_tool(
             "all_output_types",
@@ -14,42 +14,56 @@ def purge_while_job_running(dataset_populator: DatasetPopulator, sleep_before_pu
             history_id=history_id,
         )
         job = dataset_populator.get_job_details(response["jobs"][0]["id"], full=True).json()
-        # Sleep a second to make sure we template command before purging output
-        time.sleep(sleep_before_purge)
+        # Make sure job runs (and thus command is templated before purging output)
+        dataset_populator.wait_for_job(job["id"], ok_states=["running"])
+        time.sleep(extra_sleep)
         hda_ids = []
         hda_filenames = []
         for output_name, output in job["outputs"].items():
+            if output_name == "static_output_2":
+                # We need to keep one output so the job doesn't get cancelled
+                continue
-            details = dataset_populator.get_history_dataset_details(history_id=history_id, dataset_id=output["id"])
-            hda_filenames.append(details["file_name"])
+            details = dataset_populator.get_history_dataset_details(
+                history_id=history_id, content_id=output["id"], wait=False
+            )
+            if not output_name.startswith("discovered_output"):
+                # We're not precreating discovered outputs on disk
+                hda_filenames.append(details["file_name"])
             dataset_populator.delete_dataset(
                 history_id=history_id, content_id=output["id"], purge=True, wait_for_purge=True
             )
             hda_ids.append(output["id"])
-        for output_collection in job["output_collections"].values():
+        for output_name, output_collection in job["output_collections"].items():
             hdca = dataset_populator.get_history_collection_details(
                 history_id=history_id, content_id=output_collection["id"]
             )
             for element in hdca["elements"]:
                 # Technically the static pair elements are already included in job["outputs"],
                 # but no harm purging them again here, in case we ever change that logic.
                 hda_id = element["object"]["id"]
                 dataset_populator.delete_dataset(
                     history_id=history_id, content_id=hda_id, purge=True, wait_for_purge=True
                 )
                 hda_ids.append(hda_id)
         dataset_populator.wait_for_job(job["id"], assert_ok=True)
-        # Now make sure we can't find the datasets
-        for hda_id in hda_ids:
+        # Now make sure we can't find the datasets on disk
+        job = dataset_populator.get_job_details(response["jobs"][0]["id"], full=True).json()
+        for output_name, output in job["outputs"].items():
             exception = None
             try:
                 dataset_populator.get_history_dataset_content(history_id=history_id, dataset_id=hda_id)
             except AssertionError as e:
                 exception = e
             assert exception and "The dataset you are attempting to view has been purged" in str(exception)
-            output_details = dataset_populator.get_history_dataset_details(history_id=history_id, dataset_id=hda_id)
+            output_details = dataset_populator.get_history_dataset_details(
+                history_id=history_id, content_id=hda_id, wait=False
+            )
             # Make sure that we don't revert state while finishing job
-            assert output_details["purged"]
+            assert output_details["purged"], f"expected output '{output_name}' to be purged, but it is not purged."
             assert not output_details.get("file_name")
+            assert (
+                output_details["file_size"] == 0
+            ), f"expected file_size for '{output_name}' to be 0, but it is {output_details['file_size']}."
         for file_name in hda_filenames:
             # Make sure job didn't push to object store
             assert not os.path.exists(file_name), f"Expected {file_name} to be purged."
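
For orientation, a minimal sketch of how an integration test might drive this shared helper follows. The test class name and setUp wiring are illustrative assumptions (the populator setup follows the usual pattern in Galaxy's integration tests); only the purge_while_job_running call itself comes from this commit.

from galaxy_test.base.populators import DatasetPopulator
from galaxy_test.driver import integration_util

from ._purged_handling import purge_while_job_running


class TestPurgedHandlingSketch(integration_util.IntegrationTestCase):
    # Hypothetical consumer of the helper above; not part of this commit.
    dataset_populator: DatasetPopulator

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)

    def test_purge_while_job_running(self):
        # extra_sleep defaults to 0; callers that template the command line
        # remotely pass a positive value (see test_extended_metadata.py below).
        purge_while_job_running(self.dataset_populator)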
4 changes: 3 additions & 1 deletion test/integration/test_extended_metadata.py
@@ -98,7 +98,9 @@ def test_fetch_data_library(self):
         assert dataset["created_from_basename"] == "4.bed"
 
     def test_purge_while_job_running(self):
-        purge_while_job_running(self.dataset_populator)
+        # pass extra_sleep, since templating the command line will fail if the output
+        # is deleted before remote_tool_eval runs.
+        purge_while_job_running(self.dataset_populator, extra_sleep=2)
 
 
 class TestExtendedMetadataDeferredIntegration(integration_util.IntegrationTestCase):
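
As the new comment notes, reaching the "running" state is not by itself enough here: with extended metadata, remote_tool_eval templates the command line on the remote side after the job is already reported running, so purging immediately can still race it; the fixed extra_sleep=2 pads that window. Conceptually, the helper's wait is a state poll. A rough standalone sketch of that idea, assuming nothing about DatasetPopulator's actual implementation (the real wait_for_job talks to the Galaxy jobs API):

import time


def wait_for_state(get_state, accepted_states, timeout=60.0, poll_interval=0.5):
    # Illustrative stand-in for dataset_populator.wait_for_job(..., ok_states=[...]).
    # Polls a state callable until an accepted state is seen or the timeout expires,
    # e.g. wait_for_state(lambda: get_job_details(job_id).json()["state"], {"running"}).
    start = time.time()
    while time.time() - start < timeout:
        state = get_state()
        if state in accepted_states:
            return state
        time.sleep(poll_interval)
    raise TimeoutError(f"state did not reach one of {accepted_states} within {timeout}s")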
13 changes: 13 additions & 0 deletions test/integration/test_pulsar_embedded_mq.py
@@ -82,6 +82,19 @@ def test_purge_while_job_running(self):
         purge_while_job_running(self.dataset_populator)
 
 
+class TestEmbeddedMessageQueuePulsarExtendedMetadataPurge(TestEmbeddedMessageQueuePulsarPurge):
+    """Describe a Galaxy test instance with embedded pulsar and extended metadata configured.
+
+    $ Setup RabbitMQ (e.g. https://www.rabbitmq.com/install-homebrew.html)
+    $ GALAXY_TEST_AMQP_URL='amqp://guest:guest@localhost:5672//' pytest -s test/integration/test_pulsar_embedded_mq.py
+    """
+
+    @classmethod
+    def handle_galaxy_config_kwds(cls, config):
+        config["metadata_strategy"] = "extended"
+        _handle_galaxy_config_kwds(cls, config)
+
+
 class EmbeddedMessageQueuePulsarIntegrationInstance(integration_util.IntegrationInstance):
     """Describe a Galaxy test instance with embedded pulsar configured.
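
The added class shows the configuration-layering pattern these tests rely on: a subclass overrides handle_galaxy_config_kwds, injects one extra setting, and then delegates to the module-level helper so the shared Pulsar/AMQP wiring still applies. A self-contained sketch of that pattern with hypothetical names (the real _handle_galaxy_config_kwds in this file sets up the embedded Pulsar job configuration):

def _handle_galaxy_config_kwds(cls, config):
    # Hypothetical shared helper standing in for the module-level one;
    # the real version wires up the embedded Pulsar/AMQP job config.
    config.setdefault("job_config_file", getattr(cls, "job_config_file", None))


class BasePulsarPurgeCase:
    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        _handle_galaxy_config_kwds(cls, config)


class ExtendedMetadataPulsarPurgeCase(BasePulsarPurgeCase):
    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        # Layer one extra setting, then delegate, mirroring
        # TestEmbeddedMessageQueuePulsarExtendedMetadataPurge above.
        config["metadata_strategy"] = "extended"
        _handle_galaxy_config_kwds(cls, config)

To run only the new class, the standard pytest -k filter can be appended to the command from the docstring, e.g. -k TestEmbeddedMessageQueuePulsarExtendedMetadataPurge.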
