
Commit

Merge branch 'release_21.09' into dev
mvdbeek committed Oct 20, 2021
2 parents 5e85310 + 0e25970 commit 0e11349
Showing 29 changed files with 220 additions and 147 deletions.
1 change: 1 addition & 0 deletions .github/workflows/api.yaml
@@ -2,6 +2,7 @@ name: API tests
on: [push, pull_request]
env:
GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/galaxy?client_encoding=utf8'
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
concurrency:
group: api-${{ github.ref }}
cancel-in-progress: true
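Note: GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA, added here and in each workflow file below, evidently opts the test runs into raising on HDAs created without a history. A minimal sketch of how a harness typically consumes such a flag; the helper name is hypothetical, the real wiring lives in Galaxy's test framework:

import os

# Hypothetical helper: unset or '0' keeps the old lenient behavior,
# '1' (as set in these workflows) turns the condition into a hard error.
def raise_on_historyless_hda() -> bool:
    return os.environ.get("GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA", "0") == "1"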
1 change: 1 addition & 0 deletions .github/workflows/api_paste.yaml
@@ -3,6 +3,7 @@ on: [push, pull_request]
env:
GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/galaxy?client_encoding=utf8'
GALAXY_TEST_USE_UVICORN: false
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
concurrency:
group: api-legacy-${{ github.ref }}
cancel-in-progress: true
4 changes: 3 additions & 1 deletion .github/workflows/converter_tests.yaml
@@ -1,5 +1,7 @@
name: Converter tests
on: [push, pull_request]
env:
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
concurrency:
group: converter-${{ github.ref }}
cancel-in-progress: true
@@ -65,4 +67,4 @@ jobs:
if: failure()
with:
name: Converter test results (${{ matrix.python-version }})
-          path: tool_test_output.html
+          path: tool_test_output.html
2 changes: 2 additions & 0 deletions .github/workflows/framework.yaml
@@ -1,5 +1,7 @@
name: Framework tests
on: [push, pull_request]
env:
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
concurrency:
group: framework-${{ github.ref }}
cancel-in-progress: true
1 change: 1 addition & 0 deletions .github/workflows/integration.yaml
@@ -6,6 +6,7 @@ concurrency:
env:
GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/galaxy?client_encoding=utf8'
GALAXY_TEST_AMQP_URL: 'amqp://localhost:5672//'
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
jobs:
test:
name: Test
1 change: 1 addition & 0 deletions .github/workflows/integration_selenium.yaml
@@ -8,6 +8,7 @@ env:
GALAXY_SKIP_CLIENT_BUILD: '0'
GALAXY_TEST_SELENIUM_RETRIES: 1
YARN_INSTALL_OPTS: --frozen-lockfile
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
jobs:
test:
name: Test
1 change: 1 addition & 0 deletions .github/workflows/selenium.yaml
@@ -8,6 +8,7 @@ env:
GALAXY_TEST_SKIP_FLAKEY_TESTS_ON_ERROR: 'true'
GALAXY_TEST_SELENIUM_RETRIES: 1
YARN_INSTALL_OPTS: --frozen-lockfile
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
jobs:
test:
name: Test
1 change: 1 addition & 0 deletions .github/workflows/selenium_beta.yaml
@@ -6,6 +6,7 @@ env:
GALAXY_TEST_SELENIUM_RETRIES: 1
GALAXY_TEST_SELENIUM_BETA_HISTORY: 1
YARN_INSTALL_OPTS: --frozen-lockfile
GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
jobs:
test:
name: Test
@@ -150,7 +150,7 @@ export const contentPayload = (cfg = {}) => {
filter(([a,b]) => !isNaN(a.maxHid) && !isNaN(b.maxHid)),
withLatestFrom(pos$, hid$),
map(([[lastResponse, response], pos, hid]) => {
-            const updatesAtTop = response.maxHid > lastResponse.maxHid;
+            const updatesAtTop = response.maxHid >= lastResponse.maxHid;

const scrollerExactlyAtTop = pos.cursor === 0 || pos.key === lastResponse.maxHid;
const fudge = 2;
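With '>' the scroller only treated a strictly larger maxHid as new content at the top; '>=' also covers a response that updates the current newest item in place. A one-line sketch of the adjusted predicate (Python for illustration; the original is client-side JavaScript):

# Hypothetical standalone version of the predicate changed above.
def updates_at_top(last_max_hid: int, new_max_hid: int) -> bool:
    # '>=' counts a response whose newest hid equals the previous one,
    # i.e. an in-place update to the top item, as an update at the top.
    return new_max_hid >= last_max_hid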
7 changes: 7 additions & 0 deletions lib/galaxy/datatypes/test/1.yaml
@@ -0,0 +1,7 @@
test:
- test1
- test2
testing: testing 1
test2:
- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus ac justo metus. Praesent eget mollis enim, sed facilisis felis. Pellentesque vulputate varius mi, vel eleifend mauris luctus vestibulum. Suspendisse elementum nisi lorem, in consequat nisl commodo nec. Curabitur faucibus nec diam id interdum. Sed lobortis sed libero id feugiat. Duis diam odio, elementum sed suscipit ac, venenatis a eros. Phasellus at sollicitudin ligula. Suspendisse congue, tortor vitae aliquet ornare, diam mi scelerisque eros, vel tempus elit odio ac ligula. Pellentesque ultricies diam ornare tempor sagittis. Donec vel elit ac elit placerat dictum. Pellentesque et diam at sem porttitor blandit.
- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus ac justo metus. Praesent eget mollis enim, sed facilisis felis. Pellentesque vulputate varius mi, vel eleifend mauris luctus vestibulum. Suspendisse elementum nisi lorem, in consequat nisl commodo nec. Curabitur faucibus nec diam id interdum. Sed lobortis sed libero id feugiat. Duis diam odio, elementum sed suscipit ac, venenatis a eros. Phasellus at sollicitudin ligula. Suspendisse congue, tortor vitae aliquet ornare, diam mi scelerisque eros, vel tempus elit odio ac ligula. Pellentesque ultricies diam ornare tempor sagittis. Donec vel elit ac elit placerat dictum. Pellentesque et diam at sem porttitor blandit.
42 changes: 25 additions & 17 deletions lib/galaxy/job_execution/output_collect.py
@@ -17,7 +17,6 @@
ModelPersistenceContext,
persist_elements_to_folder,
persist_elements_to_hdca,
-    persist_extra_files,
persist_hdas,
RegexCollectedDatasetMatch,
SessionlessModelPersistenceContext,
@@ -207,6 +206,13 @@ def __init__(
self.object_store = object_store
self.final_job_state = final_job_state
self.flush_per_n_datasets = flush_per_n_datasets
+        self._tag_handler = None
+
+    @property
+    def tag_handler(self):
+        if self._tag_handler is None:
+            self._tag_handler = self.app.tag_handler.create_tag_handler_session()
+        return self._tag_handler

@property
def work_context(self):
@@ -221,10 +227,6 @@ def user(self):
user = None
return user

-    @property
-    def tag_handler(self):
-        return self.app.tag_handler

def persist_object(self, obj):
self.sa_session.add(obj)

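Taken together, these two hunks swap the pass-through tag_handler property for a lazily created tag handler session. A condensed sketch of the pattern (the enclosing class name is illustrative; create_tag_handler_session is taken from the diff itself):

# One tag handler session per job context, created on first use and then
# reused, so tag writes during job finishing can share a single flush.
class JobContext:
    def __init__(self, app):
        self.app = app
        self._tag_handler = None

    @property
    def tag_handler(self):
        if self._tag_handler is None:
            self._tag_handler = self.app.tag_handler.create_tag_handler_session()
        return self._tag_handler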
@@ -284,7 +286,8 @@ def add_library_dataset_to_folder(self, library_folder, ld):

def add_datasets_to_history(self, datasets, for_output_dataset=None):
sa_session = self.sa_session
-        self.job.history.add_datasets(sa_session, datasets)
+        self.job.history.stage_addition(datasets)
+        pending_histories = {self.job.history}
if for_output_dataset is not None:
# Need to update all associated output hdas, i.e. history was
# shared with job running
@@ -293,9 +296,11 @@ def add_datasets_to_history(self, datasets, for_output_dataset=None):
continue
for dataset in datasets:
new_data = dataset.copy()
-                copied_dataset.history.add_dataset(new_data)
+                copied_dataset.history.stage_addition(new_data)
+                pending_histories.add(copied_dataset.history)
sa_session.add(new_data)
-        sa_session.flush()
+        for history in pending_histories:
+            history.add_pending_items()

def output_collection_def(self, name):
tool = self.tool
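add_datasets_to_history now stages datasets on each affected history and commits them in one pass instead of flushing per dataset. A self-contained toy model of that staging pattern (the real stage_addition / add_pending_items live on Galaxy's History model):

# Toy history: stage_addition queues items without assigning hids or
# flushing; add_pending_items assigns hids for the whole batch at once.
class ToyHistory:
    def __init__(self):
        self.datasets = []
        self._pending = []
        self._next_hid = 1

    def stage_addition(self, dataset):
        self._pending.append(dataset)

    def add_pending_items(self):
        for dataset in self._pending:
            dataset["hid"] = self._next_hid
            self._next_hid += 1
            self.datasets.append(dataset)
        self._pending.clear()

history = ToyHistory()
for name in ("output1", "output2"):
    history.stage_addition({"name": name})
history.add_pending_items()  # one bulk commit instead of two flushes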
@@ -388,6 +393,7 @@ def collect_primary_datasets(job_context, output, input_ext):
primary_output_assigned = False
new_outdata_name = None
primary_datasets = {}
+    storage_callbacks = []
for output_index, (name, outdata) in enumerate(output.items()):
dataset_collectors = [DEFAULT_DATASET_COLLECTOR]
output_def = job_context.output_def(name)
@@ -429,27 +435,27 @@ def collect_primary_datasets(job_context, output, input_ext):
# TODO: should be able to disambiguate files in different directories...
new_primary_filename = os.path.split(filename)[-1]
new_primary_datasets_attributes = job_context.tool_provided_metadata.get_new_dataset_meta_by_basename(name, new_primary_filename)

+                extra_files = None
+                if new_primary_datasets_attributes:
+                    extra_files_path = new_primary_datasets_attributes.get('extra_files', None)
+                    if extra_files_path:
+                        extra_files = os.path.join(job_working_directory, extra_files_path)
primary_data = job_context.create_dataset(
ext,
designation,
visible,
dbkey,
new_primary_name,
filename,
+                    extra_files=extra_files,
info=info,
init_from=outdata,
dataset_attributes=new_primary_datasets_attributes,
-                    creating_job_id=job_context.get_job_id() if job_context else None
+                    creating_job_id=job_context.get_job_id() if job_context else None,
+                    storage_callbacks=storage_callbacks
)
# Associate new dataset with job
job_context.add_output_dataset_association(f'__new_primary_file_{name}|{designation}__', primary_data)

-                if new_primary_datasets_attributes:
-                    extra_files_path = new_primary_datasets_attributes.get('extra_files', None)
-                    if extra_files_path:
-                        extra_files_path_joined = os.path.join(job_working_directory, extra_files_path)
-                        persist_extra_files(job_context.object_store, extra_files_path_joined, primary_data)
job_context.add_datasets_to_history([primary_data], for_output_dataset=outdata)
# Add dataset to return dict
primary_datasets[name][designation] = primary_data
@@ -462,7 +468,9 @@ def collect_primary_datasets(job_context, output, input_ext):
if sa_session:
sa_session.add(outdata)

-    job_context.flush()
+    # Move discovered outputs to storage and set metadata / peeks
+    for callback in storage_callbacks:
+        callback()
return primary_datasets


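The collect_primary_datasets changes replace the trailing job_context.flush() with a list of storage callbacks: create_dataset registers the object-store write and metadata/peek work for each discovered dataset, and everything runs once discovery is complete. A self-contained sketch of that deferred-callback shape (names simplified; not Galaxy's actual API):

from typing import Callable, List

def create_dataset(filename: str, storage_callbacks: List[Callable[[], None]]) -> dict:
    dataset = {"file": filename, "stored": False}

    def store() -> None:
        # stand-in for moving the file into the object store and
        # setting metadata / peek after discovery has finished
        dataset["stored"] = True

    storage_callbacks.append(store)
    return dataset

callbacks: List[Callable[[], None]] = []
datasets = [create_dataset(f, callbacks) for f in ("a.txt", "b.txt")]
for callback in callbacks:  # deferred work runs once, at the end
    callback()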
20 changes: 10 additions & 10 deletions lib/galaxy/jobs/actions/post.py
@@ -209,7 +209,7 @@ def _gen_new_name(self, action, input_names, replacement_dict):
@classmethod
def execute(cls, app, sa_session, action, job, replacement_dict, final_job_state=None):
input_names = {}
-        # Lookp through inputs find one with "to_be_replaced" input
+        # Loop through inputs find one with "to_be_replaced" input
# variable name, and get the replacement name
for input_assoc in job.input_datasets:
if input_assoc.dataset:
@@ -399,31 +399,31 @@ class TagDatasetAction(DefaultJobAction):

@classmethod
def execute_on_mapped_over(cls, trans, sa_session, action, step_inputs, step_outputs, replacement_dict, final_job_state=None):
+        tag_handler = trans.app.tag_handler.create_tag_handler_session()
if action.action_arguments:
tags = [t.replace('#', 'name:') if t.startswith('#') else t for t in [t.strip() for t in action.action_arguments.get('tags', '').split(',') if t.strip()]]
if tags:
for name, step_output in step_outputs.items():
if action.output_name == '' or name == action.output_name:
-                    cls._execute(trans.app, trans.user, step_output, tags)
+                    cls._execute(tag_handler, trans.user, step_output, tags)

@classmethod
def execute(cls, app, sa_session, action, job, replacement_dict, final_job_state=None):
if action.action_arguments:
+            tag_handler = app.tag_handler.create_tag_handler_session()
tags = [t.replace('#', 'name:') if t.startswith('#') else t for t in [t.strip() for t in action.action_arguments.get('tags', '').split(',') if t.strip()]]
if tags:
for dataset_assoc in job.output_datasets:
if action.output_name == '' or dataset_assoc.name == action.output_name:
-                    cls._execute(app, job.user, dataset_assoc.dataset, tags)
+                    cls._execute(tag_handler, job.user, dataset_assoc.dataset, tags)

for dataset_collection_assoc in job.output_dataset_collection_instances:
if action.output_name == '' or dataset_collection_assoc.name == action.output_name:
-                    cls._execute(app, job.user, dataset_collection_assoc.dataset_collection_instance, tags)
-
-            sa_session.flush()
+                    cls._execute(tag_handler, job.user, dataset_collection_assoc.dataset_collection_instance, tags)

@classmethod
-    def _execute(cls, app, user, output, tags):
-        app.tag_handler.add_tags_from_list(user, output, tags)
+    def _execute(cls, tag_handler, user, output, tags):
+        tag_handler.add_tags_from_list(user, output, tags, flush=False)

@classmethod
def get_short_str(cls, pja):
@@ -443,8 +443,8 @@ class RemoveTagDatasetAction(TagDatasetAction):
direction = "from"

@classmethod
-    def _execute(cls, app, user, output, tags):
-        app.tag_handler.remove_tags_from_list(user, output, tags)
+    def _execute(cls, tag_handler, user, output, tags):
+        tag_handler.remove_tags_from_list(user, output, tags)


class ActionBox:
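Both tag actions now receive a session-scoped tag handler and pass flush=False, so a job that tags many outputs pays for one flush rather than one per output; the explicit sa_session.flush() is dropped. A toy model of that batching contract (Galaxy's real handler comes from app.tag_handler.create_tag_handler_session()):

# Toy tag handler session: flush=False queues writes; flush() applies the
# whole batch in one step, mimicking a single database round-trip.
class ToyTagHandlerSession:
    def __init__(self):
        self.pending = []

    def add_tags_from_list(self, user, item, tags, flush=True):
        self.pending.append((user, item, tuple(tags)))
        if flush:
            self.flush()

    def flush(self):
        applied, self.pending = self.pending, []
        return applied

session = ToyTagHandlerSession()
for output in ("mapped_out_1", "mapped_out_2"):
    session.add_tags_from_list("user", output, ["name:sample"], flush=False)
session.flush()  # one commit for every tagged output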
14 changes: 8 additions & 6 deletions lib/galaxy/managers/collections.py
@@ -57,7 +57,7 @@ def __init__(

self.hda_manager = hda_manager
self.history_manager = history_manager
-        self.tag_handler = tag_handler
+        self.tag_handler = tag_handler.create_tag_handler_session()
self.ldda_manager = ldda_manager

def precreate_dataset_collection_instance(self, trans, parent, name, structure, implicit_inputs=None, implicit_output_name=None, tags=None, completed_collection=None):
@@ -182,7 +182,7 @@ def _create_instance_for_collection(self, trans, parent, name, dataset_collectio
# values.
if isinstance(tags, list):
assert implicit_inputs is None, implicit_inputs
-            tags = self.tag_handler.add_tags_from_list(trans.user, dataset_collection_instance, tags)
+            tags = self.tag_handler.add_tags_from_list(trans.user, dataset_collection_instance, tags, flush=False)
else:
tags = self._append_tags(dataset_collection_instance, implicit_inputs, tags)
return self.__persist(dataset_collection_instance, flush=flush)
@@ -208,6 +208,8 @@ def create_dataset_collection(self, trans, collection_type, element_identifiers=
hide_source_items=hide_source_items,
copy_elements=copy_elements,
history=history)
+        if history:
+            history.add_pending_items()
else:
if has_subcollections:
# Nested collection - recursively create collections as needed.
@@ -339,7 +341,7 @@ def copy(self, trans, parent, source, encoded_source_id, copy_elements=False, da
copy_kwds["element_destination"] = parent # e.g. a history
if dataset_instance_attributes is not None:
copy_kwds["dataset_instance_attributes"] = dataset_instance_attributes
-        new_hdca = source_hdca.copy(**copy_kwds)
+        new_hdca = source_hdca.copy(flush=False, **copy_kwds)
new_hdca.copy_tags_from(target_user=trans.get_user(), source=source_hdca)
if not copy_elements:
parent.add_dataset_collection(new_hdca)
Expand Down Expand Up @@ -493,16 +495,16 @@ def __load_element(self, trans, element_identifier, hide_source_items, copy_elem
decoded_id = int(trans.app.security.decode_id(encoded_id))
hda = self.hda_manager.get_accessible(decoded_id, trans.user)
if copy_elements:
-            element = self.hda_manager.copy(hda, history=history or trans.history, hide_copy=True)
+            element = self.hda_manager.copy(hda, history=history or trans.history, hide_copy=True, flush=False)
else:
element = hda
if hide_source_items and self.hda_manager.get_owned(hda.id, user=trans.user, current_history=history or trans.history):
hda.visible = False
-            self.tag_handler.apply_item_tags(user=trans.user, item=element, tags_str=tag_str)
+            self.tag_handler.apply_item_tags(user=trans.user, item=element, tags_str=tag_str, flush=False)
elif src_type == 'ldda':
element = self.ldda_manager.get(trans, encoded_id, check_accessible=True)
element = element.to_history_dataset_association(history or trans.history, add_to_history=True, visible=not hide_source_items)
-            self.tag_handler.apply_item_tags(user=trans.user, item=element, tags_str=tag_str)
+            self.tag_handler.apply_item_tags(user=trans.user, item=element, tags_str=tag_str, flush=False)
elif src_type == 'hdca':
# TODO: Option to copy? Force copy? Copy or allow if not owned?
element = self.__get_history_collection_instance(trans, encoded_id).collection
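The collections manager follows the same discipline: element copies and tag applications are made with flush=False, and history.add_pending_items() commits staged items once per collection creation. A self-contained illustration of threading a flush flag through nested helpers so only the outermost caller commits (names are illustrative, not Galaxy's API):

# Inner helpers defer; the caller decides when the batch is committed.
def commit(staged: list) -> None:
    print(f"committing {len(staged)} staged items")
    staged.clear()

def copy_element(staged: list, item: str, flush: bool = True) -> str:
    copy = f"copy-of-{item}"
    staged.append(copy)
    if flush:
        commit(staged)
    return copy

staged: list = []
for item in ("hda1", "hda2", "hda3"):
    copy_element(staged, item, flush=False)  # defer inside the loop
commit(staged)  # single commit for the whole collection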
20 changes: 10 additions & 10 deletions lib/galaxy/managers/hdas.py
@@ -8,6 +8,8 @@
import logging
import os

+from sqlalchemy.orm.session import object_session

from galaxy import (
datatypes,
exceptions,
@@ -110,26 +112,24 @@ def create(self, history=None, dataset=None, flush=True, **kwargs):
self.session().flush()
return hda

-    def copy(self, hda, history=None, hide_copy=False, **kwargs):
+    def copy(self, hda, history=None, hide_copy=False, flush=True, **kwargs):
"""
Copy hda, including annotation and tags, add to history and return the given HDA.
"""
-        copy = hda.copy(parent_id=kwargs.get('parent_id'), copy_hid=False)
+        copy = hda.copy(parent_id=kwargs.get('parent_id'), copy_hid=False, copy_tags=hda.tags, flush=flush)
if hide_copy:
copy.visible = False
# add_dataset will update the hid to the next avail. in history
if history:
-            history.add_dataset(copy)
+            history.stage_addition(copy)

copy.copied_from_history_dataset_association = hda
copy.set_size()

original_annotation = self.annotation(hda)
-        self.annotate(copy, original_annotation, user=hda.history.user)
-
-        # these use a session flush
-        original_tags = self.get_tags(hda)
-        self.set_tags(copy, original_tags, user=hda.history.user)
+        self.annotate(copy, original_annotation, user=hda.history.user, flush=False)
+        if flush:
+            if history:
+                history.add_pending_items()
+            object_session(copy).flush()

return copy

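HDAManager.copy now takes a flush parameter, copies tags through hda.copy(copy_tags=...) instead of a separate flushing set_tags call, and resolves the owning session with object_session so one flush covers the whole batch. A minimal, runnable illustration of the object_session pattern in plain SQLAlchemy (toy model, not Galaxy's schema):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.orm.session import object_session

Base = declarative_base()

class ToyHDA(Base):
    __tablename__ = "toy_hda"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    copy = ToyHDA(name="copy of dataset 1")
    session.add(copy)  # staged, nothing written yet
    # Same move as the diff: recover whichever session owns the
    # instance and flush it once, after all staged work is done.
    object_session(copy).flush()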
[Diff truncated: the remaining changed files of the 29 total are not shown.]
