
Commit 368eeaa
Merge pull request #18462 from mvdbeek/add_input_extra_files_path
[24.0] Add input extra files to `get_input_fnames`
mvdbeek authored Jun 29, 2024
2 parents 6b8c7f7 + 7eaa54a commit 368eeaa
Showing 2 changed files with 13 additions and 9 deletions.
20 changes: 12 additions & 8 deletions lib/galaxy/job_execution/setup.py
@@ -213,22 +213,26 @@ def get_input_dataset_fnames(self, ds: DatasetInstance) -> List[str]:
         for value in ds.metadata.values():
             if isinstance(value, MetadataFile):
                 filenames.append(value.get_file_name())
+        if ds.dataset and ds.dataset.extra_files_path_exists():
+            filenames.append(ds.dataset.extra_files_path)
         return filenames
 
-    def get_input_fnames(self) -> List[str]:
+    def get_input_datasets(self) -> List[DatasetInstance]:
         job = self.job
+        return [
+            da.dataset for da in job.input_datasets + job.input_library_datasets if da.dataset
+        ]  # da is JobToInputDatasetAssociation object
+
+    def get_input_fnames(self) -> List[str]:
         filenames = []
-        for da in job.input_datasets + job.input_library_datasets:  # da is JobToInputDatasetAssociation object
-            if da.dataset:
-                filenames.extend(self.get_input_dataset_fnames(da.dataset))
+        for ds in self.get_input_datasets():
+            filenames.extend(self.get_input_dataset_fnames(ds))
         return filenames
 
     def get_input_paths(self) -> List[DatasetPath]:
-        job = self.job
         paths = []
-        for da in job.input_datasets + job.input_library_datasets:  # da is JobToInputDatasetAssociation object
-            if da.dataset:
-                paths.append(self.get_input_path(da.dataset))
+        for ds in self.get_input_datasets():
+            paths.append(self.get_input_path(ds))
         return paths
 
     def get_input_path(self, dataset: DatasetInstance) -> DatasetPath:
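
The substantive change in this file is the two added lines in `get_input_dataset_fnames`: a dataset's "extra files" directory (used by composite datatypes) is now reported alongside its primary file and metadata files, so callers of `get_input_fnames` stage composite inputs completely. The rest is a refactor that pulls the shared dataset-gathering loop into a new `get_input_datasets` helper used by both `get_input_fnames` and `get_input_paths`. Below is a minimal, self-contained sketch of the extra-files behaviour; `StubDataset` and the paths are hypothetical stand-ins, and the real method additionally checks `ds.dataset` and collects `MetadataFile` paths.

```python
import os
import tempfile
from typing import List


# Hypothetical stand-in for Galaxy's DatasetInstance/Dataset pair; the real
# objects are richer, and the extra-files check lives on ds.dataset.
class StubDataset:
    def __init__(self, file_name: str, extra_files_path: str = ""):
        self.file_name = file_name
        self.extra_files_path = extra_files_path

    def extra_files_path_exists(self) -> bool:
        return bool(self.extra_files_path) and os.path.isdir(self.extra_files_path)


def get_input_dataset_fnames(ds: StubDataset) -> List[str]:
    # Simplified: the real method also gathers MetadataFile paths.
    filenames = [ds.file_name]
    # The behaviour this commit adds: report the extra files directory too.
    if ds.extra_files_path_exists():
        filenames.append(ds.extra_files_path)
    return filenames


# A composite input: a primary file plus a sibling "extra files" directory.
extra_dir = tempfile.mkdtemp(suffix="_files")
ds = StubDataset(file_name="/data/dataset_1.dat", extra_files_path=extra_dir)
print(get_input_dataset_fnames(ds))
# -> ['/data/dataset_1.dat', '/tmp/tmp..._files']  (two paths, one dataset)
```
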
2 changes: 1 addition & 1 deletion lib/galaxy/jobs/splitters/basic.py
@@ -13,7 +13,7 @@ def set_basic_defaults(job_wrapper):
 
 
 def do_split(job_wrapper):
-    if len(job_wrapper.job_io.get_input_fnames()) > 1 or len(job_wrapper.job_io.get_output_fnames()) > 1:
+    if len(job_wrapper.job_io.get_input_datasets()) > 1 or len(job_wrapper.job_io.get_output_fnames()) > 1:
         log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.")
         raise Exception("Job Splitting Failed, the basic splitter only handles tools with one input and one output")
     # add in the missing information for splitting the one input and merging the one output
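
The splitter tweak follows from the change above: a single composite input can now contribute two entries to `get_input_fnames()` (the primary file plus its extra-files directory), so counting filenames would misread one input as several. Counting datasets preserves the intended one-input check. A small sketch of the distinction, with made-up paths:

```python
# One composite input dataset, as get_input_fnames() may now report it:
# a primary file plus its extra-files directory (hypothetical paths).
input_fnames = ["/data/dataset_1.dat", "/data/dataset_1_files"]
input_datasets = ["dataset_1"]  # still exactly one input dataset

# Old check: len(get_input_fnames()) > 1 would wrongly treat this
# single-input job as having multiple inputs and refuse to split it.
assert len(input_fnames) > 1
# New check: len(get_input_datasets()) > 1 stays False, as intended.
assert not len(input_datasets) > 1
```
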
