diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0759063..0c0077f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,7 +13,7 @@ This repository follows standard Python conventions:
 * `README.md`: Overview of this repository
 * `biosimulators_test_suite/`: Source code for this package
 * `examples`: Example modeling projects that the source code uses to test biosimulation software tools
-* `examples-ouputs`: Example outputs (reports and plots) of executing the example modeling projects
+* `examples-outputs`: Example outputs (reports and plots) of executing the example modeling projects
 * `tests/`: Unit tests for the code for this package
 * `setup.py`: pip installation script for this package
 * `setup.cfg`: Configuration for the pip installation script
diff --git a/biosimulators_test_suite/_version.py b/biosimulators_test_suite/_version.py
index b7e15fa..b04cffb 100644
--- a/biosimulators_test_suite/_version.py
+++ b/biosimulators_test_suite/_version.py
@@ -1 +1 @@
-__version__ = '0.1.16'
+__version__ = '0.1.17'
diff --git a/biosimulators_test_suite/config.py b/biosimulators_test_suite/config.py
new file mode 100644
index 0000000..7d78a4c
--- /dev/null
+++ b/biosimulators_test_suite/config.py
@@ -0,0 +1,9 @@
+import os
+
+
+class Config(object):
+    def __init__(self, pull_docker_image=None):
+        if pull_docker_image is not None:
+            self.pull_docker_image = pull_docker_image
+        else:
+            self.pull_docker_image = os.getenv('PULL_DOCKER_IMAGE', '1').lower() in ['1', 'true']
diff --git a/biosimulators_test_suite/data_model.py b/biosimulators_test_suite/data_model.py
index 3f24f7b..9a46108 100644
--- a/biosimulators_test_suite/data_model.py
+++ b/biosimulators_test_suite/data_model.py
@@ -6,6 +6,7 @@
 :License: MIT
 """
 
+from .config import Config
 from .exceptions import SkippedTestCaseException  # noqa: F401
 from biosimulators_utils.image import get_docker_image
 import abc
@@ -14,7 +15,7 @@
 __all__ = [
     'OutputMedium',
-    'TestCase', 'SedTaskRequirements', 'ExpectedSedReport', 'ExpectedSedPlot',
+    'TestCase', 'SedTaskRequirements', 'ExpectedSedReport', 'ExpectedSedDataSet', 'ExpectedSedPlot',
     'AlertType',
 ]
 
@@ -61,7 +62,7 @@ def eval(self, specifications):
         """
         pass  # pragma: no cover
 
-    def get_simulator_docker_image(self, specifications, pull=True):
+    def get_simulator_docker_image(self, specifications, pull=None):
         """ Get the Docker image for a simulator, pulling if necessary
 
         Args:
@@ -72,6 +73,8 @@
         """
         docker_client = docker.from_env()
         image_url = specifications['image']['url']
+        if pull is None:
+            pull = Config().pull_docker_image
         return get_docker_image(docker_client, image_url, pull=pull)
 
 
@@ -98,18 +101,18 @@ class ExpectedSedReport(object):
 
     Attributes
         id (:obj:`str`): id
-        data_sets (:obj:`list` of :obj:`str`): ids of expected datasets
+        data_sets (:obj:`list` of :obj:`ExpectedSedDataSet`): expected data sets
         points (:obj:`tuple` of :obj:`int`): number of expected points of
-        values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`): expected values of datasets or elements of datasets
+        values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`): expected values of data sets or elements of data sets
     """
 
     def __init__(self, id=None, data_sets=None, points=None, values=None):
        """
         Args:
             id (:obj:`str`, optional): id
-            data_sets (:obj:`set` of :obj:`str`, optional): ids of expected datasets
+            data_sets (:obj:`set` of :obj:`ExpectedSedDataSet`, optional): expected data sets
             points (:obj:`tuple` of :obj:`int`, optional): number of expected points of
-            values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`, optional): expected values of datasets or elements of datasets
+            values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`, optional): expected values of data sets or elements of data sets
         """
         self.id = id
         self.data_sets = data_sets or set()
@@ -117,6 +120,24 @@ def __init__(self, id=None, data_sets=None, points=None, values=None):
         self.values = values
 
 
+class ExpectedSedDataSet(object):
+    """ An expected SED data set
+
+    Attributes
+        id (:obj:`str`): id
+        label (:obj:`str`): label
+    """
+
+    def __init__(self, id=None, label=None):
+        """
+        Args:
+            id (:obj:`str`, optional): id
+            label (:obj:`str`, optional): label
+        """
+        self.id = id
+        self.label = label
+
+
 class ExpectedSedPlot(object):
     """ An expected SED report
diff --git a/biosimulators_test_suite/exceptions.py b/biosimulators_test_suite/exceptions.py
index 9a7ce72..7add6ce 100644
--- a/biosimulators_test_suite/exceptions.py
+++ b/biosimulators_test_suite/exceptions.py
@@ -8,7 +8,7 @@
 
 __all__ = [
     'TestCaseException',
-    'InvalidOuputsException',
+    'InvalidOutputsException',
     'SkippedTestCaseException',
 ]
 
@@ -18,7 +18,7 @@ class TestCaseException(Exception):
     pass  # pragma: no cover
 
 
-class InvalidOuputsException(TestCaseException):
+class InvalidOutputsException(TestCaseException):
     """ Exception raised when outputs of execution of COMBINE/OMEX archive are not as expected """
     pass  # pragma: no cover
diff --git a/biosimulators_test_suite/test_case/log.py b/biosimulators_test_suite/test_case/log.py
index 0b24826..af9ee29 100644
--- a/biosimulators_test_suite/test_case/log.py
+++ b/biosimulators_test_suite/test_case/log.py
@@ -52,10 +52,11 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
 
         try:
             with open(log_path, 'r') as file:
-                status = yaml.load(file)
+                log = yaml.load(file)
         except Exception as exception:
-            warnings.warn('The execution status report produced by the simulator is not valid:\n\n {}'.format(
-                str(exception).replace('\n', '\n ')), TestCaseWarning)
+            msg = 'The execution status report produced by the simulator is not valid:\n\n {}'.format(
+                str(exception).replace('\n', '\n '))
+            warnings.warn(msg, TestCaseWarning)
             return False
 
         self._status_valid = True
@@ -69,26 +70,27 @@ def is_status_valid(status, self=self):
                 self._status_valid = False
 
         try:
-            is_status_valid(status['status'])
+            is_status_valid(log['status'])
 
-            for doc in status['sedDocuments'].values():
-                is_status_valid(doc['status'])
+            for doc_log in log['sedDocuments']:
+                is_status_valid(doc_log['status'])
 
-                for task in doc['tasks'].values():
-                    is_status_valid(task['status'])
+                for task_log in doc_log['tasks']:
+                    is_status_valid(task_log['status'])
 
-                for output in doc['outputs'].values():
-                    is_status_valid(output['status'])
+                for output_log in doc_log['outputs']:
+                    is_status_valid(output_log['status'])
 
-                    els = output.get('dataSets', output.get('curves', output.get('surfaces', None)))
+                    els = output_log.get('dataSets', output_log.get('curves', output_log.get('surfaces', None)))
                     if els is None:
                         raise KeyError('Outputs must have one of the keys `dataSets`, `curves` or `surfaces`')
 
-                    for status in els.values():
-                        is_status_valid(status)
+                    for el in els:
+                        is_status_valid(el['status'])
        except Exception as exception:
-            warnings.warn('The execution status report produced by the simulator is not valid:\n\n {}'.format(
-                str(exception).replace('\n', '\n ')), TestCaseWarning)
+            msg = 'The execution status report produced by the simulator is not valid:\n\n {}'.format(
+                str(exception).replace('\n', '\n '))
+            warnings.warn(msg, TestCaseWarning)
             return False
 
         if not self._status_valid:
diff --git a/biosimulators_test_suite/test_case/published_project.py b/biosimulators_test_suite/test_case/published_project.py
index b06ade6..1a00cb3 100644
--- a/biosimulators_test_suite/test_case/published_project.py
+++ b/biosimulators_test_suite/test_case/published_project.py
@@ -6,10 +6,11 @@
 :License: MIT
 """
 
-from ..data_model import (TestCase, SedTaskRequirements, ExpectedSedReport, ExpectedSedPlot,
+from ..config import Config
+from ..data_model import (TestCase, SedTaskRequirements, ExpectedSedReport, ExpectedSedDataSet, ExpectedSedPlot,
                           AlertType, OutputMedium)
-from ..exceptions import InvalidOuputsException, SkippedTestCaseException
-from ..warnings import IgnoredTestCaseWarning, SimulatorRuntimeErrorWarning, InvalidOuputsWarning, TestCaseWarning
+from ..exceptions import InvalidOutputsException, SkippedTestCaseException
+from ..warnings import IgnoredTestCaseWarning, SimulatorRuntimeErrorWarning, InvalidOutputsWarning, TestCaseWarning
 from .utils import are_array_shapes_equivalent
 from biosimulators_utils.combine.data_model import CombineArchive, CombineArchiveContentFormatPattern  # noqa: F401
 from biosimulators_utils.combine.io import CombineArchiveReader, CombineArchiveWriter
@@ -18,7 +19,7 @@
 from biosimulators_utils.report.io import ReportReader
 from biosimulators_utils.sedml.data_model import (  # noqa: F401
     Report, Task, UniformTimeCourseSimulation,
-    DataGenerator, DataGeneratorVariable, DataGeneratorVariableSymbol, DataSet,
+    DataGenerator, Variable, Symbol, DataSet,
     Model, Simulation, Algorithm)
 from biosimulators_utils.sedml.io import SedmlSimulationReader, SedmlSimulationWriter
 from biosimulators_utils.sedml.utils import (remove_algorithm_parameter_changes,
@@ -182,13 +183,17 @@ def from_dict(self, data):
 
         for exp_report_def in data.get('expectedReports', []):
             id = exp_report_def['id']
-            data_set_labels = set(exp_report_def.get('dataSets', []))
+            data_sets = [ExpectedSedDataSet(id=data_set.get('id', None), label=data_set.get('label', None))
+                         for data_set in exp_report_def.get('dataSets', [])]
+            data_set_ids = [data_set.id for data_set in data_sets]
             points = tuple(exp_report_def['points'])
 
             values = {}
-            for key, val in exp_report_def.get('values', {}).items():
+            for value_def in exp_report_def.get('values', []):
+                data_set_id = value_def['id']
+                val = value_def['value']
                 if isinstance(val, dict):
-                    values[key] = {}
+                    values[data_set_id] = {}
                     for k, v in val.items():
                         multi_index = tuple(int(index) for index in k.split(","))
                         try:
@@ -203,14 +208,14 @@ def from_dict(self, data):
                                     self.id.replace('published_project.SimulatorCanExecutePublishedProject:', ''),
                                     tuple(p - 1 for p in points),
                                 ))
-                        values[key][multi_index] = v
+                        values[data_set_id][multi_index] = v
                 else:
-                    values[key] = numpy.array(val)
 
-            invalid_dataset_ids = set(values.keys()).difference(set(data_set_labels))
+                    values[data_set_id] = numpy.array(val)
+
+            invalid_dataset_ids = set(values.keys()).difference(set(data_set_ids))
             if invalid_dataset_ids:
                 raise ValueError((
-                    "The keys of the expected values of report `{}` of published project test case `{}` "
+                    "The `id` fields of the expected values of report `{}` of published project test case `{}` "
                     "should be defined in the 'dataSets' property. "
" "The following keys were not in the 'dataSets' property:\n - {}").format( id, self.id.replace('published_project.SimulatorCanExecutePublishedProject:', ''), @@ -218,7 +223,7 @@ def from_dict(self, data): self.expected_reports.append(ExpectedSedReport( id=id, - data_sets=data_set_labels, + data_sets=data_sets, points=points, values=values, )) @@ -279,9 +284,10 @@ def eval(self, specifications): out_dir = tempfile.mkdtemp() # pull image and execute COMBINE/OMEX archive for case + pull_docker_image = Config().pull_docker_image try: biosimulators_utils.simulator.exec.exec_sedml_docs_in_archive_with_containerized_simulator( - self.filename, out_dir, specifications['image']['url'], pull_docker_image=True) + self.filename, out_dir, specifications['image']['url'], pull_docker_image=pull_docker_image) except Exception as exception: shutil.rmtree(out_dir) @@ -301,40 +307,34 @@ def eval(self, specifications): else: report_reader = biosimulators_utils.report.io.ReportReader() for expected_report in self.expected_reports: + report = Report() + for data_set in expected_report.data_sets: + report.data_sets.append(DataSet(id=data_set.id, label=data_set.label)) try: - report = report_reader.run(out_dir, expected_report.id, format=ReportFormat.h5) + report_results = report_reader.run(report, out_dir, expected_report.id, format=ReportFormat.h5) except Exception: errors.append('Report {} could not be read'.format(expected_report.id)) continue - missing_data_sets = set(expected_report.data_sets).difference(set(report.index)) + missing_data_sets = set([data_set.id for data_set in expected_report.data_sets]).difference(set(report_results.keys())) if missing_data_sets: errors.append(('Report {} does not contain expected data sets:\n {}\n\n' 'Report contained these data sets:\n {}').format( expected_report.id, '\n '.join(sorted(missing_data_sets)), - '\n '.join(sorted(report.index)), + '\n '.join(sorted(report_results.keys())), )) continue - extra_data_sets = set(report.index).difference(set(expected_report.data_sets)) - if extra_data_sets: - if self.assert_no_extra_datasets: - errors.append('Report {} contains unexpected data sets:\n {}'.format( - expected_report.id, '\n '.join(sorted(extra_data_sets)))) - continue - else: - warnings.warn('Report {} contains unexpected data sets:\n {}'.format( - expected_report.id, '\n '.join(sorted(extra_data_sets))), InvalidOuputsWarning) - - if not are_array_shapes_equivalent(report.shape[1:], expected_report.points): + points = report_results[report.data_sets[0].id].shape + if not are_array_shapes_equivalent(points, expected_report.points): errors.append('Report {} contains incorrect number of points: {} != {}'.format( - expected_report.id, report.shape[1:], expected_report.points)) + expected_report.id, points, expected_report.points)) continue - for data_set_label, expected_value in expected_report.values.items(): + for data_set_id, expected_value in expected_report.values.items(): if isinstance(expected_value, dict): - value = report.loc[data_set_label, :] + value = report_results[data_set_id] for el_id, expected_el_value in expected_value.items(): el_index = numpy.ravel_multi_index([el_id], value.shape)[0] actual_el_value = value[el_index] @@ -347,20 +347,20 @@ def eval(self, specifications): ) except AssertionError: errors.append('Data set {} of report {} does not have expected value at {}: {} != {}'.format( - data_set_label, expected_report.id, el_id, actual_el_value, expected_el_value)) + data_set_id, expected_report.id, el_id, actual_el_value, expected_el_value)) 
                 else:
                     try:
                         numpy.testing.assert_allclose(
-                            report.loc[data_set_label, :],
+                            report_results[data_set_id],
                             expected_value,
                             rtol=self.r_tol,
                             atol=self.a_tol,
                         )
                     except AssertionError:
                         errors.append('Data set {} of report {} does not have expected values'.format(
-                            data_set_label, expected_report.id))
+                            data_set_id, expected_report.id))
 
-            report_ids = report_reader.get_ids(out_dir)
+            report_ids = set(report_reader.get_ids(out_dir))
             expected_report_ids = set(report.id for report in self.expected_reports)
             extra_report_ids = report_ids.difference(expected_report_ids)
             if extra_report_ids:
@@ -369,7 +369,7 @@ def eval(self, specifications):
                     '\n '.join(sorted(extra_report_ids))))
             else:
                 warnings.warn('Unexpected reports were produced:\n {}'.format(
-                    '\n '.join(sorted(extra_report_ids))), InvalidOuputsWarning)
+                    '\n '.join(sorted(extra_report_ids))), InvalidOutputsWarning)
 
         # check expected outputs created: plots
         if os.path.isfile(os.path.join(out_dir, get_config().PLOTS_PATH)):
@@ -389,7 +389,7 @@ def eval(self, specifications):
                     '\n '.join(sorted(missing_plot_ids))))
             else:
                 warnings.warn('Plots were not produced:\n {}'.format(
-                    '\n '.join(sorted(missing_plot_ids))), InvalidOuputsWarning)
+                    '\n '.join(sorted(missing_plot_ids))), InvalidOutputsWarning)
 
         if extra_plot_ids:
             if self.assert_no_extra_plots:
@@ -397,14 +397,14 @@ def eval(self, specifications):
                     '\n '.join(sorted(extra_plot_ids))))
             else:
                 warnings.warn('Extra plots were not produced:\n {}'.format(
-                    '\n '.join(sorted(extra_plot_ids))), InvalidOuputsWarning)
+                    '\n '.join(sorted(extra_plot_ids))), InvalidOutputsWarning)
 
         # cleanup outputs
         shutil.rmtree(out_dir)
 
         # raise errors
         if errors:
-            raise InvalidOuputsException('\n\n'.join(errors))
+            raise InvalidOutputsException('\n\n'.join(errors))
 
 
 class SyntheticCombineArchiveTestCase(TestCase):
@@ -448,9 +448,11 @@ def eval(self, specifications):
 
         # read curated archives and find one that is suitable for testing
         suitable_curated_archive = False
-        for curated_combine_archive_test_case in self.published_projects_test_cases:
+        for published_projects_test_case in self.published_projects_test_cases:
+            self.published_projects_test_case = published_projects_test_case
+
             # read archive
-            curated_archive_filename = curated_combine_archive_test_case.filename
+            curated_archive_filename = published_projects_test_case.filename
             shared_archive_dir = os.path.join(temp_dir, 'archive')
             os.mkdir(shared_archive_dir)
 
@@ -487,9 +489,10 @@ def eval(self, specifications):
             # use synthetic archive to test simulator
             outputs_dir = os.path.join(temp_dir, 'outputs')
             succeeded = False
+            pull_docker_image = Config().pull_docker_image
             try:
                 biosimulators_utils.simulator.exec.exec_sedml_docs_in_archive_with_containerized_simulator(
-                    synthetic_archive_filename, outputs_dir, specifications['image']['url'], pull_docker_image=True)
+                    synthetic_archive_filename, outputs_dir, specifications['image']['url'], pull_docker_image=pull_docker_image)
 
                 succeeded = self.eval_outputs(specifications, synthetic_archive, synthetic_sed_docs, outputs_dir)
             finally:
@@ -908,11 +911,11 @@ def build_synthetic_archive(self, specifications, curated_archive, curated_archi
             time_data_set = False
             for data_gen in doc.data_generators:
                 var = data_gen.variables[0]
-                if var.symbol == DataGeneratorVariableSymbol.time:
+                if var.symbol == Symbol.time:
                     for data_set in report.data_sets:
                         if data_set.data_generator == data_gen:
                             time_data_set = True
-                            data_set.label = '__data_set_time__'
+                            data_set.id = '__data_set_time__'
                             break
                     if time_data_set:
                         break
@@ -922,10 +925,10 @@ def build_synthetic_archive(self, specifications, curated_archive, curated_archi
                     DataGenerator(
                         id='__data_generator_time__',
                         variables=[
-                            DataGeneratorVariable(
+                            Variable(
                                 id='__variable_time__',
                                 task=task,
-                                symbol=DataGeneratorVariableSymbol.time,
+                                symbol=Symbol.time,
                             ),
                         ],
                         math='__variable_time__',
@@ -996,15 +999,17 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
         sim = doc.simulations[0]
         report = doc.outputs[0]
 
-        data = ReportReader().run(outputs_dir, os.path.join(doc_id, report.id))
+        data = ReportReader().run(report, outputs_dir, os.path.join(doc_id, report.id))
 
-        if numpy.any(numpy.isnan(data)):
-            warnings.warn('The results produced by the simulator include `NaN`.', InvalidOuputsWarning)
-            has_warnings = True
+        for data_set_data in data.values():
+            if numpy.any(numpy.isnan(data_set_data)):
+                warnings.warn('The results produced by the simulator include `NaN`.', InvalidOutputsWarning)
+                has_warnings = True
+                break
 
         try:
             numpy.testing.assert_allclose(
-                data.loc['__data_set_time__', :],
+                data['__data_set_time__'],
                 numpy.linspace(sim.output_start_time, sim.output_end_time, sim.number_of_points + 1),
                 rtol=1e-4,
             )
diff --git a/biosimulators_test_suite/test_case/results_report.py b/biosimulators_test_suite/test_case/results_report.py
index 1601de6..0fb5946 100644
--- a/biosimulators_test_suite/test_case/results_report.py
+++ b/biosimulators_test_suite/test_case/results_report.py
@@ -17,11 +17,11 @@
 import warnings
 
 __all__ = [
-    'SimulatorGeneratesReportsOfSimultionResults',
+    'SimulatorGeneratesReportsOfSimulationResults',
 ]
 
 
-class SimulatorGeneratesReportsOfSimultionResults(SingleMasterSedDocumentCombineArchiveTestCase):
+class SimulatorGeneratesReportsOfSimulationResults(SingleMasterSedDocumentCombineArchiveTestCase):
     """ Test that when a COMBINE/OMEX archive defines a (single) master file, the simulator
         only executes this file.
""" @@ -49,25 +49,21 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou doc_id = os.path.relpath(doc_location, '.') for output in sed_doc.outputs: if isinstance(output, Report): - report_data = ReportReader().run(outputs_dir, os.path.join(doc_id, output.id), format=ReportFormat.h5) + report_data = ReportReader().run(output, outputs_dir, os.path.join(doc_id, output.id), format=ReportFormat.h5) - expected_data_sets = set(data_set.label for data_set in output.data_sets) - data_sets = set(report_data.index) + expected_data_sets = set(data_set.id for data_set in output.data_sets) + data_sets = set(report_data.keys()) missing_data_sets = expected_data_sets.difference(data_sets) - extra_data_sets = data_sets.difference(expected_data_sets) + # extra_data_sets = data_sets.difference(expected_data_sets) if missing_data_sets: raise ValueError('Simulator did not produce the following data sets:\n - {}'.format( '\n - '.join(sorted(missing_data_sets)))) - if extra_data_sets: - warnings.warn('Simulator produced the following extra data sets:\n - {}'.format( - '\n - '.join(sorted(extra_data_sets))), TestCaseWarning) - has_warning = True - - if numpy.any(numpy.isnan(report_data)): - warnings.warn('The results produced by the simulator include `NaN`.', TestCaseWarning) - has_warning = True + for data_set_data in report_data.values(): + if numpy.any(numpy.isnan(data_set_data)): + warnings.warn('The results produced by the simulator include `NaN`.', TestCaseWarning) + has_warning = True return not has_warning diff --git a/biosimulators_test_suite/test_case/sedml.py b/biosimulators_test_suite/test_case/sedml.py index 8fa0dba..aece021 100644 --- a/biosimulators_test_suite/test_case/sedml.py +++ b/biosimulators_test_suite/test_case/sedml.py @@ -5,15 +5,15 @@ :Copyright: 2020, Center for Reproducible Biomedical Modeling :License: MIT """ -from ..exceptions import InvalidOuputsException -from ..warnings import InvalidOuputsWarning +from ..exceptions import InvalidOutputsException +from ..warnings import InvalidOutputsWarning from .published_project import SingleMasterSedDocumentCombineArchiveTestCase, UniformTimeCourseTestCase from biosimulators_utils.combine.data_model import CombineArchive # noqa: F401 from biosimulators_utils.archive.io import ArchiveReader from biosimulators_utils.config import get_config from biosimulators_utils.report.io import ReportReader from biosimulators_utils.sedml.data_model import (SedDocument, Output, Report, Plot2D, Plot3D, DataGenerator, # noqa: F401 - DataGeneratorVariable, UniformTimeCourseSimulation, + Variable, UniformTimeCourseSimulation, DataSet, Curve, Surface, AxisScale, Model, ModelAttributeChange, AlgorithmParameterChange) import abc @@ -79,41 +79,41 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou extra_report_ids = set(report_ids).difference(expected_report_ids) if missing_report_ids: - raise InvalidOuputsException('Simulator did not produce the following reports:\n - {}'.format( + raise InvalidOutputsException('Simulator did not produce the following reports:\n - {}'.format( '\n - '.join(sorted('`' + id + '`' for id in missing_report_ids)) )) if extra_report_ids: msg = 'Simulator produced extra reports:\n - {}'.format( '\n - '.join(sorted('`' + id + '`' for id in extra_report_ids))) - warnings.warn(msg, InvalidOuputsWarning) + warnings.warn(msg, InvalidOutputsWarning) has_warnings = True # data sets - expected_data_set_labels = set() - data_set_labels = set() + expected_data_set_ids = set() + 
         for doc_location, doc in synthetic_sed_docs.items():
             doc_id = os.path.relpath(doc_location, './')
             for output in doc.outputs:
                 if isinstance(output, Report):
                     for data_set in output.data_sets:
-                        expected_data_set_labels.add(os.path.join(doc_id, output.id, data_set.label))
+                        expected_data_set_ids.add(os.path.join(doc_id, output.id, data_set.id))
 
-                    results = ReportReader().run(outputs_dir, os.path.join(doc_id, output.id))
-                    data_set_labels.update(set(os.path.join(doc_id, output.id, label) for label in results.index))
+                    results = ReportReader().run(output, outputs_dir, os.path.join(doc_id, output.id))
+                    data_set_ids.update(set(os.path.join(doc_id, output.id, id) for id in results.keys()))
 
-        missing_data_set_labels = expected_data_set_labels.difference(set(data_set_labels))
-        extra_data_set_labels = set(data_set_labels).difference(expected_data_set_labels)
+        missing_data_set_ids = expected_data_set_ids.difference(set(data_set_ids))
+        extra_data_set_ids = set(data_set_ids).difference(expected_data_set_ids)
 
-        if missing_data_set_labels:
-            raise InvalidOuputsException('Simulator did not produce the following data sets:\n - {}'.format(
-                '\n - '.join(sorted('`' + label + '`' for label in missing_data_set_labels))
+        if missing_data_set_ids:
+            raise InvalidOutputsException('Simulator did not produce the following data sets:\n - {}'.format(
+                '\n - '.join(sorted('`' + id + '`' for id in missing_data_set_ids))
             ))
 
-        if extra_data_set_labels:
+        if extra_data_set_ids:
             msg = 'Simulator produced extra data sets:\n - {}'.format(
-                '\n - '.join(sorted('`' + label + '`' for label in extra_data_set_labels)))
-            warnings.warn(msg, InvalidOuputsWarning)
+                '\n - '.join(sorted('`' + id + '`' for id in extra_data_set_ids)))
+            warnings.warn(msg, InvalidOutputsWarning)
             has_warnings = True
 
         return not has_warnings
@@ -244,7 +244,7 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
         doc_id = os.path.relpath(doc_location, './')
 
         report = doc.outputs[0]
-        data = ReportReader().run(outputs_dir, os.path.join(doc_id, report.id))
+        data = ReportReader().run(report, outputs_dir, os.path.join(doc_id, report.id))
 
         for alg_specs in specifications['algorithms']:
             if alg_specs['kisaoId']['id'] == doc.simulations[0].algorithm.kisao_id:
@@ -252,11 +252,12 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
 
         expected_dims = alg_specs['dependentDimensions']
 
-        if numpy.squeeze(data).ndim != 1 + len(expected_dims):
+        data_set_data = data[report.data_sets[0].id]
+        if numpy.squeeze(data_set_data).ndim != len(expected_dims):
             msg = ('The specifications for the number of dimensions of each data set of algorithm `{}` differs '
                    'from the actual number of dimensions, {} != {}.').format(
-                doc.simulations[0].algorithm.kisao_id, data.ndim - 1, len(expected_dims))
-            warnings.warn(msg, InvalidOuputsWarning)
+                doc.simulations[0].algorithm.kisao_id, numpy.squeeze(data_set_data).ndim, len(expected_dims))
+            warnings.warn(msg, InvalidOutputsWarning)
             return False
         else:
             return True
@@ -326,7 +327,7 @@ def build_synthetic_archive(self, specifications, curated_archive, curated_archi
                     sed_doc.data_generators.append(copy_data_gen)
 
                     for var in data_set.data_generator.variables:
-                        copy_var = DataGeneratorVariable(id=var.id, target=var.target, symbol=var.symbol, model=var.model)
+                        copy_var = Variable(id=var.id, target=var.target, symbol=var.symbol, model=var.model)
                         copy_var.task = copy_tasks[var.task.id]
                         copy_data_gen.variables.append(copy_var)
                     copy_data_set.data_generator = copy_data_gen
@@ -427,14 +428,14 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
         extra_report_ids = set(report_ids).difference(expected_report_ids)
 
         if missing_report_ids:
-            raise InvalidOuputsException('Simulator did not produce the following reports:\n - {}'.format(
+            raise InvalidOutputsException('Simulator did not produce the following reports:\n - {}'.format(
                 '\n - '.join(sorted('`' + id + '`' for id in missing_report_ids))
             ))
 
         if extra_report_ids:
             msg = 'Simulator produced extra reports:\n - {}'.format(
                 '\n - '.join(sorted('`' + id + '`' for id in extra_report_ids)))
-            warnings.warn(msg, InvalidOuputsWarning)
+            warnings.warn(msg, InvalidOutputsWarning)
             has_warnings = True
 
         return not has_warnings
@@ -536,7 +537,7 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
         """
         plots_path = os.path.join(outputs_dir, get_config().PLOTS_PATH)
         if not os.path.isfile(plots_path):
-            warnings.warn('Simulator did not produce plots', InvalidOuputsWarning)
+            warnings.warn('Simulator did not produce plots', InvalidOutputsWarning)
             return
 
         tempdir = tempfile.mkdtemp()
@@ -544,7 +545,7 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
        try:
             archive = ArchiveReader().run(plots_path, tempdir)
         except Exception:
             shutil.rmtree(tempdir)
-            raise InvalidOuputsException('Simulator produced an invalid zip archive of plots')
+            raise InvalidOutputsException('Simulator produced an invalid zip archive of plots')
 
         for file in archive.files:
             with open(file.local_path, 'rb') as file:
@@ -552,7 +553,7 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
                 try:
                     PyPDF2.PdfFileReader(file)
                 except Exception:
                     shutil.rmtree(tempdir)
-                    raise InvalidOuputsException('Simulator produced an invalid PDF plot')
+                    raise InvalidOutputsException('Simulator produced an invalid PDF plot')
 
         doc = list(synthetic_sed_docs.values())[0]
         doc_location = list(synthetic_sed_docs.keys())[0]
@@ -566,14 +567,14 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou
 
         if missing_plot_ids:
             shutil.rmtree(tempdir)
-            raise InvalidOuputsException('Simulator did not produce the following plots:\n - {}'.format(
+            raise InvalidOutputsException('Simulator did not produce the following plots:\n - {}'.format(
                 '\n - '.join(sorted('`' + id + '`' for id in missing_plot_ids))
             ))
 
         if extra_plot_ids:
             msg = 'Simulator produced extra plots:\n - {}'.format(
                 '\n - '.join(sorted('`' + id + '`' for id in extra_plot_ids)))
-            warnings.warn(msg, InvalidOuputsWarning)
+            warnings.warn(msg, InvalidOutputsWarning)
 
         shutil.rmtree(tempdir)
 
@@ -581,6 +582,27 @@
 class SimulatorProduces2DPlotsTestCase(SimulatorProducesPlotsTestCase):
     """ Test that a simulator produces 2D plots """
 
+    def is_curated_sed_report_suitable_for_building_synthetic_archive(self, specifications, report):
+        """ Determine if a SED report is suitable for testing
+
+        Args:
+            specifications (:obj:`dict`): specifications of the simulator to validate
+            report (:obj:`Report`): SED report in curated archive
+
+        Returns:
+            :obj:`bool`: whether the report is suitable for testing
+        """
+        if not super(SimulatorProduces2DPlotsTestCase, self).is_curated_sed_report_suitable_for_building_synthetic_archive(
+                specifications, report):
+            return False
+
+        expected_report = next((expected_report for expected_report in self.published_projects_test_case.expected_reports
+                                if expected_report.id == report.id), None)
+        if expected_report is None:
+            return False
+
+        return len(expected_report.points) == 1
+
     def build_plots(self, data_generators):
         """ Build plots from the defined data generators
@@ -611,6 +633,27 @@
 class SimulatorProduces3DPlotsTestCase(SimulatorProducesPlotsTestCase):
     """ Test that a simulator produces 3D plots """
 
+    def is_curated_sed_report_suitable_for_building_synthetic_archive(self, specifications, report):
+        """ Determine if a SED report is suitable for testing
+
+        Args:
+            specifications (:obj:`dict`): specifications of the simulator to validate
+            report (:obj:`Report`): SED report in curated archive
+
+        Returns:
+            :obj:`bool`: whether the report is suitable for testing
+        """
+        if not super(SimulatorProduces3DPlotsTestCase, self).is_curated_sed_report_suitable_for_building_synthetic_archive(
+                specifications, report):
+            return False
+
+        expected_report = next((expected_report for expected_report in self.published_projects_test_case.expected_reports
+                                if expected_report.id == report.id), None)
+        if expected_report is None:
+            return False
+
+        return len(expected_report.points) == 2
+
     def build_plots(self, data_generators):
         """ Build plots from the defined data generators
diff --git a/biosimulators_test_suite/warnings.py b/biosimulators_test_suite/warnings.py
index eb02f20..ff1b3ad 100644
--- a/biosimulators_test_suite/warnings.py
+++ b/biosimulators_test_suite/warnings.py
@@ -10,7 +10,7 @@
     'TestCaseWarning',
     'IgnoredTestCaseWarning',
     'SimulatorRuntimeErrorWarning',
-    'InvalidOuputsWarning',
+    'InvalidOutputsWarning',
 ]
 
 
@@ -29,6 +29,6 @@ class IgnoredTestCaseWarning(TestCaseWarning):
     pass  # pragma: no cover
 
 
-class InvalidOuputsWarning(TestCaseWarning):
+class InvalidOutputsWarning(TestCaseWarning):
     """ Warning that the outputs of the execution of a COMBINE/OMEX archive were not as expected """
     pass  # pragma: no cover
diff --git a/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.h5 b/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.h5
new file mode 100644
index 0000000..6e62789
Binary files /dev/null and b/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.h5 differ
diff --git a/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.json b/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.json
index 28c2b5f..ed41574 100644
--- a/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.json
+++ b/examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.json
@@ -7,14 +7,20 @@
     }],
     "expectedReports": [{
         "id": "Dolan2015.sedml/report",
-        "dataSets": ["time", "MDM2_mRNA", "MDM2"],
+        "dataSets": [
+            {"id": "time", "label": "time"},
+            {"id": "MDM2_mRNA", "label": "MDM2_mRNA"},
+            {"id": "MDM2", "label": "MDM2"}
+        ],
         "points": [101],
-        "values": {
-            "time": {
+        "values": [{
+            "id": "time",
+            "label": "time",
+            "value": {
                 "0": 0.0,
                 "100": 100
             }
-        }
+        }]
     }],
     "expectedPlots": [],
     "runtimeFailureAlertType": "exception",
diff --git a/examples/bngl/test-bngl.h5 b/examples/bngl/test-bngl.h5
new file mode 100644
index 0000000..d4bcc27
Binary files /dev/null and b/examples/bngl/test-bngl.h5 differ
diff --git a/examples/bngl/test-bngl.json b/examples/bngl/test-bngl.json
index 310cf5a..3156c13 100644
--- a/examples/bngl/test-bngl.json
+++ b/examples/bngl/test-bngl.json
@@ -7,14 +7,26 @@
     }],
     "expectedReports": [{
         "id": "test.sedml/report",
-        "dataSets": ["time", "Atot", "Btot", "GA00tot", "GA01tot", "GA10tot", "GB00tot", "GB01tot", "GB10tot"],
+        "dataSets": [
+            {"id": "data_set_time", "label": "time"},
+            {"id": "data_set_Atot", "label": "Atot"},
+            {"id": "data_set_Btot", "label": "Btot"},
+            {"id": "data_set_GA00tot", "label": "GA00tot"},
"GA00tot"}, + {"id": "data_set_GA01tot", "label": "GA01tot"}, + {"id": "data_set_GA10tot", "label": "GA10tot"}, + {"id": "data_set_GB00tot", "label": "GB00tot"}, + {"id": "data_set_GB01tot", "label": "GB01tot"}, + {"id": "data_set_GB10tot", "label": "GB10tot"} + ], "points": [101], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "100": 10 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.h5 b/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.h5 new file mode 100644 index 0000000..5c7991e Binary files /dev/null and b/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.h5 differ diff --git a/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json b/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json index 806a80d..f921ce9 100644 --- a/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json +++ b/examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json @@ -7,17 +7,24 @@ }], "expectedReports": [{ "id": "BIOMD0000000912_sim.sedml/BIOMD0000000912_sim", - "dataSets": ["time", "T", "E", "I"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_T", "label": "T"}, + {"id": "data_set_E", "label": "E"}, + {"id": "data_set_I", "label": "I"} + ], "points": [5001], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "1": 0.2, "2": 0.4, "999": 199.8, "1000": 200 } - } + }] }], "expectedPlots": [{ "id": "BIOMD0000000912_sim.sedml/plot_1" diff --git a/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.h5 b/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.h5 new file mode 100644 index 0000000..e16f760 Binary files /dev/null and b/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.h5 differ diff --git a/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.json b/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.json index 478f623..ff8e8e1 100644 --- a/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.json +++ b/examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.json @@ -7,17 +7,40 @@ }], "expectedReports": [{ "id": "simulation_1.sedml/simulation_1", - "dataSets": ["time", "Cdh1", "Trim", "Clb", "Sic", "PTrim", "PClb", "SBF", "IE", "Cdc20a", "Cdc20", "Swe1", "Swe1M", "PSwe1", "PSwe1M", "Mih1a", "Mcm", "BE", "Cln", "mass"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_Cdh1", "label": "Cdh1"}, + {"id": "data_set_Trim", "label": "Trim"}, + {"id": "data_set_Clb", "label": "Clb"}, + {"id": "data_set_Sic", "label": "Sic"}, + {"id": "data_set_PTrim", "label": "PTrim"}, + {"id": "data_set_PClb", "label": "PClb"}, + {"id": "data_set_SBF", "label": "SBF"}, + {"id": "data_set_IE", "label": "IE"}, + {"id": "data_set_Cdc20a", "label": "Cdc20a"}, + {"id": "data_set_Cdc20", "label": "Cdc20"}, + {"id": "data_set_Swe1", "label": "Swe1"}, + {"id": "data_set_Swe1M", "label": "Swe1M"}, + {"id": "data_set_PSwe1", "label": "PSwe1"}, + {"id": "data_set_PSwe1M", "label": "PSwe1M"}, + {"id": "data_set_Mih1a", "label": "Mih1a"}, + {"id": "data_set_Mcm", "label": "Mcm"}, + {"id": "data_set_BE", "label": "BE"}, + {"id": "data_set_Cln", "label": 
"Cln"}, + {"id": "data_set_mass", "label": "mass"} + ], "points": [101], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "1": 1.0, "2": 2.0, "99": 99.0, "100": 100.0 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.h5 b/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.h5 new file mode 100644 index 0000000..a21e6e2 Binary files /dev/null and b/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.h5 differ diff --git a/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.json b/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.json index 6a6f10f..bef301b 100644 --- a/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.json +++ b/examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.json @@ -7,17 +7,24 @@ }], "expectedReports": [{ "id": "BIOMD0000000002_sim.sedml/BIOMD0000000002_sim", - "dataSets": ["time", "BLL", "IL", "AL"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_BLL", "label": "BLL"}, + {"id": "data_set_IL", "label": "IL"}, + {"id": "data_set_AL", "label": "AL"} + ], "points": [1001], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "1": 0.0005, "2": 0.0010, "999": 0.4995, "1000": 0.5000 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.h5 b/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.h5 new file mode 100644 index 0000000..e415af7 Binary files /dev/null and b/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.h5 differ diff --git a/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.json b/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.json index 77dc994..78e6ee8 100644 --- a/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.json +++ b/examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.json @@ -7,14 +7,19 @@ }], "expectedReports": [{ "id": "Parmar2017_Deficient_Rich_tracer.sedml/simulation_1", - "dataSets": ["time", "FeDuo"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_FeDuo", "label": "FeDuo"} + ], "points": [301], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "300": 5100.0 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.h5 b/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.h5 new file mode 100644 index 0000000..5533fdd Binary files /dev/null and b/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.h5 differ diff --git a/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.json b/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.json index bb412fa..2646fd6 100644 --- a/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.json +++ b/examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.json @@ -7,14 +7,27 @@ }], "expectedReports": [{ "id": "BIOMD0000000896_sim.sedml/BIOMD0000000896_sim", - "dataSets": ["time", "Hsp70", "HSF", "S", "Hsp70_HSF", "Hsp70_S", "HSF_3", "HSE", "HSF_3_HSE", "mRNA"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_Hsp70", "label": "Hsp70"}, + {"id": 
"data_set_HSF", "label": "HSF"}, + {"id": "data_set_S", "label": "S"}, + {"id": "data_set_Hsp70_HSF", "label": "Hsp70_HSF"}, + {"id": "data_set_Hsp70_S", "label": "Hsp70_S"}, + {"id": "data_set_HSF_3", "label": "HSF_3"}, + {"id": "data_set_HSE", "label": "HSE"}, + {"id": "data_set_HSF_3_HSE", "label": "HSF_3_HSE"}, + {"id": "data_set_mRNA", "label": "mRNA"} + ], "points": [4001], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "4000": 1000 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.h5 b/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.h5 new file mode 100644 index 0000000..4030eab Binary files /dev/null and b/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.h5 differ diff --git a/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.json b/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.json index 5ba95b9..46bf4b6 100644 --- a/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.json +++ b/examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.json @@ -7,14 +7,22 @@ }], "expectedReports": [{ "id": "BIOMD0000000678_sim.sedml/BIOMD0000000678_sim", - "dataSets": ["time", "NFAT_phosphorylated", "NFAT_dephosphorylated", "NFAT_transported", "stimulus"], + "dataSets": [ + {"id": "data_set_time", "label": "time"}, + {"id": "data_set_NFAT_phosphorylated", "label": "NFAT_phosphorylated"}, + {"id": "data_set_NFAT_dephosphorylated", "label": "NFAT_dephosphorylated"}, + {"id": "data_set_NFAT_transported", "label": "NFAT_transported"}, + {"id": "data_set_stimulus", "label": "stimulus"} + ], "points": [801], - "values": { - "time": { + "values": [{ + "id": "data_set_time", + "label": "time", + "value": { "0": 0.0, "800": 80 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.h5 b/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.h5 new file mode 100644 index 0000000..b544753 Binary files /dev/null and b/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.h5 differ diff --git a/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.json b/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.json index 2ac9dcc..5afa9a3 100644 --- a/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.json +++ b/examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.json @@ -7,14 +7,19 @@ }], "expectedReports": [{ "id": "LSODA.sedml/report_1_task1", - "dataSets": ["Time", "pmTORC1"], + "dataSets": [ + {"id": "data_set_time", "label": "Time"}, + {"id": "data_set_pmTORC1", "label": "pmTORC1"} + ], "points": [1001], - "values": { - "Time": { + "values": [{ + "id": "data_set_time", + "label": "Time", + "value": { "0": 0.0, "1000": 3600 } - } + }] }], "expectedPlots": [], "runtimeFailureAlertType": "exception", diff --git a/examples/sbml-fbc/Escherichia-coli-core-metabolism.h5 b/examples/sbml-fbc/Escherichia-coli-core-metabolism.h5 new file mode 100644 index 0000000..81e395d Binary files /dev/null and b/examples/sbml-fbc/Escherichia-coli-core-metabolism.h5 differ diff --git a/examples/sbml-fbc/Escherichia-coli-core-metabolism.json b/examples/sbml-fbc/Escherichia-coli-core-metabolism.json index bbb9148..906b735 100644 --- a/examples/sbml-fbc/Escherichia-coli-core-metabolism.json +++ 
@@ -7,13 +7,17 @@
     }],
     "expectedReports": [{
         "id": "simulation.sedml/report",
-        "dataSets": ["objective_value"],
+        "dataSets": [
+            {"id": "data_set_objective_value", "label": "objective_value"}
+        ],
         "points": [],
-        "values": {
-            "objective_value": 0.8739215069684301
-        }
+        "values": [{
+            "id": "data_set_objective_value",
+            "label": "objective_value",
+            "value": 0.8739215069684301
+        }]
     }],
     "expectedPlots": [],
     "runtimeFailureAlertType": "exception",
     "identifiers": []
-}
+}
\ No newline at end of file
diff --git a/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.h5 b/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.h5
new file mode 100644
index 0000000..414f1db
Binary files /dev/null and b/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.h5 differ
diff --git a/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.json b/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.json
index fc77233..b50bdb4 100644
--- a/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.json
+++ b/examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.json
@@ -7,17 +7,25 @@
     }],
     "expectedReports": [{
         "id": "simulation.sedml/report",
-        "dataSets": ["time", "egf", "erk", "nfkb", "tnfa"],
+        "dataSets": [
+            {"id": "data_set_time", "label": "time"},
+            {"id": "data_set_egf", "label": "egf"},
+            {"id": "data_set_erk", "label": "erk"},
+            {"id": "data_set_nfkb", "label": "nfkb"},
+            {"id": "data_set_tnfa", "label": "tnfa"}
+        ],
         "points": [21],
-        "values": {
-            "time": {
+        "values": [{
+            "id": "data_set_time",
+            "label": "time",
+            "value": {
                 "0": 0,
                 "1": 1,
                 "2": 2,
                 "19": 19,
                 "20": 20
             }
-        }
+        }]
     }],
     "expectedPlots": [],
     "runtimeFailureAlertType": "exception",
@@ -28,4 +36,4 @@
         "namespace": "doi",
         "id": "10.1186/1752-0509-7-135"
     }]
-}
\ No newline at end of file
+}
diff --git a/tests/fixtures/COPASI.specs.json b/tests/fixtures/COPASI.specs.json
index 76f82cc..30d88b6 100644
--- a/tests/fixtures/COPASI.specs.json
+++ b/tests/fixtures/COPASI.specs.json
@@ -5,7 +5,7 @@
     "description": "COPASI is a C++-based software application for the simulation and analysis of biochemical networks and their dynamics.",
     "urls": [{"type": "Home page", "url": "http://copasi.org/"}],
     "image": {
-        "url": "ghcr.io/biosimulators/biosimulators_copasi/copasi:4.30.233",
+        "url": "ghcr.io/biosimulators/biosimulators_copasi/copasi:latest",
         "format": {
             "namespace": "EDAM", "id": "format_3973", "version": null, "supportedFeatures": []
diff --git a/tests/test_case/test_combine_archive.py b/tests/test_case/test_combine_archive.py
index 197afb2..6e3f833 100644
--- a/tests/test_case/test_combine_archive.py
+++ b/tests/test_case/test_combine_archive.py
@@ -1,8 +1,9 @@
 from biosimulators_test_suite.test_case import combine_archive
 from biosimulators_test_suite.test_case.published_project import SimulatorCanExecutePublishedProject
 from biosimulators_test_suite.warnings import TestCaseWarning
+from biosimulators_utils.report.data_model import DataSetResults
 from biosimulators_utils.report.io import ReportWriter
-from biosimulators_utils.sedml.data_model import SedDocument, Task, Report
+from biosimulators_utils.sedml.data_model import SedDocument, Task, Report, DataSet
 import numpy
 import os
 import pandas
@@ -32,11 +33,15 @@ def test_WhenACombineArchiveHasAMasterFileSimulatorOnlyExecutesThisFile_eval_out
         with self.assertRaisesRegex(ValueError, 'did not generate'):
             case.eval_outputs(None, None, None, self.dirname)
 
-        data_frame = pandas.DataFrame(numpy.array([[1, 2, 3], [4, 5, 6]]), index=['A', 'B'])
-        ReportWriter().run(data_frame, self.dirname, 'a.sedml/b')
+        report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B')])
+        data = DataSetResults({
+            'A': numpy.array([1, 2, 3]),
+            'B': numpy.array([4, 5, 6]),
+        })
+        ReportWriter().run(report, data, self.dirname, 'a.sedml/b')
         case.eval_outputs(None, None, None, self.dirname)
 
-        ReportWriter().run(data_frame, self.dirname, 'b.sedml/b')
+        ReportWriter().run(report, data, self.dirname, 'b.sedml/b')
         with self.assertWarnsRegex(TestCaseWarning, ''):
             case.eval_outputs(None, None, None, self.dirname)
@@ -59,12 +64,16 @@ def test_WhenACombineArchiveHasNoMasterFileSimulatorExecutesAllSedDocuments_eval
         with self.assertRaisesRegex(ValueError, 'did not generate'):
             case.eval_outputs(None, None, None, self.dirname)
 
-        data_frame = pandas.DataFrame(numpy.array([[1, 2, 3], [4, 5, 6]]), index=['A', 'B'])
-        ReportWriter().run(data_frame, self.dirname, 'a.sedml/b')
-        ReportWriter().run(data_frame, self.dirname, 'b.sedml/b')
+        report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B')])
+        data = DataSetResults({
+            'A': numpy.array([1, 2, 3]),
+            'B': numpy.array([4, 5, 6]),
+        })
+        ReportWriter().run(report, data, self.dirname, 'a.sedml/b')
+        ReportWriter().run(report, data, self.dirname, 'b.sedml/b')
         case.eval_outputs(None, None, None, self.dirname)
 
-        ReportWriter().run(data_frame, self.dirname, 'c.sedml/b')
+        ReportWriter().run(report, data, self.dirname, 'c.sedml/b')
         with self.assertWarnsRegex(TestCaseWarning, ''):
             case.eval_outputs(None, None, None, self.dirname)
diff --git a/tests/test_case/test_log.py b/tests/test_case/test_log.py
index 258f737..de9a18d 100644
--- a/tests/test_case/test_log.py
+++ b/tests/test_case/test_log.py
@@ -2,7 +2,6 @@
 from biosimulators_test_suite.test_case.published_project import SimulatorCanExecutePublishedProject
 from biosimulators_test_suite.warnings import TestCaseWarning
 from biosimulators_utils.config import get_config
-from biosimulators_utils.report.io import ReportWriter
 from biosimulators_utils.sedml.data_model import SedDocument, Task, Report
 import numpy
 import os
@@ -12,7 +11,7 @@
 import unittest
 
 
-class CombineArchiveTestCaseTest(unittest.TestCase):
+class LogTestCaseTest(unittest.TestCase):
     IMAGE = 'ghcr.io/biosimulators/biosimulators_copasi/copasi:latest'
     CURATED_ARCHIVE_FILENAME = os.path.join(
         os.path.dirname(__file__), '..', '..',
@@ -46,21 +45,23 @@ def test_SimulatorReportsTheStatusOfTheExecutionOfCombineArchives_eval_outputs(s
         with open(log_path, 'w') as file:
             file.write('status: RUNNING\n')
             file.write('sedDocuments:\n')
-            file.write('  doc_1:\n')
+            file.write('  - id: doc_1\n')
             file.write('    status: RUNNING\n')
             file.write('    tasks:\n')
-            file.write('      task_1:\n')
+            file.write('      - id: task_1\n')
             file.write('        status: RUNNING\n')
             file.write('    outputs:\n')
-            file.write('      output_1:\n')
+            file.write('      - id: output_1\n')
             file.write('        status: RUNNING\n')
             file.write('        dataSets:\n')
-            file.write('          data_set_1: RUNNING\n')
-            file.write('      output_2:\n')
+            file.write('          - id: data_set_1\n')
+            file.write('            status: RUNNING\n')
+            file.write('      - id: output_2\n')
             file.write('        status: RUNNING\n')
             file.write('        curves:\n')
-            file.write('          curve_1: RUNNING\n')
-            file.write('      output_3:\n')
+            file.write('          - id: curve_1\n')
+            file.write('            status: RUNNING\n')
+            file.write('      - id: output_3\n')
             file.write('        status: RUNNING\n')
 
         with self.assertWarnsRegex(TestCaseWarning, 'is not valid'):
            self.assertEqual(case.eval_outputs(None, None, None, self.dirname), False)
@@ -68,24 +69,27 @@ def test_SimulatorReportsTheStatusOfTheExecutionOfCombineArchives_eval_outputs(s
         with open(log_path, 'w') as file:
             file.write('status: RUNNING\n')
             file.write('sedDocuments:\n')
-            file.write('  doc_1:\n')
+            file.write('  - id: doc_1\n')
             file.write('    status: RUNNING\n')
             file.write('    tasks:\n')
-            file.write('      task_1:\n')
+            file.write('      - id: task_1\n')
             file.write('        status: RUNNING\n')
             file.write('    outputs:\n')
-            file.write('      output_1:\n')
+            file.write('      - id: output_1\n')
             file.write('        status: RUNNING\n')
             file.write('        dataSets:\n')
-            file.write('          data_set_1: RUNNING\n')
-            file.write('      output_2:\n')
+            file.write('          - id: data_set_1\n')
+            file.write('            status: RUNNING\n')
+            file.write('      - id: output_2\n')
             file.write('        status: RUNNING\n')
             file.write('        curves:\n')
-            file.write('          curve_1: RUNNING\n')
-            file.write('      output_3:\n')
+            file.write('          - id: curve_1\n')
+            file.write('            status: RUNNING\n')
+            file.write('      - id: output_3\n')
             file.write('        status: RUNNING\n')
             file.write('        surfaces:\n')
-            file.write('          surface_1: RUNNING\n')
+            file.write('          - id: surface_1\n')
+            file.write('            status: RUNNING\n')
 
         with self.assertWarnsRegex(TestCaseWarning, 'is not valid. By the end of the execution'):
             self.assertEqual(case.eval_outputs(None, None, None, self.dirname), False)
diff --git a/tests/test_case/test_published_project.py b/tests/test_case/test_published_project.py
index d3723db..153fcab 100644
--- a/tests/test_case/test_published_project.py
+++ b/tests/test_case/test_published_project.py
@@ -1,15 +1,16 @@
 from biosimulators_test_suite import data_model
-from biosimulators_test_suite.exceptions import InvalidOuputsException, SkippedTestCaseException
+from biosimulators_test_suite.exceptions import InvalidOutputsException, SkippedTestCaseException
 from biosimulators_test_suite.results.data_model import TestCaseResult, TestCaseResultType
 from biosimulators_test_suite.test_case.published_project import (
     SimulatorCanExecutePublishedProject, find_cases, SyntheticCombineArchiveTestCase, ConfigurableMasterCombineArchiveTestCase)
-from biosimulators_test_suite.warnings import IgnoredTestCaseWarning, SimulatorRuntimeErrorWarning, InvalidOuputsWarning
+from biosimulators_test_suite.warnings import IgnoredTestCaseWarning, SimulatorRuntimeErrorWarning, InvalidOutputsWarning
 from biosimulators_utils.archive.data_model import Archive, ArchiveFile
 from biosimulators_utils.archive.io import ArchiveWriter
 from biosimulators_utils.combine.data_model import CombineArchive
 from biosimulators_utils.combine.io import CombineArchiveReader
+from biosimulators_utils.report.data_model import DataSetResults
 from biosimulators_utils.report.io import ReportWriter, ReportFormat
-from biosimulators_utils.sedml.data_model import Task
+from biosimulators_utils.sedml.data_model import Task, Report, DataSet
 from unittest import mock
 import biosimulators_utils.simulator.exec
 import functools
@@ -69,11 +70,11 @@ def test_CuratedCombineArchiveTestCase_from_dict(self):
         filename = os.path.join('sbml-core', 'Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json')
         with open(os.path.join(base_path, filename), 'r') as file:
             data = json.load(file)
-        data['expectedReports'][0]['values']['T'] = [0, 1, 2, 3, 4, 5]
+        data['expectedReports'][0]['values'][0]['value'] = [0, 1, 2, 3, 4, 5]
 
         id = ('published_project.SimulatorCanExecutePublishedProject:'
               'sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json')
         case = SimulatorCanExecutePublishedProject(id=id).from_dict(data)
-        numpy.testing.assert_allclose(case.expected_reports[0].values['T'], numpy.array([0, 1, 2, 3, 4, 5]))
+        numpy.testing.assert_allclose(case.expected_reports[0].values['data_set_time'], numpy.array([0, 1, 2, 3, 4, 5]))
 
     def test_CuratedCombineArchiveTestCase_from_dict_error_handling(self):
         base_path = os.path.join(os.path.dirname(__file__), '..', '..', 'examples')
@@ -83,15 +84,15 @@ def test_CuratedCombineArchiveTestCase_from_dict_error_handling(self):
         id = ('published_project.SimulatorCanExecutePublishedProject:'
               'sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json')
 
-        data['expectedReports'][0]['values'] = {'t': [0, 1, 2, 3, 4, 5]}
+        data['expectedReports'][0]['values'] = [{'id': 't', 'label': 't', 'value': [0, 1, 2, 3, 4, 5]}]
         with self.assertRaisesRegex(ValueError, "keys were not in the 'dataSets' property"):
             SimulatorCanExecutePublishedProject(id=id).from_dict(data)
 
-        data['expectedReports'][0]['values'] = {'T': {'5001': 1000.2}}
+        data['expectedReports'][0]['values'] = [{'id': 'T', 'label': 'T', 'value': {'5001': 1000.2}}]
         with self.assertRaisesRegex(ValueError, "Key must be less than or equal to"):
             SimulatorCanExecutePublishedProject(id=id).from_dict(data)
 
-        data['expectedReports'][0]['values'] = {'T': {'5000': 1000.}}
+        data['expectedReports'][0]['values'] = [{'id': 'data_set_time', 'label': 'T', 'value': {'5000': 1000.}}]
         SimulatorCanExecutePublishedProject(id=id).from_dict(data)
 
@@ -107,10 +108,10 @@ def test_CuratedCombineArchiveTestCase_from_json(self):
         self.assertEqual(case.task_requirements[0].simulation_algorithm, 'KISAO_0000019')
         self.assertEqual(len(case.expected_reports), 1)
         self.assertEqual(case.expected_reports[0].id, 'BIOMD0000000912_sim.sedml/BIOMD0000000912_sim')
-        self.assertEqual(case.expected_reports[0].data_sets, set(["time", "T", "E", "I"]))
+        self.assertEqual(set(data_set.label for data_set in case.expected_reports[0].data_sets), set(["time", "T", "E", "I"]))
         self.assertEqual(case.expected_reports[0].points, (5001,))
         self.assertEqual(case.expected_reports[0].values, {
-            "time": {
+            "data_set_time": {
                 (0,): 0.0,
                 (1,): 0.2,
                 (2,): 0.4,
@@ -132,7 +133,7 @@ def test_CuratedCombineArchiveTestCase_eval(self):
         base_path = os.path.join(os.path.dirname(__file__), '..', '..', 'examples')
         filename = os.path.join('sbml-core', 'Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.json')
         case = SimulatorCanExecutePublishedProject().from_json(base_path, filename)
-        case.expected_reports[0].values['T'] = numpy.zeros((5001,))
+        case.expected_reports[0].values['data_set_time'] = numpy.linspace(0., 1000., 5001,)
 
         # skips
         specs = {
@@ -188,24 +189,29 @@ def exec_archive(error, missing_report, extra_report, missing_data_set, extra_da
                 numpy.zeros((points, )),
                 numpy.zeros((points, )),
             ]
-            index = ['time', 'T', 'E', 'I']
+            ids = ['data_set_time', 'data_set_T', 'data_set_E', 'data_set_I']
+            labels = ['time', 'T', 'E', 'I']
 
             if missing_data_set:
                 data.pop()
-                index.pop()
+                ids.pop()
+                labels.pop()
 
             if extra_data_set:
                 data.append(numpy.zeros((5001, )))
-                index.append('extra')
+                ids.append('extra')
+                labels.append('extra')
 
             if incorrect_values:
                 data[0][0] = -1
                 data[1][0] = -1
 
             if extra_report:
-                df = pandas.DataFrame(numpy.array(data), index=index)
-                ReportWriter().run(df, out_dir, 'BIOMD0000000912_sim.sedml/extra', ReportFormat.h5)
+                report = Report(data_sets=[DataSet(id=i, label=l) for i, l in zip(ids, labels)])
+                data_set_results = DataSetResults({i: d for i, d in zip(ids, data)})
for i, d in zip(ids, data)}) + ReportWriter().run(report, data_set_results, out_dir, 'BIOMD0000000912_sim.sedml/extra', ReportFormat.h5) if not missing_report: - df = pandas.DataFrame(numpy.array(data), index=index) - ReportWriter().run(df, out_dir, 'BIOMD0000000912_sim.sedml/BIOMD0000000912_sim', ReportFormat.h5) + report = Report(data_sets=[DataSet(id=i, label=l) for i, l in zip(ids, labels)]) + data_set_results = DataSetResults({i: d for i, d in zip(ids, data)}) + ReportWriter().run(report, data_set_results, out_dir, 'BIOMD0000000912_sim.sedml/BIOMD0000000912_sim', ReportFormat.h5) plot_file = os.path.join(out_dir, 'plot.pdf') with open(plot_file, 'w') as file: @@ -235,79 +241,67 @@ def exec_archive(error, missing_report, extra_report, missing_data_set, extra_da case.eval(specs) case.runtime_failure_alert_type = data_model.AlertType.exception - with self.assertRaisesRegex(InvalidOuputsException, 'No reports were generated'): + with self.assertRaisesRegex(InvalidOutputsException, 'No reports were generated'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, True, False, False, False, False, False, False, False, False)): case.eval(specs) - with self.assertRaisesRegex(InvalidOuputsException, 'could not be read'): + with self.assertRaisesRegex(InvalidOutputsException, 'could not be read'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, True, True, False, False, False, False, False, False, False)): case.eval(specs) - with self.assertWarnsRegex(InvalidOuputsWarning, 'Unexpected reports were produced'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'Unexpected reports were produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, True, False, False, False, False, False, False, False)): case.eval(specs) - with self.assertRaisesRegex(InvalidOuputsException, 'does not contain expected data sets'): + with self.assertRaisesRegex(InvalidOutputsException, 'does not contain expected data sets'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, True, False, False, False, False, False, False)): case.eval(specs) - with self.assertWarnsRegex(InvalidOuputsWarning, 'contains unexpected data sets'): - with mock.patch(exec_archive_method, functools.partial( - exec_archive, False, False, False, False, True, False, False, False, False, False)): - case.eval(specs) - - with self.assertRaisesRegex(InvalidOuputsException, 'incorrect number of points'): + with self.assertRaisesRegex(InvalidOutputsException, 'incorrect number of points'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, True, False, False, False, False)): case.eval(specs) - with self.assertRaisesRegex(InvalidOuputsException, 'does not have expected value'): + with self.assertRaisesRegex(InvalidOutputsException, 'does not have expected value'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, True, False, False, False)): case.eval(specs) - with self.assertWarnsRegex(InvalidOuputsWarning, 'Plots were not produced'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'Plots were not produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, False, True, False, False)): case.eval(specs) - with self.assertWarnsRegex(InvalidOuputsWarning, 'Plots were not produced'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'Plots were not 
produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, False, False, True, False)): case.eval(specs) - with self.assertWarnsRegex(InvalidOuputsWarning, 'Extra plots were not produced'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'Extra plots were not produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, False, False, False, True)): case.eval(specs) case.assert_no_extra_reports = True - with self.assertRaisesRegex(InvalidOuputsException, 'Unexpected reports were produced'): + with self.assertRaisesRegex(InvalidOutputsException, 'Unexpected reports were produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, True, False, False, False, False, False, False, False)): case.eval(specs) case.assert_no_extra_reports = False - case.assert_no_extra_datasets = True - with self.assertRaisesRegex(InvalidOuputsException, 'contains unexpected data sets'): - with mock.patch(exec_archive_method, functools.partial( - exec_archive, False, False, False, False, True, False, False, False, False, False)): - case.eval(specs) - case.assert_no_extra_datasets = False - case.assert_no_missing_plots = True - with self.assertRaisesRegex(InvalidOuputsException, 'Plots were not produced'): + with self.assertRaisesRegex(InvalidOutputsException, 'Plots were not produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, False, True, False, False)): case.eval(specs) case.assert_no_missing_plots = False case.assert_no_extra_plots = True - with self.assertRaisesRegex(InvalidOuputsException, 'Extra plots were not produced'): + with self.assertRaisesRegex(InvalidOutputsException, 'Extra plots were not produced'): with mock.patch(exec_archive_method, functools.partial( exec_archive, False, False, False, False, False, False, False, False, False, True)): case.eval(specs) diff --git a/tests/test_case/test_results_report.py b/tests/test_case/test_results_report.py index 9e292eb..f94eae7 100644 --- a/tests/test_case/test_results_report.py +++ b/tests/test_case/test_results_report.py @@ -1,6 +1,7 @@ from biosimulators_test_suite.test_case import results_report from biosimulators_test_suite.test_case.published_project import SimulatorCanExecutePublishedProject from biosimulators_test_suite.warnings import TestCaseWarning +from biosimulators_utils.report.data_model import DataSetResults from biosimulators_utils.report.io import ReportWriter from biosimulators_utils.sedml.data_model import SedDocument, Report, DataSet import numpy @@ -23,8 +24,8 @@ def setUp(self): def tearDown(self): shutil.rmtree(self.dirname) - def test_SimulatorGeneratesReportsOfSimultionResults_eval_outputs(self): - case = results_report.SimulatorGeneratesReportsOfSimultionResults() + def test_SimulatorGeneratesReportsOfSimulationResults_eval_outputs(self): + case = results_report.SimulatorGeneratesReportsOfSimulationResults() with self.assertRaisesRegex(ValueError, 'Simulator must generate reports'): case.eval_outputs(None, None, None, self.dirname) @@ -34,39 +35,46 @@ def test_SimulatorGeneratesReportsOfSimultionResults_eval_outputs(self): Report( id='report_1', data_sets=[ - DataSet(label='A'), - DataSet(label='B'), + DataSet(id='A', label='A'), + DataSet(id='B', label='B'), ], ) ], ) synthetic_sed_docs = {'test.sedml': doc} - data = numpy.array([[1., 2., 3.], [4., 5., 6.]]) - data_frame = pandas.DataFrame(data, 
index=['A', 'C']) - - ReportWriter().run(data_frame, self.dirname, os.path.join('test.sedml', 'report_1')) + report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='C', label='C')]) + data_set_results = DataSetResults({ + 'A': numpy.array([1., 2., 3.]), + 'C': numpy.array([4., 5., 6.]), + }) + ReportWriter().run(report, data_set_results, self.dirname, os.path.join('test.sedml', 'report_1')) with self.assertRaisesRegex(ValueError, 'did not produce'): case.eval_outputs(None, None, synthetic_sed_docs, self.dirname) - data = numpy.array([[1., 2., 3.], [4., 5., 6.], [7., 8., numpy.nan]]) - data_frame = pandas.DataFrame(data, index=['A', 'B', 'C']) - ReportWriter().run(data_frame, self.dirname, os.path.join('test.sedml', 'report_1')) - with self.assertWarnsRegex(TestCaseWarning, 'produced the following extra'): - self.assertEqual(case.eval_outputs(None, None, synthetic_sed_docs, self.dirname), False) + report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B'), DataSet(id='C', label='C')]) + data_set_results = DataSetResults({ + 'A': numpy.array([1., 2., 3.]), + 'B': numpy.array([4., 5., numpy.nan]), + 'C': numpy.array([7., 8., numpy.nan]), + }) + ReportWriter().run(report, data_set_results, self.dirname, os.path.join('test.sedml', 'report_1')) with self.assertWarnsRegex(TestCaseWarning, 'include `NaN`'): self.assertEqual(case.eval_outputs(None, None, synthetic_sed_docs, self.dirname), False) - data = numpy.array([[1., 2., 3.], [4., 5., 6.]]) - data_frame = pandas.DataFrame(data, index=['A', 'B']) - ReportWriter().run(data_frame, self.dirname, os.path.join('test.sedml', 'report_1')) + report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B')]) + data_set_results = DataSetResults({ + 'A': numpy.array([1., 2., 3.]), + 'B': numpy.array([4., 5., 6.]), + }) + ReportWriter().run(report, data_set_results, self.dirname, os.path.join('test.sedml', 'report_1')) self.assertEqual(case.eval_outputs(None, None, synthetic_sed_docs, self.dirname), True) - def test_SimulatorGeneratesReportsOfSimultionResults(self): + def test_SimulatorGeneratesReportsOfSimulationResults(self): specs = {'image': {'url': self.IMAGE}} curated_case = SimulatorCanExecutePublishedProject(filename=self.CURATED_ARCHIVE_FILENAME) # test synthetic case generated and used to test simulator - case = results_report.SimulatorGeneratesReportsOfSimultionResults( + case = results_report.SimulatorGeneratesReportsOfSimulationResults( published_projects_test_cases=[curated_case]) self.assertTrue(case.eval(specs)) diff --git a/tests/test_case/test_sedml.py b/tests/test_case/test_sedml.py index 133109b..50df016 100644 --- a/tests/test_case/test_sedml.py +++ b/tests/test_case/test_sedml.py @@ -1,15 +1,16 @@ -from biosimulators_test_suite.exceptions import InvalidOuputsException +from biosimulators_test_suite.exceptions import InvalidOutputsException from biosimulators_test_suite.test_case import sedml from biosimulators_test_suite.test_case.published_project import SimulatorCanExecutePublishedProject, SyntheticCombineArchiveTestCase -from biosimulators_test_suite.warnings import IgnoredTestCaseWarning, InvalidOuputsWarning, TestCaseWarning +from biosimulators_test_suite.warnings import IgnoredTestCaseWarning, InvalidOutputsWarning, TestCaseWarning from biosimulators_utils.archive.data_model import Archive, ArchiveFile from biosimulators_utils.archive.io import ArchiveWriter from biosimulators_utils.config import get_config from biosimulators_utils.combine.data_model import CombineArchive, 
CombineArchiveContent, CombineArchiveContentFormat +from biosimulators_utils.report.data_model import DataSetResults from biosimulators_utils.report.io import ReportWriter from biosimulators_utils.sedml.data_model import (SedDocument, Task, Report, DataSet, - DataGenerator, DataGeneratorVariable, UniformTimeCourseSimulation, - Algorithm, DataGeneratorVariableSymbol, Model, + DataGenerator, Variable, UniformTimeCourseSimulation, + Algorithm, Symbol, Model, Plot2D, Curve) from biosimulators_utils.simulator.io import read_simulator_specs from unittest import mock @@ -42,31 +43,31 @@ def test_SimulatorSupportsModelsSimulationsTasksDataGeneratorsAndReports_eval_ou Report( id='b', data_sets=[ - DataSet(label='x'), - DataSet(label='y'), + DataSet(id='x', label='x'), + DataSet(id='y', label='y'), ], ), ], ) - with self.assertRaisesRegex(InvalidOuputsException, 'did not produce the following reports'): + report = doc.outputs[0] + + with self.assertRaisesRegex(InvalidOutputsException, 'did not produce the following reports'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) - data_frame = pandas.DataFrame(numpy.array([[1, 2, 3]]), index=['x']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/b') - with self.assertRaisesRegex(InvalidOuputsException, 'did not produce the following data sets'): + data_set_results = DataSetResults({'x': numpy.array([1, 2, 3])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/b') + with self.assertRaisesRegex(InvalidOutputsException, 'did not produce the following data sets'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) - data_frame = pandas.DataFrame(numpy.array([[1, 2, 3], [4, 5, 6]]), index=['x', 'y']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/b') + data_set_results = DataSetResults({'x': numpy.array([1, 2, 3]), 'y': numpy.array([4, 5, 6])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/b') self.assertTrue(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) - data_frame = pandas.DataFrame(numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), index=['x', 'y', 'z']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/b') - ReportWriter().run(data_frame, self.dirname, 'a.sedml/c') - with self.assertWarnsRegex(InvalidOuputsWarning, 'extra reports'): - self.assertFalse(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) - with self.assertWarnsRegex(InvalidOuputsWarning, 'extra data sets'): + data_set_results = DataSetResults({'x': numpy.array([1, 2, 3]), 'y': numpy.array([4, 5, 6]), 'z': numpy.array([7, 8, 9])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/b') + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/c') + with self.assertWarnsRegex(InvalidOutputsWarning, 'extra reports'): self.assertFalse(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) def test_SimulatorSupportsModelsSimulationsTasksDataGeneratorsAndReports(self): @@ -102,7 +103,7 @@ def get_suitable_sed_doc(sed_docs, case=case): good_doc.data_generators.append( DataGenerator( variables=[ - DataGeneratorVariable( + Variable( id='var_1', task=good_doc.tasks[0] ), @@ -122,7 +123,7 @@ def get_suitable_sed_doc(sed_docs, case=case): }), 'loc-2') good_doc.data_generators[0].variables.append( - DataGeneratorVariable( + Variable( id='var_2', task=good_doc.tasks[0] ), @@ -142,7 +143,7 @@ def get_suitable_sed_doc(sed_docs, case=case): good_doc.data_generators.append( DataGenerator( variables=[ - DataGeneratorVariable( + Variable( 
id='var_1', task=good_doc.tasks[0] ), @@ -152,7 +153,7 @@ def get_suitable_sed_doc(sed_docs, case=case): good_doc.data_generators.append( DataGenerator( variables=[ - DataGeneratorVariable( + Variable( id='var_2', task=good_doc.tasks[0] ), @@ -193,8 +194,9 @@ def test_SimulatorSupportsMultipleTasksPerSedDocument_eval_outputs(self): with self.assertRaisesRegex(ValueError, 'were not generated'): case.eval_outputs(None, None, None, self.dirname) - data_frame = pandas.DataFrame(numpy.array([[1, 2, 3], [4, 5, 6]]), index=['A', 'B']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/b') + report = Report(data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B')]) + data_set_results = DataSetResults({'A': numpy.array([1, 2, 3]), 'B': numpy.array([4, 5, 6])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/b') case.eval_outputs(None, None, None, self.dirname) def test_SimulatorSupportsMultipleTasksPerSedDocument(self): @@ -220,30 +222,33 @@ def test_SimulatorSupportsMultipleReportsPerSedDocument_eval_outputs(self): Report( id='report_1', data_sets=[ - DataSet(label='x'), + DataSet(id='x', label='x'), ], ), Report( id='report_2', data_sets=[ - DataSet(label='y'), + DataSet(id='y', label='y'), ], ), ], ) - with self.assertRaisesRegex(InvalidOuputsException, 'did not produce the following reports'): + with self.assertRaisesRegex(InvalidOutputsException, 'did not produce the following reports'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) - data_frame = pandas.DataFrame(numpy.array([[1, 2, 3]]), index=['x']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_1') - data_frame = pandas.DataFrame(numpy.array([[4, 5, 6]]), index=['y']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_2') + report = doc.outputs[0] + data_set_results = DataSetResults({'x': numpy.array([1, 2, 3])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_1') + report = doc.outputs[1] + data_set_results = DataSetResults({'y': numpy.array([4, 5, 6])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_2') self.assertTrue(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) - data_frame = pandas.DataFrame(numpy.array([[7, 8, 9]]), index=['z']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_3') - with self.assertWarnsRegex(InvalidOuputsWarning, 'extra reports'): + report = Report(data_sets=[DataSet(id='z', label='z')]) + data_set_results = DataSetResults({'z': numpy.array([7, 8, 9])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_3') + with self.assertWarnsRegex(InvalidOutputsWarning, 'extra reports'): self.assertFalse(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) def test_SimulatorSupportsMultipleReportsPerSedDocument(self): @@ -292,11 +297,11 @@ def test_SimulatorSupportsUniformTimeCoursesWithNonZeroOutputStartTimes_build_sy doc.tasks.append(Task(model=doc.models[0], simulation=doc.simulations[0])) doc.data_generators.append(DataGenerator( id='data_gen_x', - variables=[DataGeneratorVariable(id='var_x', task=doc.tasks[0])], + variables=[Variable(id='var_x', task=doc.tasks[0])], math='var_x')) doc.data_generators.append(DataGenerator( id='data_gen_y', - variables=[DataGeneratorVariable(id='var_y', task=doc.tasks[0])], + variables=[Variable(id='var_y', task=doc.tasks[0])], math='var_y')) doc.outputs.append(Report( id='report_1', @@ -314,7 +319,7 @@ def test_SimulatorSupportsUniformTimeCoursesWithNonZeroOutputStartTimes_build_sy 
case.build_synthetic_archive(None, archive, None, sed_docs) self.assertEqual(len(doc.data_generators), 3) - self.assertEqual(doc.data_generators[-1].variables[0].symbol, DataGeneratorVariableSymbol.time) + self.assertEqual(doc.data_generators[-1].variables[0].symbol, Symbol.time) self.assertEqual(len(doc.outputs[0].data_sets), 3) self.assertEqual(doc.outputs[0].data_sets[-1].data_generator, doc.data_generators[-1]) self.assertEqual(doc.outputs[0].data_sets[-1].label, '__data_set_time__') @@ -324,7 +329,7 @@ def test_SimulatorSupportsUniformTimeCoursesWithNonZeroOutputStartTimes_build_sy doc.simulations[0].number_of_points = 100 doc.outputs[0].data_sets[-1].label = 'time' case.build_synthetic_archive(None, archive, None, sed_docs) - self.assertEqual(doc.outputs[0].data_sets[-1].label, '__data_set_time__') + self.assertEqual(doc.outputs[0].data_sets[-1].id, '__data_set_time__') doc.outputs[0].data_sets = [] self.assertFalse(case.is_curated_sed_report_suitable_for_building_synthetic_archive(None, doc.outputs[0])) @@ -340,20 +345,22 @@ def test_SimulatorSupportsUniformTimeCoursesWithNonZeroOutputStartTimes_eval_out Report( id='report_1', data_sets=[ - DataSet(label='__data_set_time__'), + DataSet(id='__data_set_time__', label='__data_set_time__'), ], ), ], ) - data_frame = pandas.DataFrame(numpy.array([[10., 15., numpy.nan]]), index=['__data_set_time__']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_1') + report = doc.outputs[0] + + data_set_results = DataSetResults({'__data_set_time__': numpy.array([10., 15., numpy.nan])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_1') with self.assertRaisesRegex(ValueError, 'did not produce the expected time course'): - with self.assertWarnsRegex(InvalidOuputsWarning, 'include `NaN`'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'include `NaN`'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) - data_frame = pandas.DataFrame(numpy.array([[10., 15., 20.]]), index=['__data_set_time__']) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_1') + data_set_results = DataSetResults({'__data_set_time__': numpy.array([10., 15., 20.])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_1') self.assertTrue(case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname)) def test_SimulatorSupportsUniformTimeCoursesWithNonZeroOutputStartTimes(self): @@ -393,14 +400,14 @@ def test_SimulatorProducesLinear2DPlots_eval_outputs(self): ], ) - with self.assertWarnsRegex(InvalidOuputsWarning, 'did not produce plots'): + with self.assertWarnsRegex(InvalidOutputsWarning, 'did not produce plots'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) plots_path = os.path.join(self.dirname, get_config().PLOTS_PATH) with open(plots_path, 'w') as file: file.write('not a zip') - with self.assertRaisesRegex(InvalidOuputsException, 'invalid zip archive'): + with self.assertRaisesRegex(InvalidOutputsException, 'invalid zip archive'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) plot_1_path = os.path.join(self.dirname, 'plot_1.pdf') @@ -412,7 +419,7 @@ def test_SimulatorProducesLinear2DPlots_eval_outputs(self): ], ) ArchiveWriter().run(archive, plots_path) - with self.assertRaisesRegex(InvalidOuputsException, 'invalid PDF'): + with self.assertRaisesRegex(InvalidOutputsException, 'invalid PDF'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) with open(plot_1_path, 'wb') as file: @@ -420,7 +427,7 @@ def 
test_SimulatorProducesLinear2DPlots_eval_outputs(self): writer.addBlankPage(width=20, height=20) writer.write(file) ArchiveWriter().run(archive, plots_path) - with self.assertRaisesRegex(InvalidOuputsException, 'did not produce'): + with self.assertRaisesRegex(InvalidOutputsException, 'did not produce'): case.eval_outputs(None, None, {'./a.sedml': doc}, self.dirname) plot_2_path = os.path.join(self.dirname, 'plot_2.pdf') @@ -557,20 +564,19 @@ def test_SimulatorProducesReportsWithCuratedNumberOfDimensions(self): outputs=[ Report( id='report_1', + data_sets=[DataSet(id='A', label='A'), DataSet(id='B', label='B'), DataSet(id='C', label='C')], ), ], ) - data = numpy.array([numpy.array(1.), numpy.array(2.), numpy.array(3.), ]) - index = ['A', 'B', 'C'] - data_frame = pandas.DataFrame(data, index=index) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_1') + report = doc.outputs[0] + + data_set_results = DataSetResults({'A': numpy.array(1.), 'B': numpy.array(2.), 'C': numpy.array(3.)}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_1') self.assertTrue(case.eval_outputs(specs, None, {'./a.sedml': doc}, self.dirname)) - data = numpy.array([numpy.array([1., 2.]), numpy.array([2., 3.]), numpy.array([3., 4.]), ]) - index = ['A', 'B', 'C'] - data_frame = pandas.DataFrame(data, index=index) - ReportWriter().run(data_frame, self.dirname, 'a.sedml/report_1') + data_set_results = DataSetResults({'A': numpy.array([1., 2.]), 'B': numpy.array([2., 3.]), 'C': numpy.array([3., 4.])}) + ReportWriter().run(report, data_set_results, self.dirname, 'a.sedml/report_1') self.assertFalse(case.eval_outputs(specs, None, {'./a.sedml': doc}, self.dirname)) # everything diff --git a/tests/test_examples.py b/tests/test_examples.py new file mode 100644 index 0000000..30f005f --- /dev/null +++ b/tests/test_examples.py @@ -0,0 +1,53 @@ +from biosimulators_utils.combine.io import CombineArchiveReader +from biosimulators_utils.report.data_model import ReportFormat +from biosimulators_utils.report.io import ReportReader +from biosimulators_utils.sedml.data_model import Report, DataSet +from biosimulators_utils.sedml.io import SedmlSimulationReader +import glob +import json +import mock +import numpy +import os +import shutil +import tempfile +import unittest + + +class ExamplesTestCase(unittest.TestCase): + def setUp(self): + self.dirname = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.dirname) + + def test(self): + examples_dir = os.path.join(os.path.dirname(__file__), '..', 'examples') + for example_filename in glob.glob(os.path.join(examples_dir, '**', '*.json')): + example_base_dir = os.path.dirname(example_filename) + reports_filename = example_filename.replace('.json', '.h5') + if not os.path.isfile(reports_filename): + continue + + with open(example_filename, 'rb') as file: + specs = json.load(file) + + archive_filename = os.path.join(example_base_dir, specs['filename']) + archive_dirname = os.path.join(self.dirname, specs['filename'].replace('.omex', '')) + CombineArchiveReader().run(archive_filename, archive_dirname) + + report_path = specs['filename'].replace('.omex', '.h5') + for expectedReport in specs['expectedReports']: + sedml_location = os.path.dirname(expectedReport['id']) + report_id = os.path.basename(expectedReport['id']) + sedml_filename = os.path.join(archive_dirname, sedml_location) + doc = SedmlSimulationReader().run(sedml_filename) + + report = next(output for output in doc.outputs if output.id == report_id) + + with
mock.patch.dict(os.environ, {'H5_REPORTS_PATH': report_path}): + report_results = ReportReader().run(report, example_base_dir, expectedReport['id'], format=ReportFormat.h5) + + self.assertEqual(set(report_results.keys()), set([data_set.id for data_set in report.data_sets])) + self.assertEqual(report_results[report.data_sets[0].id].shape, tuple(expectedReport['points'])) + for data_set_result in report_results.values(): + self.assertFalse(numpy.any(numpy.isnan(data_set_result))) diff --git a/tests/test_examples.sh b/tests/test_examples.sh new file mode 100644 index 0000000..55ed8e4 --- /dev/null +++ b/tests/test_examples.sh @@ -0,0 +1,42 @@ +export REPORT_FORMATS=h5 +export PLOT_FORMATS= +export BUNDLE_OUTPUTS=1 +export KEEP_INDIVIDUAL_OUTPUTS=0 + +bionetgen -o examples/bngl/ -i examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.omex +mv examples/bngl/reports.h5 examples/bngl/Dolan-PLoS-Comput-Biol-2015-NHEJ.h5 + +bionetgen -o examples/bngl/ -i examples/bngl/test-bngl.omex +mv examples/bngl/reports.h5 examples/bngl/test-bngl.h5 + +tellurium -o examples/sbml-core/ -i examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.h5 + +tellurium -o examples/sbml-core/ -i examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint.h5 + +pysces -o examples/sbml-core/ -i examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Edelstein-Biol-Cybern-1996-Nicotinic-excitation.h5 + +tellurium -o examples/sbml-core/ -i examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Parmar-BMC-Syst-Biol-2017-iron-distribution.h5 + +amici -o examples/sbml-core/ -i examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Szymanska-J-Theor-Biol-2009-HSP-synthesis.h5 + +copasi -o examples/sbml-core/ -i examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Tomida-EMBO-J-2003-NFAT-translocation.h5 + +copasi -o examples/sbml-core/ -i examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.omex +mv examples/sbml-core/reports.h5 examples/sbml-core/Varusai-Sci-Rep-2018-mTOR-signaling-LSODA-LSODAR-SBML.h5 + +cobrapy -o examples/sbml-fbc/ -i examples/sbml-fbc/Escherichia-coli-core-metabolism.omex +mv examples/sbml-fbc/reports.h5 examples/sbml-fbc/Escherichia-coli-core-metabolism.h5 + +boolnet -o examples/sbml-qual/ -i examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.omex +mv examples/sbml-qual/reports.h5 examples/sbml-qual/Chaouiya-BMC-Syst-Biol-2013-EGF-TNFa-signaling.h5 + +rm examples/bngl/log.yml +rm examples/sbml-core/log.yml +rm examples/sbml-fbc/log.yml +rm examples/sbml-qual/log.yml
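For reference, the report I/O API that these tests exercise can be driven end-to-end as in the sketch below. This is a minimal, hypothetical example rather than code from the patch: it relies only on the ReportWriter().run(report, data_set_results, base_dir, report_id, format) and ReportReader().run(report, base_dir, report_id, format=...) signatures used in the tests above, and the report id 'a.sedml/report_1' and the numeric data are illustrative.

import numpy
import tempfile

from biosimulators_utils.report.data_model import DataSetResults, ReportFormat
from biosimulators_utils.report.io import ReportReader, ReportWriter
from biosimulators_utils.sedml.data_model import DataSet, Report

# Data sets are addressed by id; labels are only for display
# (the id 'data_set_time' here is illustrative, not from the patch)
report = Report(
    id='report_1',
    data_sets=[
        DataSet(id='data_set_time', label='time'),
        DataSet(id='data_set_T', label='T'),
    ],
)

# Results are keyed by data set id, where the old API keyed rows
# of a pandas DataFrame by label
data_set_results = DataSetResults({
    'data_set_time': numpy.linspace(0., 10., 11),
    'data_set_T': numpy.zeros((11,)),
})

base_dir = tempfile.mkdtemp()
ReportWriter().run(report, data_set_results, base_dir,
                   'a.sedml/report_1', ReportFormat.h5)

# Reading the report back returns results keyed by the same ids
results = ReportReader().run(report, base_dir,
                             'a.sedml/report_1', format=ReportFormat.h5)
assert set(results.keys()) == {data_set.id for data_set in report.data_sets}

The expected outputs consumed by tests/test_examples.py can be regenerated by running tests/test_examples.sh from the root of the repository, assuming the simulator command-line programs it invokes (bionetgen, tellurium, pysces, amici, copasi, cobrapy, boolnet) are installed.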