Skip to content

Commit

Permalink
Merge branch 'dev' into deploy
Browse files Browse the repository at this point in the history
  • Loading branch information
jonrkarr committed Jan 22, 2021
2 parents a119d52 + b4ef936 commit 83cdf34
Show file tree
Hide file tree
Showing 40 changed files with 599 additions and 309 deletions.
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ This repository follows standard Python conventions:
* `README.md`: Overview of this repository
* `biosimulators_test_suite/`: Source code for this package
* `examples`: Example modeling projects that the source code uses to test biosimulation software tools
* `examples-ouputs`: Example outputs (reports and plots) of executing the example modeling projects
* `examples-outputs`: Example outputs (reports and plots) of executing the example modeling projects
* `tests/`: Unit tests for the code for this package
* `setup.py`: pip installation script for this package
* `setup.cfg`: Configuration for the pip installation script
Expand Down
2 changes: 1 addition & 1 deletion biosimulators_test_suite/_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = '0.1.16'
__version__ = '0.1.17'
9 changes: 9 additions & 0 deletions biosimulators_test_suite/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import os


class Config(object):
    """ Configuration for the test suite

    Attributes:
        pull_docker_image (:obj:`bool`): whether the simulator's Docker image
            should be pulled before it is used
    """

    def __init__(self, pull_docker_image=None):
        """
        Args:
            pull_docker_image (:obj:`bool`, optional): explicit setting; when
                :obj:`None`, fall back to the ``PULL_DOCKER_IMAGE`` environment
                variable (unset defaults to ``'1'``, i.e. pull)
        """
        if pull_docker_image is None:
            # case-insensitive: '1'/'true' enable pulling; anything else disables it
            env_value = os.getenv('PULL_DOCKER_IMAGE', '1')
            self.pull_docker_image = env_value.lower() in ['1', 'true']
        else:
            self.pull_docker_image = pull_docker_image
33 changes: 27 additions & 6 deletions biosimulators_test_suite/data_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
:License: MIT
"""

from .config import Config
from .exceptions import SkippedTestCaseException # noqa: F401
from biosimulators_utils.image import get_docker_image
import abc
Expand All @@ -14,7 +15,7 @@

__all__ = [
'OutputMedium',
'TestCase', 'SedTaskRequirements', 'ExpectedSedReport', 'ExpectedSedPlot',
'TestCase', 'SedTaskRequirements', 'ExpectedSedReport', 'ExpectedSedDataSet', 'ExpectedSedPlot',
'AlertType',
]

Expand Down Expand Up @@ -61,7 +62,7 @@ def eval(self, specifications):
"""
pass # pragma: no cover

def get_simulator_docker_image(self, specifications, pull=True):
def get_simulator_docker_image(self, specifications, pull=None):
""" Get the Docker image for a simulator, pulling if necessary
Args:
Expand All @@ -72,6 +73,8 @@ def get_simulator_docker_image(self, specifications, pull=True):
"""
docker_client = docker.from_env()
image_url = specifications['image']['url']
if pull is None:
pull = Config().pull_docker_image
return get_docker_image(docker_client, image_url, pull=pull)


Expand All @@ -98,25 +101,43 @@ class ExpectedSedReport(object):
Attributes
id (:obj:`str`): id
data_sets (:obj:`list` of :obj:`str`): ids of expected datasets
data_sets (:obj:`list` of :obj:`ExpectedSedDataSet`): labels of expected data sets
points (:obj:`tuple` of :obj:`int`): number of expected points of
values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`): expected values of datasets or elements of datasets
values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`): expected values of data sets or elements of data sets
"""

def __init__(self, id=None, data_sets=None, points=None, values=None):
    """ Construct an expected SED report.

    Args:
        id (:obj:`str`, optional): id
        data_sets (:obj:`set` of :obj:`ExpectedSedDataSet`, optional): labels of expected data sets
        points (:obj:`tuple` of :obj:`int`, optional): number of expected points
        values (:obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`list`, optional): expected values of data sets or elements of data sets
    """
    self.id = id
    # default to an empty set; NOTE(review): a falsy argument (e.g. an empty
    # list) is also replaced by set() — presumably intentional, confirm
    self.data_sets = data_sets or set()
    self.points = points
    self.values = values


class ExpectedSedDataSet(object):
    """ An expected data set of a SED report

    Attributes:
        id (:obj:`str`): id
        label (:obj:`str`): label
    """

    def __init__(self, id=None, label=None):
        """
        Args:
            id (:obj:`str`, optional): id
            label (:obj:`str`, optional): label
        """
        self.id = id
        self.label = label


class ExpectedSedPlot(object):
""" An expected SED report
Expand Down
4 changes: 2 additions & 2 deletions biosimulators_test_suite/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

__all__ = [
'TestCaseException',
'InvalidOuputsException',
'InvalidOutputsException',
'SkippedTestCaseException',
]

Expand All @@ -18,7 +18,7 @@ class TestCaseException(Exception):
pass # pragma: no cover


class InvalidOuputsException(TestCaseException):
class InvalidOutputsException(TestCaseException):
""" Exception raised when outputs of execution of COMBINE/OMEX archive are not as expected """
pass # pragma: no cover

Expand Down
32 changes: 17 additions & 15 deletions biosimulators_test_suite/test_case/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,11 @@ def eval_outputs(self, specifications, synthetic_archive, synthetic_sed_docs, ou

try:
with open(log_path, 'r') as file:
status = yaml.load(file)
log = yaml.load(file)
except Exception as exception:
warnings.warn('The execution status report produced by the simulator is not valid:\n\n {}'.format(
str(exception).replace('\n', '\n ')), TestCaseWarning)
msg = 'The execution status report produced by the simulator is not valid:\n\n {}'.format(
str(exception).replace('\n', '\n '))
warnings.warn(msg, TestCaseWarning)
return False

self._status_valid = True
Expand All @@ -69,26 +70,27 @@ def is_status_valid(status, self=self):
self._status_valid = False

try:
is_status_valid(status['status'])
is_status_valid(log['status'])

for doc in status['sedDocuments'].values():
is_status_valid(doc['status'])
for doc_log in log['sedDocuments']:
is_status_valid(doc_log['status'])

for task in doc['tasks'].values():
is_status_valid(task['status'])
for task_log in doc_log['tasks']:
is_status_valid(task_log['status'])

for output in doc['outputs'].values():
is_status_valid(output['status'])
for output_log in doc_log['outputs']:
is_status_valid(output_log['status'])

els = output.get('dataSets', output.get('curves', output.get('surfaces', None)))
els = output_log.get('dataSets', output_log.get('curves', output_log.get('surfaces', None)))
if els is None:
raise KeyError('Outputs must have one of the keys `dataSets`, `curves` or `surfaces`')
for status in els.values():
is_status_valid(status)
for el in els:
is_status_valid(el['status'])

except Exception as exception:
warnings.warn('The execution status report produced by the simulator is not valid:\n\n {}'.format(
str(exception).replace('\n', '\n ')), TestCaseWarning)
msg = 'The execution status report produced by the simulator is not valid:\n\n {}'.format(
str(exception).replace('\n', '\n '))
warnings.warn(msg, TestCaseWarning)
return False

if not self._status_valid:
Expand Down
Loading

0 comments on commit 83cdf34

Please sign in to comment.