diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 5befd21e7..a06d0280a 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -8,7 +8,7 @@ version: 2
 build:
   os: ubuntu-20.04
   tools:
-    python: '3.8'
+    python: '3.11'
 
 # Build documentation in the docs/ directory with Sphinx
 sphinx:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c36666c7f..d3b7575d9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@
 - Expose `starting_time` in `mock_ElectricalSeries`. @h-mayorquin [#1805](https://github.com/NeurodataWithoutBorders/pynwb/pull/1805)
 - Enhance `get_data_in_units()` to work with objects that have a `channel_conversion` attribute like the `ElectricalSeries`. @h-mayorquin [#1806](https://github.com/NeurodataWithoutBorders/pynwb/pull/1806)
 - Refactor validation CLI tests to use `{sys.executable} -m coverage` to use the same Python version and run correctly on Debian systems. @yarikoptic [#1811](https://github.com/NeurodataWithoutBorders/pynwb/pull/1811)
+- Fixed tests to address newly caught validation errors. @rly [#1839](https://github.com/NeurodataWithoutBorders/pynwb/pull/1839)
 
 ### Bug fixes
 - Fix bug where namespaces were loaded in "w-" mode. @h-mayorquin [#1795](https://github.com/NeurodataWithoutBorders/pynwb/pull/1795)
diff --git a/docs/source/overview_citing.rst b/docs/source/overview_citing.rst
index bc72e017c..8fda20363 100644
--- a/docs/source/overview_citing.rst
+++ b/docs/source/overview_citing.rst
@@ -35,7 +35,7 @@ If you use PyNWB in your research, please use the following citation:
 Using RRID
 ----------
 
-* ResourceID: `SCR_017452 `_
+* ResourceID: `SCR_017452 `_
 
 * Proper Citation: **(PyNWB, RRID:SCR_017452)**
 
diff --git a/requirements-min.txt b/requirements-min.txt
index 8f52348f1..816d53d43 100644
--- a/requirements-min.txt
+++ b/requirements-min.txt
@@ -1,6 +1,6 @@
 # minimum versions of package dependencies for installing PyNWB
 h5py==2.10  # support for selection of datasets with list of indices added in 2.10
-hdmf==3.9.0
+hdmf==3.12.0
 numpy==1.18
 pandas==1.1.5
 python-dateutil==2.7.3
diff --git a/requirements.txt b/requirements.txt
index 2ad7b813e..d09ec7425 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 # pinned dependencies to reproduce an entire development environment to use PyNWB
-h5py==3.8.0
-hdmf==3.9.0
-numpy==1.24.2
-pandas==2.0.0
+h5py==3.10.0
+hdmf==3.12.0
+numpy==1.26.1
+pandas==2.1.2
 python-dateutil==2.8.2
-setuptools==65.5.1
+setuptools==65.5.1
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 90aebf55f..0e48c269a 100755
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
 
 reqs = [
     'h5py>=2.10',
-    'hdmf>=3.9.0',
+    'hdmf>=3.12.0',
     'numpy>=1.16',
     'pandas>=1.1.5',
     'python-dateutil>=2.7.3',
diff --git a/src/pynwb/validate.py b/src/pynwb/validate.py
index 62aa41426..827249cbb 100644
--- a/src/pynwb/validate.py
+++ b/src/pynwb/validate.py
@@ -120,7 +120,11 @@ def _get_cached_namespaces_to_validate(
     is_method=False,
 )
 def validate(**kwargs):
-    """Validate NWB file(s) against a namespace or its cached namespaces."""
+    """Validate NWB file(s) against a namespace or its cached namespaces.
+
+    NOTE: If an io object is provided and no namespace name is specified, then the file will be validated
+    against the core namespace, even if use_cached_namespaces is True.
+    """
     from . import NWBHDF5IO  # TODO: modularize to avoid circular import
 
     io, paths, use_cached_namespaces, namespace, verbose, driver = getargs(
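For context on the new docstring note in `validate.py`, here is a minimal sketch of the caveat it describes (the file name is hypothetical): when an open `io` object is passed without a `namespace`, validation runs against the core namespace only, even with `use_cached_namespaces=True`. Passing `paths` instead validates against the namespaces cached in the file.

```python
from pynwb import NWBHDF5IO, validate

# "example.nwb" is a hypothetical file; sketch of the caveat documented above.
with NWBHDF5IO("example.nwb", mode="r") as io:
    # With an io object and no namespace name, the file is validated
    # against the core namespace, even if use_cached_namespaces is True.
    errors = validate(io=io)
    print(errors)
```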
diff --git a/tests/back_compat/test_read.py b/tests/back_compat/test_read.py
index 792d26e7a..16a119690 100644
--- a/tests/back_compat/test_read.py
+++ b/tests/back_compat/test_read.py
@@ -29,6 +29,12 @@ class TestReadOldVersions(TestCase):
                              "- expected an array of shape '[None]', got non-array data 'one publication'")],
         '1.0.3_str_pub.nwb': [("root/general/related_publications (general/related_publications): incorrect shape "
                                "- expected an array of shape '[None]', got non-array data 'one publication'")],
+        '1.5.1_timeseries_no_data.nwb': [("TimeSeries/data/data (acquisition/test_timeseries/data): argument missing")],
+        '1.5.1_timeseries_no_unit.nwb': [("TimeSeries/data/unit (acquisition/test_timeseries/data): argument missing")],
+        '1.5.1_imageseries_no_data.nwb': [("ImageSeries/data/data (acquisition/test_imageseries/data): "
+                                           "argument missing")],
+        '1.5.1_imageseries_no_unit.nwb': [("ImageSeries/data/unit (acquisition/test_imageseries/data): "
+                                           "argument missing")],
     }
 
     def get_io(self, path):
diff --git a/tests/integration/hdf5/test_misc.py b/tests/integration/hdf5/test_misc.py
index 6afd7971e..cd9ab1706 100644
--- a/tests/integration/hdf5/test_misc.py
+++ b/tests/integration/hdf5/test_misc.py
@@ -109,20 +109,36 @@ class TestDecompositionSeriesIO(NWBH5IOMixin, TestCase):
 
     def setUpContainer(self):
         """ Return the test DecompositionSeries to read/write """
-        self.timeseries = TimeSeries(name='dummy timeseries', description='desc',
-                                     data=np.ones((3, 3)), unit='flibs',
-                                     timestamps=np.ones((3,)))
-        bands = DynamicTable(name='bands', description='band info for LFPSpectralAnalysis', columns=[
-            VectorData(name='band_name', description='name of bands', data=['alpha', 'beta', 'gamma']),
-            VectorData(name='band_limits', description='low and high cutoffs in Hz', data=np.ones((3, 2)))
-        ])
-        spec_anal = DecompositionSeries(name='LFPSpectralAnalysis',
-                                        description='my description',
-                                        data=np.ones((3, 3, 3)),
-                                        timestamps=np.ones((3,)),
-                                        source_timeseries=self.timeseries,
-                                        metric='amplitude',
-                                        bands=bands)
+        self.timeseries = TimeSeries(
+            name='dummy timeseries',
+            description='desc',
+            data=np.ones((3, 3)),
+            unit='flibs',
+            timestamps=np.ones((3,)),
+        )
+        bands = DynamicTable(
+            name='bands',
+            description='band info for LFPSpectralAnalysis',
+            columns=[
+                VectorData(name='band_name', description='name of bands', data=['alpha', 'beta', 'gamma']),
+                VectorData(name='band_limits', description='low and high cutoffs in Hz', data=np.ones((3, 2))),
+                VectorData(name='band_mean', description='mean gaussian filters in Hz', data=np.ones((3,))),
+                VectorData(
+                    name='band_stdev',
+                    description='standard deviation of gaussian filters in Hz',
+                    data=np.ones((3,))
+                ),
+            ],
+        )
+        spec_anal = DecompositionSeries(
+            name='LFPSpectralAnalysis',
+            description='my description',
+            data=np.ones((3, 3, 3)),
+            timestamps=np.ones((3,)),
+            source_timeseries=self.timeseries,
+            metric='amplitude',
+            bands=bands,
+        )
 
         return spec_anal
 
@@ -144,27 +160,48 @@
     def make_electrode_table(self):
         """ Make an electrode table, electrode group, and device """
         self.table = get_electrode_table()
         self.dev1 = Device(name='dev1')
-        self.group = ElectrodeGroup(name='tetrode1',
-                                    description='tetrode description',
-                                    location='tetrode location',
-                                    device=self.dev1)
-        for i in range(4):
+        self.group = ElectrodeGroup(
+            name='tetrode1',
+            description='tetrode description',
+            location='tetrode location',
+            device=self.dev1
+        )
+        for _ in range(4):
             self.table.add_row(location='CA1', group=self.group, group_name='tetrode1')
 
     def setUpContainer(self):
         """ Return the test ElectricalSeries to read/write """
         self.make_electrode_table(self)
-        region = DynamicTableRegion(name='source_channels',
-                                    data=[0, 2],
-                                    description='the first and third electrodes',
-                                    table=self.table)
+        region = DynamicTableRegion(
+            name='source_channels',
+            data=[0, 2],
+            description='the first and third electrodes',
+            table=self.table
+        )
         data = np.random.randn(100, 2, 30)
         timestamps = np.arange(100)/100
-        ds = DecompositionSeries(name='test_DS',
-                                 data=data,
-                                 source_channels=region,
-                                 timestamps=timestamps,
-                                 metric='amplitude')
+        bands = DynamicTable(
+            name='bands',
+            description='band info for LFPSpectralAnalysis',
+            columns=[
+                VectorData(name='band_name', description='name of bands', data=['alpha', 'beta', 'gamma']),
+                VectorData(name='band_limits', description='low and high cutoffs in Hz', data=np.ones((3, 2))),
+                VectorData(name='band_mean', description='mean gaussian filters in Hz', data=np.ones((3,))),
+                VectorData(
+                    name='band_stdev',
+                    description='standard deviation of gaussian filters in Hz',
+                    data=np.ones((3,))
+                ),
+            ],
+        )
+        ds = DecompositionSeries(
+            name='test_DS',
+            data=data,
+            source_channels=region,
+            timestamps=timestamps,
+            metric='amplitude',
+            bands=bands,
+        )
 
         return ds
 
     def addContainer(self, nwbfile):
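The `band_mean` and `band_stdev` columns added throughout these tests match the columns the NWB schema defines for `DecompositionSeries.bands`, which appears to be what the "newly caught validation errors" in the changelog refer to. A standalone sketch of a bands table carrying all four columns (placeholder values):

```python
import numpy as np
from hdmf.common import DynamicTable, VectorData

# Placeholder data; mirrors the bands table built in the tests above.
bands = DynamicTable(
    name='bands',
    description='band info for LFPSpectralAnalysis',
    columns=[
        VectorData(name='band_name', description='name of bands',
                   data=['alpha', 'beta', 'gamma']),
        VectorData(name='band_limits', description='low and high cutoffs in Hz',
                   data=np.ones((3, 2))),
        VectorData(name='band_mean', description='mean gaussian filters in Hz',
                   data=np.ones((3,))),
        VectorData(name='band_stdev', description='standard deviation of gaussian filters in Hz',
                   data=np.ones((3,))),
    ],
)
```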
diff --git a/tests/integration/ros3/test_ros3.py b/tests/integration/ros3/test_ros3.py
index c2f7b562d..95a891760 100644
--- a/tests/integration/ros3/test_ros3.py
+++ b/tests/integration/ros3/test_ros3.py
@@ -4,6 +4,7 @@
 from pynwb.testing import TestCase
 import urllib.request
 import h5py
+import warnings
 
 
 class TestRos3Streaming(TestCase):
@@ -28,16 +29,28 @@ def setUp(self):
 
     def test_read(self):
         s3_path = 'https://dandiarchive.s3.amazonaws.com/ros3test.nwb'
-        with NWBHDF5IO(s3_path, mode='r', driver='ros3') as io:
-            nwbfile = io.read()
-            test_data = nwbfile.acquisition['ts_name'].data[:]
-            self.assertEqual(len(test_data), 3)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                message=r"Ignoring cached namespace .*",
+                category=UserWarning,
+            )
+            with NWBHDF5IO(s3_path, mode='r', driver='ros3') as io:
+                nwbfile = io.read()
+                test_data = nwbfile.acquisition['ts_name'].data[:]
+                self.assertEqual(len(test_data), 3)
 
     def test_dandi_read(self):
-        with NWBHDF5IO(path=self.s3_test_path, mode='r', driver='ros3') as io:
-            nwbfile = io.read()
-            test_data = nwbfile.acquisition['TestData'].data[:]
-            self.assertEqual(len(test_data), 3)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                message=r"Ignoring cached namespace .*",
+                category=UserWarning,
+            )
+            with NWBHDF5IO(path=self.s3_test_path, mode='r', driver='ros3') as io:
+                nwbfile = io.read()
+                test_data = nwbfile.acquisition['TestData'].data[:]
+                self.assertEqual(len(test_data), 3)
 
     def test_dandi_get_cached_namespaces(self):
         expected_namespaces = ["core"]
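The suppression above follows the standard-library pattern; a self-contained sketch (the regex matches the `UserWarning` that PyNWB emits when it ignores a cached namespace, and the `warnings.warn` call stands in for the `NWBHDF5IO` read):

```python
import warnings

# Scope the filter to the block that raises the expected warning,
# instead of silencing UserWarning globally.
with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore",
        message=r"Ignoring cached namespace .*",
        category=UserWarning,
    )
    # Stand-in for the streaming read; this warning is now suppressed.
    warnings.warn("Ignoring cached namespace 'hdmf-common'", UserWarning)
```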
diff --git a/tests/unit/test_misc.py b/tests/unit/test_misc.py
index 99e0d6f87..9350d1d2e 100644
--- a/tests/unit/test_misc.py
+++ b/tests/unit/test_misc.py
@@ -33,7 +33,13 @@ def test_init(self):
                                 timestamps=[1., 2., 3.])
         bands = DynamicTable(name='bands', description='band info for LFPSpectralAnalysis', columns=[
             VectorData(name='band_name', description='name of bands', data=['alpha', 'beta', 'gamma']),
-            VectorData(name='band_limits', description='low and high cutoffs in Hz', data=np.ones((3, 2)))
+            VectorData(name='band_limits', description='low and high cutoffs in Hz', data=np.ones((3, 2))),
+            VectorData(name='band_mean', description='mean gaussian filters in Hz', data=np.ones((3,))),
+            VectorData(
+                name='band_stdev',
+                description='standard deviation of gaussian filters in Hz',
+                data=np.ones((3,))
+            ),
         ])
         spec_anal = DecompositionSeries(name='LFPSpectralAnalysis',
                                         description='my description',
@@ -49,6 +55,8 @@ def test_init(self):
         np.testing.assert_equal(spec_anal.timestamps, [1., 2., 3.])
         self.assertEqual(spec_anal.bands['band_name'].data, ['alpha', 'beta', 'gamma'])
         np.testing.assert_equal(spec_anal.bands['band_limits'].data, np.ones((3, 2)))
+        np.testing.assert_equal(spec_anal.bands['band_mean'].data, np.ones((3,)))
+        np.testing.assert_equal(spec_anal.bands['band_stdev'].data, np.ones((3,)))
         self.assertEqual(spec_anal.source_timeseries, timeseries)
         self.assertEqual(spec_anal.metric, 'amplitude')
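As a quick interactive counterpart to the new assertions, the `bands` table from the earlier sketch can also be inspected as a DataFrame via `DynamicTable.to_dataframe()` (assumes pandas is installed):

```python
# Continues the `bands` sketch above: view the new columns as a DataFrame.
df = bands.to_dataframe()
print(df[['band_mean', 'band_stdev']])  # one row per band, all ones here
```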