diff --git a/.codespellrc b/.codespellrc index ad57c4b81..a38689dfe 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,7 +1,8 @@ [codespell] # in principle .ipynb can be corrected -- a good number of typos there # nwb-schema -- excluding since submodule, should have its own fixes/checks -skip = .git,*.pdf,*.svg,venvs,env,*.ipynb,nwb-schema +skip = .git,*.pdf,*.svg,venvs,env,nwb-schema +ignore-regex = ^\s*"image/\S+": ".* # it is optin in a url # potatos - demanded to be left alone, autogenerated ignore-words-list = optin,potatos diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 225516f20..020e86206 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -60,7 +60,7 @@ body: attributes: label: Python Version options: - - "3.7" + - "lower version (unsupported)" - "3.8" - "3.9" - "3.10" diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 7a1e8dc04..7aa79c9e7 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -16,4 +16,4 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Codespell - uses: codespell-project/actions-codespell@v1 + uses: codespell-project/actions-codespell@v2 diff --git a/.github/workflows/run_all_tests.yml b/.github/workflows/run_all_tests.yml index 6c428b83f..c47941c21 100644 --- a/.github/workflows/run_all_tests.yml +++ b/.github/workflows/run_all_tests.yml @@ -22,24 +22,21 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } - - { name: linux-python3.8 , test-tox-env: py38 , build-tox-env: build-py38 , python-ver: "3.8" , os: ubuntu-latest } + - { name: linux-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } - { name: linux-python3.9 , test-tox-env: py39 , build-tox-env: build-py39 , python-ver: "3.9" , os: ubuntu-latest } - { name: linux-python3.10 , test-tox-env: py310 , build-tox-env: build-py310 , python-ver: "3.10", os: ubuntu-latest } - { name: linux-python3.11 , test-tox-env: py311 , build-tox-env: build-py311 , python-ver: "3.11", os: ubuntu-latest } - { name: linux-python3.11-optional , test-tox-env: py311-optional , build-tox-env: build-py311-optional , python-ver: "3.11", os: ubuntu-latest } - { name: linux-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: ubuntu-latest } - { name: linux-python3.11-prerelease , test-tox-env: py311-prerelease, build-tox-env: build-py311-prerelease, python-ver: "3.11", os: ubuntu-latest } - - { name: windows-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: windows-latest } - - { name: windows-python3.8 , test-tox-env: py38 , build-tox-env: build-py38 , python-ver: "3.8" , os: windows-latest } + - { name: windows-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: windows-latest } - { name: windows-python3.9 , test-tox-env: py39 , build-tox-env: build-py39 , python-ver: "3.9" , os: windows-latest } - { name: windows-python3.10 , test-tox-env: py310 , build-tox-env: build-py310 , python-ver: "3.10", os: windows-latest } - { name: windows-python3.11 , test-tox-env: py311 , build-tox-env: build-py311 , python-ver: "3.11", os: windows-latest } - { name: windows-python3.11-optional , 
test-tox-env: py311-optional , build-tox-env: build-py311-optional , python-ver: "3.11", os: windows-latest } - { name: windows-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: windows-latest } - { name: windows-python3.11-prerelease, test-tox-env: py311-prerelease, build-tox-env: build-py311-prerelease, python-ver: "3.11", os: windows-latest } - - { name: macos-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: macos-latest } - - { name: macos-python3.8 , test-tox-env: py38 , build-tox-env: build-py38 , python-ver: "3.8" , os: macos-latest } + - { name: macos-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: macos-latest } - { name: macos-python3.9 , test-tox-env: py39 , build-tox-env: build-py39 , python-ver: "3.9" , os: macos-latest } - { name: macos-python3.10 , test-tox-env: py310 , build-tox-env: build-py310 , python-ver: "3.10", os: macos-latest } - { name: macos-python3.11 , test-tox-env: py311 , build-tox-env: build-py311 , python-ver: "3.11", os: macos-latest } @@ -92,13 +89,13 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.7-minimum , test-tox-env: gallery-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } + - { name: linux-gallery-python3.8-minimum , test-tox-env: gallery-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } - { name: linux-gallery-python3.11-upgraded , test-tox-env: gallery-py311-upgraded , python-ver: "3.11", os: ubuntu-latest } - { name: linux-gallery-python3.11-prerelease , test-tox-env: gallery-py311-prerelease, python-ver: "3.11", os: ubuntu-latest } - - { name: windows-gallery-python3.7-minimum , test-tox-env: gallery-py37-minimum , python-ver: "3.7" , os: windows-latest } + - { name: windows-gallery-python3.8-minimum , test-tox-env: gallery-py38-minimum , python-ver: "3.8" , os: windows-latest } - { name: windows-gallery-python3.11-upgraded , test-tox-env: gallery-py311-upgraded , python-ver: "3.11", os: windows-latest } - { name: windows-gallery-python3.11-prerelease, test-tox-env: gallery-py311-prerelease, python-ver: "3.11", os: windows-latest } - - { name: macos-gallery-python3.7-minimum , test-tox-env: gallery-py37-minimum , python-ver: "3.7" , os: macos-latest } + - { name: macos-gallery-python3.8-minimum , test-tox-env: gallery-py38-minimum , python-ver: "3.8" , os: macos-latest } - { name: macos-gallery-python3.11-upgraded , test-tox-env: gallery-py311-upgraded , python-ver: "3.11", os: macos-latest } - { name: macos-gallery-python3.11-prerelease , test-tox-env: gallery-py311-prerelease, python-ver: "3.11", os: macos-latest } steps: @@ -111,6 +108,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Python uses: actions/setup-python@v4 @@ -137,8 +135,7 @@ jobs: fail-fast: false matrix: include: - - { name: conda-linux-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } - - { name: conda-linux-python3.8 , test-tox-env: py38 , build-tox-env: build-py38 , python-ver: "3.8" , os: ubuntu-latest } + - { name: conda-linux-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } - { name: conda-linux-python3.9 , test-tox-env: py39 , build-tox-env: build-py39 , python-ver: "3.9" , os: 
ubuntu-latest } - { name: conda-linux-python3.10 , test-tox-env: py310 , build-tox-env: build-py310 , python-ver: "3.10", os: ubuntu-latest } - { name: conda-linux-python3.11 , test-tox-env: py311 , build-tox-env: build-py311 , python-ver: "3.11", os: ubuntu-latest } @@ -155,6 +152,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Conda uses: conda-incubator/setup-miniconda@v2 @@ -166,8 +164,7 @@ jobs: run: | conda config --set always_yes yes --set changeps1 no conda info - # the conda dependency resolution for tox under python 3.7 can install the wrong importlib_metadata - conda install -c conda-forge tox "importlib_metadata>4" + conda install -c conda-forge tox - name: Conda reporting run: | @@ -199,9 +196,9 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } - - { name: windows-python3.11-ros3, python-ver: "3.11", os: windows-latest } - - { name: macos-python3.11-ros3 , python-ver: "3.11", os: macos-latest } + - { name: conda-linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-windows-python3.11-ros3, python-ver: "3.11", os: windows-latest } + - { name: conda-macos-python3.11-ros3 , python-ver: "3.11", os: macos-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -212,6 +209,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Conda uses: conda-incubator/setup-miniconda@v2 @@ -245,9 +243,9 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } - - { name: windows-gallery-python3.11-ros3, python-ver: "3.11", os: windows-latest } - - { name: macos-gallery-python3.11-ros3 , python-ver: "3.11", os: macos-latest } + - { name: conda-linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-windows-gallery-python3.11-ros3, python-ver: "3.11", os: windows-latest } + - { name: conda-macos-gallery-python3.11-ros3 , python-ver: "3.11", os: macos-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -258,6 +256,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Conda uses: conda-incubator/setup-miniconda@v2 @@ -283,4 +282,4 @@ jobs: - name: Run gallery ros3 tests run: | - python test.py --example-ros3 \ No newline at end of file + python test.py --example-ros3 diff --git a/.github/workflows/run_coverage.yml b/.github/workflows/run_coverage.yml index a465676bc..acbc3bd05 100644 --- a/.github/workflows/run_coverage.yml +++ b/.github/workflows/run_coverage.yml @@ -71,7 +71,7 @@ jobs: - name: Run integration tests and generate coverage report run: | - python -m coverage run -p test.py --integration --backwards + python -m coverage run -p test.py --integration --validation-module --backwards # validation CLI tests generate separate .coverage files that need to be merged python -m coverage combine python -m coverage xml # codecov uploader requires xml format diff --git a/.github/workflows/run_dandi_read_tests.yml b/.github/workflows/run_dandi_read_tests.yml index ec8cc2e84..857b32c9a 100644 --- a/.github/workflows/run_dandi_read_tests.yml +++ b/.github/workflows/run_dandi_read_tests.yml @@ -47,4 +47,4 @@ 
jobs: - name: Run DANDI read tests run: | - pytest -rP tests/read_dandi/ + python tests/read_dandi/test_read_dandi.py diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 34b01e481..e4479a554 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -19,12 +19,12 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } + - { name: linux-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } # NOTE config below with "upload-wheels: true" specifies that wheels should be uploaded as an artifact - - { name: linux-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: ubuntu-latest , upload-wheels: true } - - { name: windows-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: windows-latest } - - { name: windows-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: windows-latest } - - { name: macos-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: macos-latest } + - { name: linux-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: ubuntu-latest , upload-wheels: true } + - { name: windows-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: windows-latest } + - { name: windows-python3.11-upgraded , test-tox-env: py311-upgraded , build-tox-env: build-py311-upgraded , python-ver: "3.11", os: windows-latest } + - { name: macos-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: macos-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -78,9 +78,9 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.7-minimum , test-tox-env: gallery-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } + - { name: linux-gallery-python3.8-minimum , test-tox-env: gallery-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } - { name: linux-gallery-python3.11-upgraded , test-tox-env: gallery-py311-upgraded, python-ver: "3.11", os: ubuntu-latest } - - { name: windows-gallery-python3.7-minimum , test-tox-env: gallery-py37-minimum , python-ver: "3.7" , os: windows-latest } + - { name: windows-gallery-python3.8-minimum , test-tox-env: gallery-py38-minimum , python-ver: "3.8" , os: windows-latest } - { name: windows-gallery-python3.11-upgraded, test-tox-env: gallery-py311-upgraded, python-ver: "3.11", os: windows-latest } steps: - name: Cancel non-latest runs @@ -92,6 +92,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Python uses: actions/setup-python@v4 @@ -118,7 +119,7 @@ jobs: fail-fast: false matrix: include: - - { name: conda-linux-python3.7-minimum , test-tox-env: py37-minimum , build-tox-env: build-py37-minimum , python-ver: "3.7" , os: ubuntu-latest } + - { name: conda-linux-python3.8-minimum , test-tox-env: py38-minimum , build-tox-env: build-py38-minimum , python-ver: "3.8" , os: ubuntu-latest } - { name: conda-linux-python3.11-upgraded , test-tox-env: py311-upgraded , 
build-tox-env: build-py311-upgraded , python-ver: "3.11", os: ubuntu-latest } steps: - name: Cancel non-latest runs @@ -142,8 +143,7 @@ jobs: run: | conda config --set always_yes yes --set changeps1 no conda info - # the conda dependency resolution for tox under python 3.7 can install the wrong importlib_metadata - conda install -c conda-forge tox "importlib_metadata>4" + conda install -c conda-forge tox - name: Conda reporting run: | @@ -174,7 +174,7 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -185,6 +185,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Conda uses: conda-incubator/setup-miniconda@v2 @@ -218,7 +219,7 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -229,6 +230,7 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Conda uses: conda-incubator/setup-miniconda@v2 @@ -272,6 +274,7 @@ jobs: uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 # tags are required for versioneer to determine the version - name: Set up Python uses: actions/setup-python@v4 diff --git a/CHANGELOG.md b/CHANGELOG.md index d28bb93b5..6a3a79232 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,44 @@ # PyNWB Changelog -## PyNWB 2.4.0 (Upcoming) +## PyNWB 2.6.0 (Upcoming) ### Enhancements and minor changes -- Add testing support for Python 3.11. @rly [#1687](https://github.com/NeurodataWithoutBorders/pynwb/pull/1687) -- Add CI testing of NWB files on DANDI. @rly [#1695](https://github.com/NeurodataWithoutBorders/pynwb/pull/1695) - Add `NWBHDF5IO.can_read()`. @bendichter [#1703](https://github.com/NeurodataWithoutBorders/pynwb/pull/1703) - Add `pynwb.get_nwbfile_version()`. @bendichter [#1703](https://github.com/NeurodataWithoutBorders/pynwb/pull/1703) +## PyNWB 2.5.0 (August 18, 2023) + +### Enhancements and minor changes +- Add `TimeSeries.get_timestamps()`. @bendichter [#1741](https://github.com/NeurodataWithoutBorders/pynwb/pull/1741) +- Add `TimeSeries.get_data_in_units()`. @bendichter [#1745](https://github.com/NeurodataWithoutBorders/pynwb/pull/1745) +- Updated `ExternalResources` name change to `HERD`, along with HDMF 3.9.0 being the new minimum. @mavaylon1 [#1754](https://github.com/NeurodataWithoutBorders/pynwb/pull/1754) + +### Documentation and tutorial enhancements +- Updated streaming tutorial to ensure code is run on tests and clarify text. @bendichter [#1760](https://github.com/NeurodataWithoutBorders/pynwb/pull/1760) @oruebel [#1762](https://github.com/NeurodataWithoutBorders/pynwb/pull/1762) +- Fixed minor documentation build warnings and broken links to `basic_trials` tutorial @oruebel [#1762](https://github.com/NeurodataWithoutBorders/pynwb/pull/1762) + +## PyNWB 2.4.1 (July 26, 2023) +- Stop running validation tests as part of integration tests. They cause issues in CI and can be run separately. 
@rly [#1740](https://github.com/NeurodataWithoutBorders/pynwb/pull/1740) + +## PyNWB 2.4.0 (July 23, 2023) + +### Enhancements and minor changes +- Add support for `ExternalResources`. @mavaylon1 [#1684](https://github.com/NeurodataWithoutBorders/pynwb/pull/1684) +- Update links for making a release. @mavaylon1 [#1720](https://github.com/NeurodataWithoutBorders/pynwb/pull/1720) + +### Bug fixes +- Fixed sphinx-gallery setting to correctly display index in the docs with sphinx-gallery>=0.11. @oruebel [#1733](https://github.com/NeurodataWithoutBorders/pynwb/pull/1733) + +### Documentation and tutorial enhancements +- Added thumbnail for Optogenetics tutorial. @oruebel [#1729](https://github.com/NeurodataWithoutBorders/pynwb/pull/1729) +- Update and fix errors in tutorials. @bendichter @oruebel + +## PyNWB 2.3.3 (June 26, 2023) + +### Enhancements and minor changes +- Add testing support for Python 3.11. @rly [#1687](https://github.com/NeurodataWithoutBorders/pynwb/pull/1687) +- Add CI testing of NWB files on DANDI. @rly [#1695](https://github.com/NeurodataWithoutBorders/pynwb/pull/1695) + ### Bug fixes - Remove unused, deprecated `codecov` package from dev installation requirements. @rly [#1688](https://github.com/NeurodataWithoutBorders/pynwb/pull/1688) @@ -15,6 +46,8 @@ [#1690](https://github.com/NeurodataWithoutBorders/pynwb/pull/1690) - Update `requirements-doc.txt` to resolve Python 3.7 incompatibility. @rly [#1694](https://github.com/NeurodataWithoutBorders/pynwb/pull/1694) +- Fixed test battery to show and check for warnings appropriately. @rly + [#1698](https://github.com/NeurodataWithoutBorders/pynwb/pull/1698) ## PyNWB 2.3.2 (April 10, 2023) diff --git a/docs/gallery/advanced_io/parallelio.py b/docs/gallery/advanced_io/parallelio.py index 2e9413c7f..53abdf239 100644 --- a/docs/gallery/advanced_io/parallelio.py +++ b/docs/gallery/advanced_io/parallelio.py @@ -22,7 +22,7 @@ # # 3. **Read from the file in parallel using MPI**: Here each of the 4 MPI ranks reads one time # step from the file - +# # .. code-block:: python # # from mpi4py import MPI diff --git a/docs/gallery/advanced_io/streaming.py b/docs/gallery/advanced_io/streaming.py index 3af5671fb..bb5c2e1d8 100644 --- a/docs/gallery/advanced_io/streaming.py +++ b/docs/gallery/advanced_io/streaming.py @@ -23,111 +23,116 @@ Now you can get the url of a particular NWB file using the dandiset ID and the path of that file within the dandiset. -.. code-block:: python - - from dandi.dandiapi import DandiAPIClient - - dandiset_id = '000006' # ephys dataset from the Svoboda Lab - filepath = 'sub-anm372795/sub-anm372795_ses-20170718.nwb' # 450 kB file - with DandiAPIClient() as client: - asset = client.get_dandiset(dandiset_id, 'draft').get_asset_by_path(filepath) - s3_url = asset.get_content_url(follow_redirects=1, strip_query=True) - - -Streaming Method 1: fsspec --------------------------- -fsspec is another data streaming approach that is quite flexible and has several performance advantages. This library -creates a virtual filesystem for remote stores. With this approach, a virtual file is created for the file and -the virtual filesystem layer takes care of requesting data from the S3 bucket whenever data is -read from the virtual file. Note that this implementation is completely unaware of internals of the HDF5 format -and thus can work for **any** file, not only for the purpose of use with H5PY and PyNWB. - -First install ``fsspec`` and the dependencies of the :py:class:`~fsspec.implementations.http.HTTPFileSystem`: - -.. 
code-block:: bash - - pip install fsspec requests aiohttp - -Then in Python: - -.. code-block:: python - - import fsspec - import pynwb - import h5py - from fsspec.implementations.cached import CachingFileSystem - - # first, create a virtual filesystem based on the http protocol and use - # caching to save accessed data to RAM. - fs = CachingFileSystem( - fs=fsspec.filesystem("http"), - cache_storage="nwb-cache", # Local folder for the cache - ) - - # next, open the file - with fs.open(s3_url, "rb") as f: - with h5py.File(f) as file: - with pynwb.NWBHDF5IO(file=file, load_namespaces=True) as io: - nwbfile = io.read() - print(nwbfile.acquisition['lick_times'].time_series['lick_left_times'].data[:]) - - -fsspec is a library that can be used to access a variety of different store formats, including (at the time of -writing): - -.. code-block:: python - - from fsspec.registry import known_implementations - known_implementations.keys() - -file, memory, dropbox, http, https, zip, tar, gcs, gs, gdrive, sftp, ssh, ftp, hdfs, arrow_hdfs, webhdfs, s3, s3a, wandb, oci, adl, abfs, az, cached, blockcache, filecache, simplecache, dask, dbfs, github, git, smb, jupyter, jlab, libarchive, reference - -The S3 backend, in particular, may provide additional functionality for accessing data on DANDI. See the -`fsspec documentation on known implementations `_ -for a full updated list of supported store formats. - -Streaming Method 2: ROS3 ------------------------- -ROS3 is one of the supported methods for reading data from a remote store. ROS3 stands for "read only S3" and is a -driver created by the HDF5 Group that allows HDF5 to read HDF5 files stored remotely in s3 buckets. Using this method -requires that your HDF5 library is installed with the ROS3 driver enabled. This is not the default configuration, -so you will need to make sure you install the right version of ``h5py`` that has this advanced configuration enabled. -You can install HDF5 with the ROS3 driver from `conda-forge `_ using ``conda``. You may -first need to uninstall a currently installed version of ``h5py``. - -.. code-block:: bash - - pip uninstall h5py - conda install -c conda-forge "h5py>=3.2" - -Now instantiate a :py:class:`~pynwb.NWBHDF5IO` object with the S3 URL and specify the driver as "ros3". This -will download metadata about the file from the S3 bucket to memory. The values of datasets are accessed lazily, -just like when reading an NWB file stored locally. So, slicing into a dataset will require additional time to -download the sliced data (and only the sliced data) to memory. - -.. code-block:: python - - from pynwb import NWBHDF5IO - - with NWBHDF5IO(s3_url, mode='r', load_namespaces=True, driver='ros3') as io: - nwbfile = io.read() - print(nwbfile) - print(nwbfile.acquisition['lick_times'].time_series['lick_left_times'].data[:]) - -Which streaming method to choose? ---------------------------------- - -fsspec has many advantages over ros3: - -1. fsspec is easier to install -2. fsspec supports caching, which will dramatically speed up repeated requests for the - same region of data -3. fsspec automatically retries when s3 fails to return. -4. fsspec works with other storage backends and -5. fsspec works with other types of files. -6. In our hands, fsspec is faster out-of-the-box. - -For these reasons, we would recommend use fsspec for most Python users. 
""" # sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnails_streaming.png' + +from dandi.dandiapi import DandiAPIClient + +dandiset_id = '000006' # ephys dataset from the Svoboda Lab +filepath = 'sub-anm372795/sub-anm372795_ses-20170718.nwb' # 450 kB file +with DandiAPIClient() as client: + asset = client.get_dandiset(dandiset_id, 'draft').get_asset_by_path(filepath) + s3_url = asset.get_content_url(follow_redirects=1, strip_query=True) + +############################################## +# Streaming Method 1: fsspec +# -------------------------- +# fsspec is another data streaming approach that is quite flexible and has several performance advantages. This library +# creates a virtual filesystem for remote stores. With this approach, a virtual file is created for the file and +# the virtual filesystem layer takes care of requesting data from the S3 bucket whenever data is +# read from the virtual file. Note that this implementation is completely unaware of internals of the HDF5 format +# and thus can work for **any** file, not only for the purpose of use with H5PY and PyNWB. +# +# First install ``fsspec`` and the dependencies of the :py:class:`~fsspec.implementations.http.HTTPFileSystem`: +# +# .. code-block:: bash +# +# pip install fsspec requests aiohttp +# +# Then in Python: + +import fsspec +import pynwb +import h5py +from fsspec.implementations.cached import CachingFileSystem + +# first, create a virtual filesystem based on the http protocol +fs = fsspec.filesystem("http") + +# create a cache to save downloaded data to disk (optional) +fs = CachingFileSystem( + fs=fs, + cache_storage="nwb-cache", # Local folder for the cache +) + +# next, open the file +with fs.open(s3_url, "rb") as f: + with h5py.File(f) as file: + with pynwb.NWBHDF5IO(file=file, load_namespaces=True) as io: + nwbfile = io.read() + print(nwbfile.acquisition['lick_times'].time_series['lick_left_times'].data[:]) + +################################## +# fsspec is a library that can be used to access a variety of different store formats, including (at the time of +# writing): +# +# .. code-block:: python +# +# from fsspec.registry import known_implementations +# known_implementations.keys() +# +# file, memory, dropbox, http, https, zip, tar, gcs, gs, gdrive, sftp, ssh, ftp, hdfs, arrow_hdfs, webhdfs, s3, s3a, +# wandb, oci, adl, abfs, az, cached, blockcache, filecache, simplecache, dask, dbfs, github, git, smb, jupyter, jlab, +# libarchive, reference +# +# The S3 backend, in particular, may provide additional functionality for accessing data on DANDI. See the +# `fsspec documentation on known implementations `_ +# for a full updated list of supported store formats. +# +# Streaming Method 2: ROS3 +# ------------------------ +# ROS3 stands for "read only S3" and is a driver created by the HDF5 Group that allows HDF5 to read HDF5 files stored +# remotely in s3 buckets. Using this method requires that your HDF5 library is installed with the ROS3 driver enabled. +# With ROS3 support enabled in h5py, we can instantiate a :py:class:`~pynwb.NWBHDF5IO` object with the S3 URL and +# specify the driver as "ros3". + +from pynwb import NWBHDF5IO + +with NWBHDF5IO(s3_url, mode='r', load_namespaces=True, driver='ros3') as io: + nwbfile = io.read() + print(nwbfile) + print(nwbfile.acquisition['lick_times'].time_series['lick_left_times'].data[:]) + +################################## +# This will download metadata about the file from the S3 bucket to memory. 
The values of datasets are accessed lazily, +# just like when reading an NWB file stored locally. So, slicing into a dataset will require additional time to +# download the sliced data (and only the sliced data) to memory. +# +# .. note:: +# +# Pre-built h5py packages on PyPI do not include this S3 support. If you want this feature, you could use packages +# from conda-forge, or build h5py from source against an HDF5 build with S3 support. You can install HDF5 with +# the ROS3 driver from `conda-forge `_ using ``conda``. You may +# first need to uninstall a currently installed version of ``h5py``. +# +# .. code-block:: bash +# +# pip uninstall h5py +# conda install -c conda-forge "h5py>=3.2" + + +################################################## +# Which streaming method to choose? +# --------------------------------- +# +# From a user perspective, once opened, the :py:class:`~pynwb.file.NWBFile` works the same with +# both fsspec and ros3. However, in general, we currently recommend using fsspec for streaming +# NWB files because it is more performant and reliable than ros3. In particular fsspec: +# +# 1. supports caching, which will dramatically speed up repeated requests for the +# same region of data, +# 2. automatically retries when s3 fails to return, which helps avoid errors when accessing data due to +# intermittent errors in connections with S3, +# 3. works also with other storage backends (e.g., GoogleDrive or Dropbox, not just S3) and file formats, and +# 4. in our experience appears to provide faster out-of-the-box performance than the ros3 driver. diff --git a/docs/gallery/domain/ecephys.py b/docs/gallery/domain/ecephys.py index 561236553..406f2b789 100644 --- a/docs/gallery/domain/ecephys.py +++ b/docs/gallery/domain/ecephys.py @@ -5,17 +5,19 @@ Extracellular Electrophysiology Data ==================================== -The following tutorial describes storage of extracellular electrophysiology data in NWB. -The workflow demonstrated here involves four main steps: +This tutorial describes storage of extracellular electrophysiology data in NWB in four +main steps: 1. Create the electrodes table 2. Add acquired raw voltage data 3. Add LFP data 4. Add spike data +It is recommended to cover :ref:`basics` before this tutorial. -This tutorial assumes that transforming data between these states is done by users--PyNWB does not provide -analysis functionality. It is recommended to cover :ref:`basics` before this tutorial. +.. note:: It is recommended to check if your source data is supported by + `NeuroConv Extracellular Electrophysiology Gallery `_. + If it is supported, it is recommended to use NeuroConv to convert your data. The following examples will reference variables that may not be defined within the block they are used in. For clarity, we define them here: @@ -53,9 +55,9 @@ ####################### # Electrodes Table -# ------------------------------ +# ---------------- # -# In order to store extracellular electrophysiology data, you first must create an electrodes table +# To store extracellular electrophysiology data, you first must create an electrodes table # describing the electrodes that generated this data. Extracellular electrodes are stored in an # ``"electrodes"`` table, which is a :py:class:`~hdmf.common.table.DynamicTable`. # @@ -73,10 +75,9 @@ # :alt: electrodes table UML diagram # :align: center # -# Before creating an :py:class:`~pynwb.ecephys.ElectrodeGroup`, you need to provide some information about the -# device that was used to record from the electrode. 
This is done by creating a :py:class:`~pynwb.device.Device` -# object using the instance method :py:meth:`~pynwb.file.NWBFile.create_device`. - +# The electrodes table references a required :py:class:`~pynwb.ecephys.ElectrodeGroup`, which is used to represent a +# group of electrodes. Before creating an :py:class:`~pynwb.ecephys.ElectrodeGroup`, you must define a +# :py:class:`~pynwb.device.Device` object using the method :py:meth:`.NWBFile.create_device`. device = nwbfile.create_device( name="array", description="the best array", manufacturer="Probe Company 9000" @@ -85,12 +86,12 @@ ####################### # Once you have created the :py:class:`~pynwb.device.Device`, you can create an # :py:class:`~pynwb.ecephys.ElectrodeGroup`. Then you can add electrodes one-at-a-time with -# :py:meth:`~pynwb.file.NWBFile.add_electrode`. :py:meth:`~pynwb.file.NWBFile.add_electrode` has two required arguments, +# :py:meth:`.NWBFile.add_electrode`. :py:meth:`.NWBFile.add_electrode` has two required arguments, # ``group``, which takes an :py:class:`~pynwb.ecephys.ElectrodeGroup`, and ``location``, which takes a string. It also # has a number of optional metadata fields for electrode features (e.g, ``x``, ``y``, ``z``, ``imp``, # and ``filtering``). Since this table is a :py:class:`~hdmf.common.table.DynamicTable`, we can add -# additional user-specified metadata fields as well. We will be adding a ``"label"`` column to the table. Use the -# following code to add electrodes for an array with 4 shanks and 3 channels per shank. +# additional user-specified metadata as custom columns of the table. We will be adding a ``"label"`` column to the +# table. Use the following code to add electrodes for an array with 4 shanks and 3 channels per shank. nwbfile.add_electrode_column(name="label", description="label of electrode") @@ -139,9 +140,10 @@ # objects. :py:class:`~pynwb.ecephys.ElectricalSeries` is a subclass of :py:class:`~pynwb.base.TimeSeries` # specialized for voltage data. To create the :py:class:`~pynwb.ecephys.ElectricalSeries` objects, we need to # reference a set of rows in the ``"electrodes"`` table to indicate which electrodes were recorded. We will do this -# by creating a :py:class:`~pynwb.core.DynamicTableRegion`, which is a type of link that allows you to reference -# :py:meth:~pynwb.file.NWBFile.create_electrode_table_region` is a convenience function that creates a -# :py:class:`~pynwb.core.DynamicTableRegion` which references the ``"electrodes"`` table. +# by creating a :py:class:`~hdmf.common.table.DynamicTableRegion`, which is a type of link that allows you to reference +# rows of a :py:class:`~hdmf.common.table.DynamicTable`. :py:meth:`.NWBFile.create_electrode_table_region` is a +# convenience function that creates a :py:class:`~hdmf.common.table.DynamicTableRegion` which references the +# ``"electrodes"`` table. all_table_region = nwbfile.create_electrode_table_region( @@ -154,7 +156,7 @@ # ^^^^^^^^^^^^^^^^^ # # Now create an :py:class:`~pynwb.ecephys.ElectricalSeries` object to store raw data collected -# during the experiment, passing in this ``"all_table_region"`` :py:class:`~pynwb.core.DynamicTableRegion` +# during the experiment, passing in this ``"all_table_region"`` :py:class:`~hdmf.common.table.DynamicTableRegion` # reference to all rows of the electrodes table. # # .. only:: html @@ -183,27 +185,19 @@ ) #################### -# NWB organizes data into different groups depending on the type of data. Groups can be thought of -# as folders within the file. 
Here are some of the groups within an :py:class:`~pynwb.file.NWBFile` and the types of -# data they are intended to store: -# -# * **acquisition**: raw, acquired data that should never change -# * **processing**: processed data, typically the results of preprocessing algorithms and could change -# * **analysis**: results of data analysis -# * **stimuli**: stimuli used in the experiment (e.g., images, videos, light pulses) -# # Since this :py:class:`~pynwb.ecephys.ElectricalSeries` represents raw data from the data acquisition system, -# we will add it to the acquisition group of the :py:class:`~pynwb.file.NWBFile`. +# add it to the acquisition group of the :py:class:`~pynwb.file.NWBFile`. nwbfile.add_acquisition(raw_electrical_series) #################### # LFP -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# ^^^ # # Now create an :py:class:`~pynwb.ecephys.ElectricalSeries` object to store LFP data collected during the experiment, -# again passing in the :py:class:`~pynwb.core.DynamicTableRegion` reference to all rows of the ``"electrodes"`` table. +# again passing in the :py:class:`~hdmf.common.table.DynamicTableRegion` reference to all rows of the ``"electrodes"`` +# table. lfp_data = np.random.randn(50, 12) @@ -244,7 +238,7 @@ # # Create a processing module named ``"ecephys"`` and add the :py:class:`~pynwb.ecephys.LFP` object to it. # This is analogous to how we can store the :py:class:`~pynwb.behavior.Position` object in a processing module -# created with the :py:class:`~pynwb.file.NWBFile.create_processing_module` method. +# created with the method :py:meth:`.NWBFile.create_processing_module`. ecephys_module = nwbfile.create_processing_module( @@ -256,29 +250,28 @@ # .. _units_electrode: # # Spike Times -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# ^^^^^^^^^^^ # # Spike times are stored in the :py:class:`~pynwb.misc.Units` table, which is a subclass of # :py:class:`~hdmf.common.table.DynamicTable`. Adding columns to the :py:class:`~pynwb.misc.Units` table is analogous # to how we can add columns to the ``"electrodes"`` and ``"trials"`` tables. # -# We will generate some random spike data and populate the :py:meth:`~pynwb.misc.Units` table using the -# :py:class:`~pynwb.file.NWBFile.add_unit` method. Then we can display the :py:class:`~pynwb.misc.Units` table as a -# pandas :py:class:`~pandas.DataFrame`. +# Generate some random spike data and populate the :py:class:`~pynwb.misc.Units` table using the +# method :py:meth:`.NWBFile.add_unit`. nwbfile.add_unit_column(name="quality", description="sorting quality") -poisson_lambda = 20 firing_rate = 20 n_units = 10 +res = 1000 +duration = 20 for n_units_per_shank in range(n_units): - n_spikes = np.random.poisson(lam=poisson_lambda) - spike_times = np.round( - np.cumsum(np.random.exponential(1 / firing_rate, n_spikes)), 5 - ) - nwbfile.add_unit( - spike_times=spike_times, quality="good", waveform_mean=[1.0, 2.0, 3.0, 4.0, 5.0] - ) + spike_times = np.where(np.random.rand((res * duration)) < (firing_rate / res))[0] / res + nwbfile.add_unit(spike_times=spike_times, quality="good") + +####################### +# The :py:class:`~pynwb.misc.Units` table can also be converted to a pandas :py:class:`~pandas.DataFrame`. + nwbfile.units.to_dataframe() @@ -294,7 +287,7 @@ # using these objects. # # For storing spike data, there are two options. Which one you choose depends on what data you have available. 
-# If you need to store the complete, continuous raw voltage traces, you should store your the traces with +# If you need to store the complete, continuous raw voltage traces, you should store the traces with # :py:class:`~pynwb.ecephys.ElectricalSeries` objects as :ref:`acquisition ` data, and use # the :py:class:`~pynwb.ecephys.EventDetection` class for identifying the spike events in your raw traces. # If you do not want to store the raw voltage traces and only the waveform 'snippets' surrounding spike events, @@ -302,7 +295,7 @@ # :py:class:`~pynwb.ecephys.SpikeEventSeries` objects. # # The results of spike sorting (or clustering) should be stored in the top-level :py:class:`~pynwb.misc.Units` table. -# Note that it is not required to store spike waveforms in order to store spike events or waveforms--if you only +# Note that it is not required to store spike waveforms in order to store spike events or mean waveforms--if you only # want to store the spike times of clustered units you can use only the Units table. # # For local field potential data, there are two options. Again, which one you choose depends on what data you @@ -335,14 +328,14 @@ # Reading electrophysiology data # ------------------------------ # -# We can access the raw data by indexing :py:class:`~pynwb.file.NWBFile.acquisition` +# Access the raw data by indexing :py:class:`~pynwb.file.NWBFile.acquisition` # with the name of the :py:class:`~pynwb.ecephys.ElectricalSeries`, which we named ``"ElectricalSeries"``. # We can also access the LFP data by indexing :py:class:`~pynwb.file.NWBFile.processing` # with the name of the processing module ``"ecephys"``. -# Then, we can access the :py:class:`~pynwb.ecephys.LFP` object inside of the ``"ecephys"`` processing module +# Then, we can access the :py:class:`~pynwb.ecephys.LFP` object inside the ``"ecephys"`` processing module # by indexing it with the name of the :py:class:`~pynwb.ecephys.LFP` object. # The default name of :py:class:`~pynwb.ecephys.LFP` objects is ``"LFP"``. -# Finally, we can access the :py:class:`~pynwb.ecephys.ElectricalSeries` object inside of the +# Finally, we can access the :py:class:`~pynwb.ecephys.ElectricalSeries` object inside the # :py:class:`~pynwb.ecephys.LFP` object by indexing it with the name of the # :py:class:`~pynwb.ecephys.ElectricalSeries` object, which we named ``"ElectricalSeries"``. @@ -355,13 +348,12 @@ #################### # Accessing your data -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# ^^^^^^^^^^^^^^^^^^^ # -# Data arrays are read passively from the file. -# Calling the data attribute on a :py:class:`~pynwb.base.pynwb.TimeSeries` -# such as a :py:class:`~pynwb.ecephys.ElectricalSeries` does not read the data -# values, but presents an :py:class:`~h5py` object that can be indexed to read data. -# You can use the ``[:]`` operator to read the entire data array into memory. +# Data arrays are read passively from the file. Calling the data attribute on a :py:class:`~pynwb.base.TimeSeries` +# such as a :py:class:`~pynwb.ecephys.ElectricalSeries` does not read the data values, but presents an +# :py:class:`h5py.Dataset` object that can be indexed to read data. You can use the ``[:]`` operator to read the entire +# data array into memory. # # Load and print all the data values of the :py:class:`~pynwb.ecephys.ElectricalSeries` # object representing the LFP data. 
@@ -372,14 +364,15 @@ #################### # Accessing data regions -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# ^^^^^^^^^^^^^^^^^^^^^^ # # It is often preferable to read only a portion of the data. To do this, index -# or slice into the ``data`` attribute just like if you were indexing or slicing a -# :py:class:`~numpy` array. +# or slice into the ``data`` attribute just like if you index or slice a +# :py:class:`numpy.ndarray`. # # The following code prints elements ``0:10`` in the first dimension (time) # and ``0:3`` in the second dimension (electrodes) from the LFP data we have written. +# It also demonstrates how to access the spike times of the 0th unit. with NWBHDF5IO("ecephys_tutorial.nwb", "r") as io: diff --git a/docs/gallery/domain/ogen.py b/docs/gallery/domain/ogen.py index b6d3e94c7..1d4152980 100644 --- a/docs/gallery/domain/ogen.py +++ b/docs/gallery/domain/ogen.py @@ -12,6 +12,7 @@ When creating a NWB file, the first step is to create the :py:class:`~pynwb.file.NWBFile` object. """ +# sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnails_ogen.png' from datetime import datetime from uuid import uuid4 diff --git a/docs/gallery/domain/ophys.py b/docs/gallery/domain/ophys.py index 9ea9354fd..b8ddb1ae5 100644 --- a/docs/gallery/domain/ophys.py +++ b/docs/gallery/domain/ophys.py @@ -12,8 +12,11 @@ 4. Add image segmentation 5. Add fluorescence and dF/F responses -This tutorial assumes that transforming data between these states is done by users--PyNWB does not provide -analysis functionality. It is recommended to cover :ref:`basics` before this tutorial. +It is recommended to cover :ref:`basics` before this tutorial. + +.. note:: It is recommended to check if your source data is supported by + `NeuroConv Optical Physiology Gallery `_. + If it is supported, it is recommended to use NeuroConv to convert your data. The following examples will reference variables that may not be defined within the block they are used in. For clarity, we define them here: @@ -41,10 +44,10 @@ ) #################### -# Creating and Writing NWB files -# ------------------------------ +# Creating the NWB file +# --------------------- # -# When creating a NWB file, the first step is to create the :py:class:`~pynwb.file.NWBFile` object. +# When creating an NWB file, the first step is to create the :py:class:`~pynwb.file.NWBFile` object. nwbfile = NWBFile( session_description="my first synthetic recording", @@ -82,9 +85,7 @@ # :align: center # # Create a :py:class:`~pynwb.device.Device` named ``"Microscope"`` in the :py:class:`~pynwb.file.NWBFile` object. Then -# create an :py:class:`~pynwb.ophys.OpticalChannel` named ``"OpticalChannel"`` and an -# :py:class:`~pynwb.ophys.ImagingPlane` named ``"ImagingPlane"``, passing in the :py:class:`~pynwb.ophys.OpticalChannel` -# object and the :py:class:`~pynwb.device.Device` object. +# create an :py:class:`~pynwb.ophys.OpticalChannel` named ``"OpticalChannel"``. device = nwbfile.create_device( @@ -97,6 +98,11 @@ description="an optical channel", emission_lambda=500.0, ) + +#################### +# Now, create a :py:class:`~pynwb.ophys.ImagingPlane` named ``"ImagingPlane"``, passing in the +# :py:class:`~pynwb.ophys.OpticalChannel` object and the :py:class:`~pynwb.device.Device` object. 
+ imaging_plane = nwbfile.create_imaging_plane( name="ImagingPlane", optical_channel=optical_channel, @@ -111,15 +117,16 @@ origin_coords=[1.0, 2.0, 3.0], origin_coords_unit="meters", ) + #################### # One-photon Series # ----------------- # Now that we have our :py:class:`~pynwb.ophys.ImagingPlane`, we can create a # :py:class:`~pynwb.ophys.OnePhotonSeries` object to store raw one-photon imaging data. # Here, we have two options. The first option is to supply the raw image data to PyNWB, -# using the data argument. The other option is to provide a path to the image files. -# These two options have trade-offs, so it is worth spending time considering how you -# want to store this data. +# using the data argument. The second option is to provide a path to the image files. +# These two options have trade-offs, so it is worth considering how you want to store +# this data. # using internal data. this data will be stored inside the NWB file one_p_series1 = OnePhotonSeries( @@ -143,7 +150,7 @@ ) #################### -# Since these one-photon data are raw, acquired data, we will add the +# Since these one-photon data are acquired data, we will add the # :py:class:`~pynwb.ophys.OnePhotonSeries` objects to the :py:class:`~pynwb.file.NWBFile` # as acquired data. @@ -153,9 +160,8 @@ #################### # Two-photon Series # ----------------- -# Another option is to create a :py:class:`~pynwb.ophys.TwoPhotonSeries` object to store -# our raw two-photon imaging data. This class behaves similarly to -# :py:class:`~pynwb.ophys.OnePhotonSeries`. +# :py:class:`~pynwb.ophys.TwoPhotonSeries` objects store acquired two-photon imaging +# data. This class behaves similarly to :py:class:`~pynwb.ophys.OnePhotonSeries`. # # .. only:: html # @@ -200,12 +206,10 @@ # Motion Correction (optional) # ---------------------------- # -# You can also store the result of motion correction. -# These should be stored in a :py:class:`~pynwb.ophys.MotionCorrection` object, +# You can also store the result of motion correction using a :py:class:`~pynwb.ophys.MotionCorrection` object, # which is a :py:class:`~pynwb.core.MultiContainerInterface` (similar to :py:class:`~pynwb.behavior.Position`) # which holds 1 or more :py:class:`~pynwb.ophys.CorrectedImageStack` objects. - corrected = ImageSeries( name="corrected", # this must be named "corrected" data=np.ones((1000, 100, 100)), @@ -305,7 +309,7 @@ #################### # Regions Of Interest (ROIs) -# --------------------------------- +# -------------------------- # # Image masks # ^^^^^^^^^^^ @@ -323,7 +327,7 @@ # randomly generate example image masks x = np.random.randint(0, 95) y = np.random.randint(0, 95) - image_mask[x : x + 5, y : y + 5] = 1 + image_mask[x:x + 5, y:y + 5] = 1 # add image mask to plane segmentation ps.add_roi(image_mask=image_mask) @@ -338,10 +342,7 @@ # Alternatively, you could define ROIs using a pixel mask, which is an array of # triplets (x, y, weight) that have a non-zero weight. All undefined pixels are assumed # to be 0. -# -# .. note:: -# You need to be consistent within a :py:class:`~pynwb.ophys.PlaneSegmentation` table. -# You can add ROIs either using image masks, pixel masks, or voxel masks. + ps2 = img_seg.create_plane_segmentation( name="PlaneSegmentation2", @@ -368,7 +369,7 @@ # Voxel masks # ^^^^^^^^^^^ # -# When storing the segmentation of volumetric imaging, you can use imaging masks. +# When storing the segmentation of volumetric imaging, you can use 3D imaging masks. 
# Alternatively, you could define ROIs using a voxel mask, which is an array of # triplets (x, y, z, weight) that have a non-zero weight. All undefined voxels are # assumed to be 0. @@ -394,7 +395,11 @@ # define an example 4 x 3 x 2 voxel region of weight '0.5' voxel_mask = [] - for ix, iy, iz in product(range(x, x + 4), range(y, y + 3), range(z, z + 2)): + for ix, iy, iz in product( + range(x, x + 4), + range(y, y + 3), + range(z, z + 2) + ): voxel_mask.append((ix, iy, iz, 0.5)) # add voxel mask to plane segmentation @@ -413,7 +418,7 @@ # # Now that the regions of interest are stored, you can store fluorescence data for these # ROIs. This type of data is stored using the :py:class:`~pynwb.ophys.RoiResponseSeries` -# and :py:class:`~pynwb.ophys.Fluorescence` classes. +# classes. # # .. only:: html # diff --git a/docs/gallery/domain/plot_behavior.py b/docs/gallery/domain/plot_behavior.py index 3436be89e..8f341bea1 100644 --- a/docs/gallery/domain/plot_behavior.py +++ b/docs/gallery/domain/plot_behavior.py @@ -100,7 +100,7 @@ # .. note:: # :py:class:`~pynwb.behavior.SpatialSeries` data should be stored as one continuous stream, # as it is acquired, not by trial as is often reshaped for analysis. -# Data can be trial-aligned on-the-fly using the trials table. See the :ref:`basic_trials` tutorial +# Data can be trial-aligned on-the-fly using the trials table. See the :ref:`time_intervals` tutorial # for further information. # # For position data ``reference_frame`` indicates the zero-position, e.g. diff --git a/docs/gallery/general/file.py b/docs/gallery/general/plot_file.py similarity index 82% rename from docs/gallery/general/file.py rename to docs/gallery/general/plot_file.py index 30841eca7..beead22f6 100644 --- a/docs/gallery/general/file.py +++ b/docs/gallery/general/plot_file.py @@ -16,13 +16,13 @@ Background: Basic concepts -------------------------- -In the `NWB Format `_, each experimental session is typically +In the `NWB Format `_, each experiment session is typically represented by a separate NWB file. NWB files are represented in PyNWB by :py:class:`~pynwb.file.NWBFile` objects which provide functionality for creating and retrieving: * :ref:`timeseries_overview` datasets, i.e., objects for storing time series data * :ref:`modules_overview`, i.e., objects for storing and grouping analyses, and - * experimental metadata and other metadata related to data provenance. + * experiment metadata and other metadata related to data provenance. The following sections describe the :py:class:`~pynwb.base.TimeSeries` and :py:class:`~pynwb.base.ProcessingModules` classes in further detail. @@ -33,7 +33,7 @@ ^^^^^^^^^^ :py:class:`~pynwb.base.TimeSeries` objects store time series data and correspond to the *TimeSeries* specifications -provided by the `NWB Format`_ . Like the NWB specification, :py:class:`~pynwb.base.TimeSeries` Python objects +provided by the `NWB Format`_. Like the NWB specification, :py:class:`~pynwb.base.TimeSeries` Python objects follow an object-oriented inheritance pattern, i.e., the class :py:class:`~pynwb.base.TimeSeries` serves as the base class for all other :py:class:`~pynwb.base.TimeSeries` types, such as, :py:class:`~pynwb.ecephys.ElectricalSeries`, which itself may have further subtypes, e.g., @@ -119,12 +119,23 @@ In addition to :py:class:`~pynwb.core.NWBContainer`, which functions as a common base type for Group objects, :py:class:`~pynwb.core.NWBData` provides a common base for the specification of datasets in the NWB format. 
+NWB organizes data into different groups depending on the type of data. Groups can be thought of +as folders within the file. Here are some of the groups within an :py:class:`~pynwb.file.NWBFile` and the types of +data they are intended to store: + +* **acquisition**: raw, acquired data that should never change +* **processing**: processed data, typically the results of preprocessing algorithms and could change +* **analysis**: results of data analysis +* **stimuli**: stimuli used in the experiment (e.g., images, videos, light pulses) + The following examples will reference variables that may not be defined within the block they are used in. For clarity, we define them here: + """ -from datetime import datetime # sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnails_file.png' + +from datetime import datetime from uuid import uuid4 import numpy as np @@ -148,7 +159,8 @@ # occurred exactly at the session start time. # # Create an :py:class:`~pynwb.file.NWBFile` object with the required fields -# (``session_description``, ``identifier``, ``session_start_time``) and additional metadata. +# (:py:attr:`~pynwb.file.NWBFile.session_description`, :py:attr:`~pynwb.file.NWBFile.identifier`, +# :py:attr:`~pynwb.file.NWBFile.session_start_time`) and additional metadata. # # .. note:: # Use keyword arguments when constructing :py:class:`~pynwb.file.NWBFile` objects. @@ -169,7 +181,7 @@ experiment_description="I went on an adventure to reclaim vast treasures.", # optional related_publications="DOI:10.1016/j.neuron.2016.12.011", # optional ) -print(nwbfile) +nwbfile #################### # .. note:: @@ -184,7 +196,7 @@ # Subject Information # ------------------- # -# In the :py:class:`~pynwb.file.Subject` object we can store information about the experimental subject, +# In the :py:class:`~pynwb.file.Subject` object we can store information about the experiment subject, # such as ``age``, ``species``, ``genotype``, ``sex``, and a ``description``. # # .. only:: html @@ -204,14 +216,15 @@ # The fields in the :py:class:`~pynwb.file.Subject` object are all free-form text (any format will be valid), # however it is recommended to follow particular conventions to help software tools interpret the data: # -# * **age**: `ISO 8601 Duration format `_, e.g., ``"P90D"`` for 90 days old -# * **species**: The formal latin binomial nomenclature, e.g., ``"Mus musculus"``, ``"Homo sapiens"`` +# * **age**: `ISO 8601 Duration format `_, e.g., ``"P90D"`` for 90 +# days old +# * **species**: The formal Latin binomial nomenclature, e.g., ``"Mus musculus"``, ``"Homo sapiens"`` # * **sex**: Single letter abbreviation, e.g., ``"F"`` (female), ``"M"`` (male), ``"U"`` (unknown), and ``"O"`` (other) # # Add the subject information to the :py:class:`~pynwb.file.NWBFile` # by setting the ``subject`` field to the new :py:class:`~pynwb.file.Subject` object. -nwbfile.subject = Subject( +subject = Subject( subject_id="001", age="P90D", description="mouse 5", @@ -219,6 +232,9 @@ sex="M", ) +nwbfile.subject = subject +subject + #################### # .. 
_basic_timeseries: # @@ -246,6 +262,7 @@ starting_time=0.0, rate=1.0, ) +time_series_with_rate #################### # For irregularly sampled recordings, we need to provide the ``timestamps`` for the ``data``: @@ -257,13 +274,14 @@ unit="m", timestamps=timestamps, ) +time_series_with_timestamps #################### # :py:class:`~pynwb.base.TimeSeries` objects can be added directly to :py:class:`~pynwb.file.NWBFile` using: # -# * :py:meth:`~pynwb.file.NWBFile.add_acquisition` to add *acquisition* data (raw, acquired data that should never change), -# * :py:meth:`~pynwb.file.NWBFile.add_stimulus` to add *stimulus* data, or -# * :py:meth:`~pynwb.file.NWBFile.add_stimulus_template` to store *stimulus templates*. +# * :py:meth:`.NWBFile.add_acquisition` to add *acquisition* data (raw, acquired data that should never change), +# * :py:meth:`.NWBFile.add_stimulus` to add *stimulus* data, or +# * :py:meth:`.NWBFile.add_stimulus_template` to store *stimulus templates*. # nwbfile.add_acquisition(time_series_with_timestamps) @@ -275,7 +293,7 @@ nwbfile.acquisition["test_timeseries"] #################### -# or using the :py:meth:`~pynwb.file.NWBFile.get_acquisition` method: +# or using the method :py:meth:`.NWBFile.get_acquisition`: nwbfile.get_acquisition("test_timeseries") @@ -307,7 +325,7 @@ # create fake data with shape (50, 2) # the first dimension should always represent time position_data = np.array([np.linspace(0, 10, 50), np.linspace(0, 8, 50)]).T -position_timestamps = np.linspace(0, 50) / 200 +position_timestamps = np.linspace(0, 50).astype(float) / 200 spatial_series_obj = SpatialSeries( name="SpatialSeries", @@ -316,12 +334,13 @@ timestamps=position_timestamps, reference_frame="(0,0) is bottom left corner", ) -print(spatial_series_obj) +spatial_series_obj #################### # To help data analysis and visualization tools know that this :py:class:`~pynwb.behavior.SpatialSeries` object # represents the position of the subject, store the :py:class:`~pynwb.behavior.SpatialSeries` object inside -# of a :py:class:`~pynwb.behavior.Position` object, which can hold one or more :py:class:`~pynwb.behavior.SpatialSeries` objects. +# of a :py:class:`~pynwb.behavior.Position` object, which can hold one or more :py:class:`~pynwb.behavior.SpatialSeries` +# objects. # # .. only:: html # @@ -341,6 +360,7 @@ # name is set to "Position" by default position_obj = Position(spatial_series=spatial_series_obj) +position_obj #################### # Behavior Processing Module @@ -358,14 +378,15 @@ # so it would be classified as processed data. # # Create a processing module called ``"behavior"`` for storing behavioral data in the :py:class:`~pynwb.file.NWBFile` -# and add the :py:class:`~pynwb.behavior.Position` object to the processing module using the -# :py:meth:`~pynwb.file.NWBFile.create_processing_module` method: +# and add the :py:class:`~pynwb.behavior.Position` object to the processing module using the method +# :py:meth:`.NWBFile.create_processing_module`: behavior_module = nwbfile.create_processing_module( name="behavior", description="processed behavioral data" ) behavior_module.add(position_obj) +behavior_module #################### # @@ -386,7 +407,7 @@ # Once the behavior processing module is added to the :py:class:`~pynwb.file.NWBFile`, # you can access it with: -print(nwbfile.processing["behavior"]) +nwbfile.processing["behavior"] #################### # .. _basic_writing: @@ -432,12 +453,11 @@ #################### # It is often preferable to read only a portion of the data. 
-# To do this, index or slice into the ``data`` attribute just like if you were
-# indexing or slicing a numpy array.
+# To do this, index or slice into the ``data`` attribute just like you
+# index or slice a numpy array.

 with NWBHDF5IO("basics_tutorial.nwb", "r") as io:
     read_nwbfile = io.read()
-    print(read_nwbfile.acquisition["test_timeseries"])
     print(read_nwbfile.acquisition["test_timeseries"].data[:2])

####################
@@ -453,7 +473,7 @@
 #
 # We can also access the :py:class:`~pynwb.behavior.SpatialSeries` data by referencing the names
 # of the objects in the hierarchy that contain it. We can access a processing module by indexing
-# ``"nwbfile.processing"`` with the name of the processing module, ``"behavior"``.
+# ``nwbfile.processing`` with the name of the processing module, ``"behavior"``.
 #
 # Then, we can access the :py:class:`~pynwb.behavior.Position` object inside of the ``"behavior"``
 # processing module by indexing it with the name of the :py:class:`~pynwb.behavior.Position` object,
@@ -479,6 +499,6 @@
 # data with the single timestamps instance. PyNWB enables this by letting you reuse timestamps across
 # :py:class:`~pynwb.base.TimeSeries` objects. To reuse a :py:class:`~pynwb.base.TimeSeries` timestamps in a new
 # :py:class:`~pynwb.base.TimeSeries`, pass the existing :py:class:`~pynwb.base.TimeSeries` as the new
 # :py:class:`~pynwb.base.TimeSeries` timestamps:

 data = list(range(101, 201, 10))
@@ -503,12 +524,12 @@
 # Trials
 # ^^^^^^
 #
-# Trials are stored in :py:class:`pynwb.epoch.TimeIntervals` object which is
-# a subclass of :py:class:`pynwb.core.DynamicTable`.
-# :py:class:`pynwb.core.DynamicTable` objects are used to store tabular metadata
-# throughout NWB, including trials, electrodes and sorted units. They offer
-# flexibility for tabular data by allowing required columns, optional columns,
-# and custom columns which are not defined in the standard.
+# Trials are stored in :py:class:`~pynwb.epoch.TimeIntervals`, which is
+# a subclass of :py:class:`~hdmf.common.table.DynamicTable`.
+# :py:class:`~hdmf.common.table.DynamicTable` is used to store
+# tabular metadata throughout NWB, including trials, electrodes and sorted units. This
+# class offers flexibility for tabular data by allowing required columns, optional
+# columns, and custom columns which are not defined in the standard.
 #
 # .. only:: html
 #
@@ -524,7 +545,7 @@
 #    :alt: trials UML diagram
 #    :align: center
 #
-# The ``trials`` :py:class:`pynwb.core.DynamicTable` can be thought of
+# The ``trials`` :py:class:`~pynwb.epoch.TimeIntervals` class can be thought of
 # as a table with this structure:
 #
 # .. image:: ../../_static/trials_example.png
@@ -532,18 +553,12 @@
 #    :alt: trials table example
 #    :align: center
 #
-# Trials can be added to the :py:class:`~pynwb.file.NWBFile` using the
-# methods :py:meth:`~pynwb.file.NWBFile.add_trial_column` and :py:meth:`~pynwb.file.NWBFile.add_trial`
-# We can add custom, user-defined columns to the trials table to hold data
-# and metadata specific to this experiment or session.
-# By default, :py:class:`~pynwb.file.NWBFile` only requires the ``start_time``
-# and ``end_time`` of the trial. Additional columns can be added using
-# the :py:meth:`~pynwb.file.NWBFile.add_trial_column` method.
-#
-# Continue adding to our :py:class:`~pynwb.file.NWBFile` by creating a new
-# column for the trials table named ``'correct'``, which will be a boolean array.
-# Once all columns have been added, trial data can be populated using -# :py:meth:`~pynwb.file.NWBFile.add_trial`. +# By default, :py:class:`~pynwb.epoch.TimeIntervals` objects only require ``start_time`` +# and ``stop_time`` of each trial. Additional columns can be added using +# the method :py:meth:`.NWBFile.add_trial_column`. When all the desired custom columns +# have been defined, use the :py:meth:`.NWBFile.add_trial` method to add each row. +# In this case, we will add one custom column to the trials table named "correct" +# which will take a boolean array, then add two trials as rows of the table. nwbfile.add_trial_column( name="correct", @@ -553,9 +568,10 @@ nwbfile.add_trial(start_time=6.0, stop_time=10.0, correct=False) #################### -# Tabular data such as trials can be converted to a :py:class:`~pandas.DataFrame`. +# :py:class:`~hdmf.common.table.DynamicTable` and its subclasses can be converted to a pandas +# :py:class:`~pandas.DataFrame` for convenient analysis using :py:meth:`.DynamicTable.to_dataframe`. -print(nwbfile.trials.to_dataframe()) +nwbfile.trials.to_dataframe() #################### # .. _basic_epochs: @@ -563,8 +579,8 @@ # Epochs # ^^^^^^ # -# Epochs can be added to an NWB file using the method :py:meth:`~pynwb.file.NWBFile.add_epoch`. -# The first and second arguments are the start time and stop times, respectively. +# Like trials, epochs can be added to an NWB file using the methods +# :py:meth:`.NWBFile.add_epoch_column` and :py:meth:`.NWBFile.add_epoch`. # The third argument is one or more tags for labeling the epoch, and the fourth argument is a # list of all the :py:class:`~pynwb.base.TimeSeries` that the epoch applies # to. @@ -583,12 +599,13 @@ timeseries=[time_series_with_timestamps], ) +nwbfile.epochs.to_dataframe() + #################### # Other time intervals # ^^^^^^^^^^^^^^^^^^^^ -# Both ``epochs`` and ``trials`` are of of data type :py:class:`~pynwb.epoch.TimeIntervals`, which is a type of -# ``DynamicTable`` for storing information about time intervals. ``"epochs"`` and ``"trials"`` -# are the two default names for :py:class:`~pynwb.base.TimeIntervals` objects, but you can also add your own +# These :py:class:`~pynwb.epoch.TimeIntervals` objects are stored in ``NWBFile.intervals``. In addition to the default +# ``epochs`` and ``trials``, you can also add your own with custom names. sleep_stages = TimeIntervals( name="sleep_stages", @@ -604,6 +621,8 @@ nwbfile.add_time_intervals(sleep_stages) +sleep_stages.to_dataframe() + #################### # Now we overwrite the file with all of the data @@ -616,35 +635,21 @@ # Appending to an NWB file # ------------------------ # -# Using functionality discussed above, NWB allows appending to files. To append to a file, you must read the file, add -# new components, and then write the file. Reading and writing is carried out using :py:class:`~pynwb.NWBHDF5IO`. -# When reading the NWBFile, you must specify that you intend to modify it by setting the *mode* argument in the -# :py:class:`~pynwb.NWBHDF5IO` constructor to ``'a'``. After you have read the file, you can add [#]_ new data to it -# using the standard write/add functionality demonstrated above. -# -# Let's see how this works by adding another :py:class:`~pynwb.base.TimeSeries` to the BehavioralTimeSeries interface -# we created above. -# -# First, read the file and get the interface object. +# To append to a file, read it with :py:class:`~pynwb.NWBHDF5IO` and set the ``mode`` argument to ``'a'``. 
+# After you have read the file, you can add [#]_ new data to it using the standard write/add functionality demonstrated +# above. Let's see how this works by adding another :py:class:`~pynwb.base.TimeSeries` to acquisition. + io = NWBHDF5IO("basics_tutorial.nwb", mode="a") nwbfile = io.read() -position = nwbfile.processing["behavior"].data_interfaces["Position"] -#################### -# Next, add a new :py:class:`~pynwb.behavior.SpatialSeries`. - -data = list(range(300, 400, 10)) -timestamps = list(range(10)) - -new_spatial_series = SpatialSeries( - name="SpatialSeriesAppended", - data=data, - timestamps=timestamps, - reference_frame="starting_gate", +new_time_series = TimeSeries( + name="new_time_series", + data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + timestamps=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + unit="n.a.", ) -position.add_spatial_series(new_spatial_series) -print(position) +nwbfile.add_acquisition(new_time_series) #################### # Finally, write the changes back to the file and close it. @@ -657,13 +662,10 @@ # example, the default name for :py:class:`~pynwb.ophys.ImageSegmentation` is "ImageSegmentation" and the default # name for :py:class:`~pynwb.ecephys.EventWaveform` is "EventWaveform". # -# .. [#] HDF5 is currently the only backend supported by NWB. +# .. [#] HDF5 is the primary backend supported by NWB. # # .. [#] Neurodata sets can be *very* large, so individual components of the dataset are only loaded into memory when # you request them. This functionality is only possible if an open file handle is kept around until users want to # load data. # # .. [#] NWB only supports *adding* to files. Removal and modifying of existing data is not allowed. - -#################### -# .. _hck04: https://github.com/NeurodataWithoutBorders/nwb_hackathons/tree/master/HCK04_2018_Seattle diff --git a/docs/gallery/general/read_basics.py b/docs/gallery/general/plot_read_basics.py similarity index 61% rename from docs/gallery/general/read_basics.py rename to docs/gallery/general/plot_read_basics.py index d4d50888a..bba380092 100644 --- a/docs/gallery/general/read_basics.py +++ b/docs/gallery/general/plot_read_basics.py @@ -14,7 +14,7 @@ the data in two different ways: (1) by downloading it to your computer and (2) streaming it. We will briefly show tools for exploring NWB Files interactively and refer the reader to the -:nwb_overview:`NWB Overview ` documentation for more details about the available tools. +:nwb_overview:`NWB Overview ` documentation for more details about the available tools. .. seealso:: @@ -24,28 +24,30 @@ The following examples will reference variables that may not be defined within the block they are used in. For clarity, we define them here: """ -import matplotlib.pyplot as plt # sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnails_read_basics.png' + +import matplotlib.pyplot as plt import numpy as np -from dandi.dandiapi import DandiAPIClient from pynwb import NWBHDF5IO #################### -# Read the data -# ------------- -# We will use the `DANDI `_ neurophysiology data archive -# to access an NWB File. We will use data from one session of an experiment by -# `Chandravadia et al. (2020) `_, where +# We will access NWB data on the `DANDI Archive `_, +# and demonstrate reading one session of an experiment by +# `Chandravadia et al. (2020) `_. In this study, # the authors recorded single neuron activity from the medial temporal lobes of human subjects # while they performed a recognition memory task. 
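One way to resolve the download URL for this session programmatically is with :py:class:`~dandi.dandiapi.DandiAPIClient`; a sketch using the dandiset ID ``000004`` and the file path referenced elsewhere in this tutorial:

.. code-block:: python

    from dandi.dandiapi import DandiAPIClient

    dandiset_id = "000004"
    filepath = "sub-P11HMH/sub-P11HMH_ses-20061101_ecephys+image.nwb"
    with DandiAPIClient() as client:
        # look up the asset by its path within the dandiset, then get its URL
        asset = client.get_dandiset(dandiset_id, "draft").get_asset_by_path(filepath)
        url = asset.get_content_url(follow_redirects=1, strip_query=True)

The same URL can then be passed to a downloader or to a streaming reader, as described below.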
# # Download the data -# ^^^^^^^^^^^^^^^^^ +# ----------------- # First, we will demonstrate how to download an NWB data file from `DANDI `_ # to your machine. # +# Download using the DANDI Web UI +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# You can download files directly from the DANDI website. +# # 1. Go to the DANDI archive and open `this `_ dataset # 2. List the files in this dataset by clicking the "Files" button in Dandiset Actions (top right column of the page). # @@ -69,62 +71,60 @@ # :alt: selecting a folder on dandi # :align: center # -# Stream the data -# ^^^^^^^^^^^^^^^ -# -# Next, we will demonstrate how to stream the data from the DANDI archive without -# having to download it to your machine. -# Streaming data requires having HDF5 installed with the ROS3 (read-only S3) driver. -# You can install from `conda-forge `_ using ``conda``. -# You might need to first uninstall a currently installed version of ``h5py``. -# -# .. code-block:: bash -# -# $ pip uninstall h5py -# $ conda install -c conda-forge "h5py>=3.2" -# -# We can access the data stored in an S3 bucket using the DANDI API, -# which can be installed from pip: +# Downloading data programmatically +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Alternatively, you can download data using the `dandi` Python module. + +from dandi.download import download + +download("https://api.dandiarchive.org/api/assets/0f57f0b0-f021-42bb-8eaa-56cd482e2a29/download/", ".") + +###################################################### +# .. seealso:: # -# .. code-block:: bash +# Learn about all the different ways you can download data from the DANDI Archive +# `here `_ # -# $ pip install -U dandi +# .. seealso:: Streaming data # -# .. seealso:: -# You can learn more about streaming data in the :ref:`streaming` tutorial. +# Instead of downloading data, another approach is to stream data directly from an archive. Streaming data allows you +# to download only the data you want from a file, so it can be a much better approach when the desired data files +# contain a lot of data you don't care about. There are several approaches to streaming NWB files, outlined in +# :ref:`streaming`. # -# Then, we will use the :py:class:`~dandi.dandiapi.DandiAPIClient` to obtain the S3 URL that points to the NWB File -# stored in S3. We will need the identifier of the dataset (``dandiset_id``) and the path -# to the NWB File. -# We can read these from the DANDI archive URL where ``dandiset_id`` is "000004" and -# file is located in "sub-P11HMH" folder. - - -dandiset_id = "000004" -filepath = "sub-P11HMH/sub-P11HMH_ses-20061101_ecephys+image.nwb" -with DandiAPIClient() as client: - asset = client.get_dandiset(dandiset_id, "draft").get_asset_by_path(filepath) - s3_path = asset.get_content_url(follow_redirects=1, strip_query=True) - -#################### -# Using NWBHDF5IO -# --------------- +# Opening an NWB file with NWBHDF5IO +# ---------------------------------- # # Reading and writing NWB data is carried out using the :py:class:`~pynwb.NWBHDF5IO` class. # :py:class:`~pynwb.NWBHDF5IO` reads NWB data that is in the `HDF5 `_ # storage format, a popular, hierarchical format for storing large-scale scientific data. # -# The first argument to the constructor of :py:class:`~pynwb.NWBHDF5IO` is the ``file_path`` - -# this can be the path that points to the downloaded file on your computer or -# it can be an S3 URL. -# -# Use the ``read`` method to read the data into a :py:class:`~pynwb.file.NWBFile` object. 
+# The first argument to the constructor of :py:class:`~pynwb.NWBHDF5IO` is the ``file_path``. Use the ``read`` method to +# read the data into a :py:class:`~pynwb.file.NWBFile` object. -# Open the file in read mode "r", and specify the driver as "ros3" for S3 files -io = NWBHDF5IO(s3_path, mode="r", driver="ros3") +filepath = "sub-P11HMH_ses-20061101_ecephys+image.nwb" +# Open the file in read mode "r", +io = NWBHDF5IO(filepath, mode="r", load_namespaces=True) nwbfile = io.read() +nwbfile -#################### +####################################### +# :py:class:`~pynwb.NWBHDF5IO` can also be used as a context manager: + +with NWBHDF5IO(filepath, mode="r", load_namespaces=True) as io2: + nwbfile2 = io2.read() + + # data accessible here + +# data not accessible here + +###################################### +# The advantage of using a context manager is that the file is closed automatically when the context finishes +# successfully or if there is an error. Be aware that if you use this method, closing the context (unindenting the code) +# will automatically close the :py:class:`~pynwb.NWBHDF5IO` object and the corresponding h5py File object. The data not +# already read from the NWB file will then be inaccessible, so any code that reads data must be placed within the +# context. +# # Access stimulus data # -------------------- # @@ -135,39 +135,18 @@ #################### # ``NWBFile.stimulus`` is a dictionary that can contain PyNWB objects representing -# different types of data; such as images (grayscale, RGB) or time series of images. +# different types of data, such as images (grayscale, RGB) or time series of images. # In this file, ``NWBFile.stimulus`` contains a single key "StimulusPresentation" with an # :py:class:`~pynwb.image.OpticalSeries` object representing what images were shown to the subject and at what times. nwbfile.stimulus["StimulusPresentation"] #################### -# .. code-block:: none -# -# {'StimulusPresentation': StimulusPresentation pynwb.image.OpticalSeries at 0x140385583638560 -# Fields: -# comments: no comments -# conversion: 1.0 -# data: -# description: no description -# dimension: -# distance: 0.7 -# field_of_view: -# format: raw -# interval: 1 -# orientation: lower left -# resolution: -1.0 -# timestamps: -# timestamps_unit: seconds -# unit: pixels -# } -# -# # Lazy loading of datasets # ------------------------ # Data arrays are read passively from the NWB file. # Accessing the ``data`` attribute of the :py:class:`~pynwb.image.OpticalSeries` object -# does not read the data values, but presents an HDF5 object that can be indexed to read data. +# does not read the data values, but presents an :py:class:`h5py.Dataset` object that can be indexed to read data. # You can use the ``[:]`` operator to read the entire data array into memory. stimulus_presentation = nwbfile.stimulus["StimulusPresentation"] @@ -181,10 +160,6 @@ stimulus_presentation.data.shape #################### -# .. code-block:: none -# -# (200, 400, 300, 3) -# # This :py:class:`~pynwb.image.OpticalSeries` data contains 200 images of size 400x300 pixels with three channels # (red, green, and blue). # @@ -201,13 +176,6 @@ plt.imshow(image, aspect="auto") #################### -# -# .. 
image:: ../../_static/demo_nwbfile_stimulus_plot_1.png -# :width: 500 -# :alt: NWBFile stimulus image -# :align: center -# -# # Access single unit data # ----------------------- # Data and metadata about sorted single units are stored in :py:class:`~pynwb.misc.Units` @@ -224,7 +192,7 @@ # We can view the single unit data as a :py:class:`~pandas.DataFrame`. units_df = units.to_dataframe() -units_df +units_df.head() #################### # To access the spike times of the first single unit, index :py:class:`~pynwb.file.NWBFile.units` with the column @@ -234,12 +202,6 @@ units["spike_times"][0] #################### -# .. code-block:: none -# -# array([5932.811644, 6081.077044, 6091.982364, 6093.127644, 6095.068204, -# 6097.438244, 6116.694804, 6129.827604, 6134.825004, 6142.583924, ...]) -# -# # Visualize spiking activity relative to stimulus onset # ----------------------------------------------------- # We can look at when these single units spike relative to when image stimuli were presented to the subject. @@ -276,12 +238,6 @@ axs[1].axvline(0, color=[0.5, 0.5, 0.5]) #################### -# -# .. image:: ../../_static/demo_nwbfile_units_plot.png -# :width: 500 -# :alt: NWBFile units visualization -# :align: center -# # Access Trials # ------------- # Trials are stored as :py:class:`~pynwb.epoch.TimeIntervals` object which is a subclass @@ -290,16 +246,16 @@ # and additional metadata. # # .. seealso:: -# You can learn more about trials in the :ref:`basic_trials` tutorial section. +# You can learn more about trials in the :ref:`time_intervals` tutorial. # # Similarly to :py:class:`~pynwb.misc.Units`, we can view trials as a :py:class:`pandas.DataFrame`. trials_df = nwbfile.trials.to_dataframe() -trials_df +trials_df.head() #################### -# The :py:class:`~pynwb.file.NWBFile.stimulus` can be mapped one-to-one to each row (trial) -# of :py:class:`~pynwb.file.NWBFile.trials` based on the ``stim_on_time`` column. +# The stimulus can be mapped one-to-one to each row (trial) of +# :py:class:`~pynwb.file.NWBFile.trials` based on the ``stim_on_time`` column. assert np.all(stimulus_presentation.timestamps[:] == trials_df.stim_on_time[:]) @@ -309,7 +265,7 @@ stim_on_times_landscapes = trials_df[ trials_df.category_name == "landscapes" ].stim_on_time -for time in stim_on_times_landscapes[:3]: +for time in stim_on_times_landscapes.iloc[:3]: img = np.squeeze( stimulus_presentation.data[ np.where(stimulus_presentation.timestamps[:] == time) @@ -321,59 +277,9 @@ plt.imshow(img, aspect="auto") #################### -# -# .. image:: ../../_static/demo_nwbfile_stimulus_plot_2.png -# :width: 500 -# :alt: NWBFile landscapes stimuli image -# :align: center -# # Exploring the NWB file # ---------------------- # So far we have explored the NWB file by printing the :py:class:`~pynwb.file.NWBFile` # object and accessing its attributes, but it may be useful to explore the data in a -# more interactive, visual way. -# -# You can use `NWBWidgets `_, -# a package containing interactive widgets for visualizing NWB data, -# or you can use the `HDFView `_ -# tool, which can open any generic HDF5 file, which an NWB file is. -# -# NWBWidgets -# ^^^^^^^^^^ -# Install NWBWidgets using pip install: -# -# .. code-block:: bash -# -# $ pip install -U nwbwidgets -# -# Then import the ``nwbwidgets`` package and run the ``nwb2widget()`` function on -# the :py:class:`~pynwb.file.NWBFile` object. - -##################### -# -# .. 
code-block:: python
-#
-#    from nwbwidgets import nwb2widget
-#
-#    nwb2widget(nwbfile)
-#
-
-####################
-#
-# .. image:: ../../_static/demo_nwbwidgets.png
-#    :width: 700
-#    :alt: inspect nwb file with nwbwidgets
-#    :align: center
-#
-#
-# HDFView
-# ^^^^^^^
-# To use `HDFView `_ to inspect and explore the NWB file,
-# download and install HDFView from `here `_
-# and then open the NWB file using the application.
-#
-# .. image:: ../../_static/demo_hdfview.png
-#    :width: 700
-#    :alt: inspect nwb file with hdfview
-#    :align: center
-#
+# more interactive, visual way. See :ref:`analysistools-explore` for an updated list of programs for
+# exploring NWB files.
diff --git a/docs/gallery/general/plot_timeintervals.py b/docs/gallery/general/plot_timeintervals.py
index 910e6d2dd..a04a400c5 100644
--- a/docs/gallery/general/plot_timeintervals.py
+++ b/docs/gallery/general/plot_timeintervals.py
@@ -78,7 +78,7 @@
 # tables for :py:meth:`~pynwb.file.NWBFile.epochs`, :py:meth:`~pynwb.file.NWBFile.trials`, and
 # :py:meth:`~pynwb.file.NWBFile.invalid_times`.
 #
-# .. _basic_trials:
+# .. _trials:
 #
 # Trials
 # ^^^^^^
diff --git a/docs/gallery/general/scratch.py b/docs/gallery/general/scratch.py
index 50d97339a..0e00c5e96 100644
--- a/docs/gallery/general/scratch.py
+++ b/docs/gallery/general/scratch.py
@@ -12,7 +12,7 @@
 .. note::

      The scratch space is explicitly for non-standardized data that is not intended for reuse
-     by others. Standard NWB:N types, and extension if required, should always be used for any data that you
+     by others. Standard NWB types, and extensions if required, should always be used for any data that you
      intend to share. As such, published data should not include scratch data and a user should be able
      to ignore any data stored in scratch to use a file.

@@ -127,7 +127,7 @@
 #
 # You may end up wanting to store results from some one-off analysis, and writing an extension
 # to get your data into an NWBFile is too much over head. This is facilitated by the scratch space
-# in NWB:N. [#]_
+# in NWB. [#]_
 #
 # First, lets read our processed data and then make a copy
diff --git a/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb b/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb
index c348c4bd0..65dc34188 100644
--- a/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb
+++ b/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb
@@ -39,7 +39,7 @@
 "source": [
 "This notebook uses the convert script and API for NWB v.1.0.6 (not the current NWB 2.0 and PyNWB) to generate NWB v1.0.6 data files and compare with the current format. This notebook is mainly for comparison purposes. The corresponding notebook for converting the MeisterLab example data to NWB 2.x is available here: https://github.com/NeurodataWithoutBorders/pynwb/blob/dev/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb .\n",
-    "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate (i.e., the orignal write API for NWB v1.x). A tar file with the example data is available for download from: https://portal.nersc.gov/project/crcns/download/nwb-1/example_script_data/source_data_2.tar.gz Please download and uncompress the data file and update the paths in the *Settings* section if you want to run the notebook. "
+    "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate (i.e., the original write API for NWB v1.x). A tar file with the example data is available for download from: https://portal.nersc.gov/project/crcns/download/nwb-1/example_script_data/source_data_2.tar.gz Please download and uncompress the data file and update the paths in the *Settings* section if you want to run the notebook. "
 ]
 },
 {
@@ -71,7 +71,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-    "# 3 Exectute convert using the original H5Gate API"
+    "# 3 Execute convert using the original H5Gate API"
 ]
 },
 {
@@ -1259,7 +1259,7 @@
 "source": [
 "Compared to the convert using NWB v1.0.x shown above, the NWB 2 convert example makes the following main changes:\n",
 "\n",
-    "* NWB 2.x uses the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification. E.g., in the original script for NWB 1.0.x, pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. For NWB 2 we create an extensions MeisterImageSeries which extens ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. For NWB 2 we chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n",
+    "* NWB 2.x uses the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification. E.g., in the original script for NWB 1.0.x, pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. For NWB 2 we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. For NWB 2 we chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n",
 "* Change si_unit attribute to unit for compliance with the spec of ImageSeries \n",
 "* Moved 'source' attribute from the Module to the Interface as source is not defined in the spec for modules but only for Interface\n",
 "* Added missing 'source' for SpikeUnit\n",
@@ -1269,7 +1269,7 @@
 "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n",
 "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n",
 "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n",
-    " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
+    " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
 " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n",
 " * /subject, subject/genotype, subject/species : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n",
 " * /specifications, /specifications/nwb_core.py : See Issue hssue https://github.com/NeurodataWithoutBorders/pynwb/issues/44 will be added by PyNWB automatically"
 ]
 },
{
diff --git a/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb b/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb
index d66a0644e..0107d42aa 100644
--- a/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb
+++ b/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb
@@ -109,7 +109,7 @@
 "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. \n",
 "\n",
 "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n",
-    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n",
+    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n",
 "\n",
 "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n",
 "\n",
@@ -123,7 +123,7 @@
 "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n",
 "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n",
 "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n",
-    " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
+    " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
 " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n",
 " * /subject, subject/genotype, subject/species : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n",
 " * /specifications, /specifications/nwb_core.py : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/44 will be added by PyNWB automatically\n",
@@ -591,7 +591,7 @@
 "# Build the namespace\n",
 "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n",
 "\n",
-    "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n",
+    "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n",
 "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n",
 "                       attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n",
 "                                   NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n",
@@ -697,7 +697,7 @@
 "metadata": {},
 "source": [
 "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n",
-    "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n",
+    "* Our custom attributes have been added to the constructor with appropriate documentation describing the type and purpose we indicated in the spec for our attributes\n",
 "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class"
 ]
 },
@@ -746,7 +746,7 @@
 " |      bits_per_pixel (int): Number of bit per image pixel\n",
 " |      dimension (Iterable): Number of pixels on x, y, (and z) axes.\n",
 " |      resolution (float): The smallest meaningful difference (in specified unit) between values in data\n",
-    " |      conversion (float): Scalar to multiply each element by to conver to volts\n",
+    " |      conversion (float): Scalar to multiply each element by to convert to volts\n",
 " |      timestamps (ndarray or list or tuple or Dataset or DataChunkIterator or DataIO or TimeSeries): Timestamps for samples stored in data\n",
 " |      starting_time (float): The timestamp of the first sample\n",
 " |      rate (float): Sampling rate in Hz\n",
@@ -885,7 +885,7 @@
 "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n",
 "    import h5py\n",
 "    #########################################\n",
-    "    # Create the NWBFile containter\n",
+    "    # Create the NWBFile container\n",
 "    ##########################################\n",
 "    nwbfile = NWBFile(session_description=file_meta['description'],\n",
 "                      identifier=file_meta['identifier'],\n",
@@ -1004,7 +1004,7 @@
 "source": [
 "## Step 5.2: Convert all files\n",
 "\n",
-    "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file"
+    "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the files"
 ]
 },
{
diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb
index 73090873a..4d081f7ab 100644
--- a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb
+++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb
@@ -100,7 +100,7 @@
 "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. \n",
 "\n",
 "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n",
-    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n",
+    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n",
 "\n",
 "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n",
 "\n",
@@ -113,12 +113,12 @@
 "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n",
 "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n",
 "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n",
-    " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
+    " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
 " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n",
 " * /subject, subject/genotype, subject/species : See Issue https://bitbucket.org/lblneuro/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n",
 " * /specifications, /specifications/nwb_core.py : See Issue https://bitbucket.org/lblneuro/pynwb/issues/44 will be added by PyNWB automatically\n",
 "\n",
-    "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
+    "For readability and to ease comparison, we include in Section 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
 ]
 },
 {
@@ -570,7 +570,7 @@
 "# Build the namespace\n",
 "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n",
 "\n",
-    "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n",
+    "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n",
 "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n",
 "                       attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n",
 "                                   NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n",
@@ -605,7 +605,7 @@
 "metadata": {},
 "source": [
 "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n",
-    "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n",
+    "* Our custom attributes have been added to the constructor with appropriate documentation describing the type and purpose we indicated in the spec for our attributes\n",
 "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class"
 ]
 },
@@ -652,7 +652,7 @@
 " |      bits_per_pixel (int): Number of bit per image pixel\n",
 " |      dimension (Iterable): Number of pixels on x, y, (and z) axes.\n",
 " |      resolution (float): The smallest meaningful difference (in specified unit) between values in data\n",
-    " |      conversion (float): Scalar to multiply each element by to conver to volts\n",
+    " |      conversion (float): Scalar to multiply each element by to convert to volts\n",
 " |      timestamps (list or ndarray or TimeSeries): Timestamps for samples stored in data\n",
 " |      starting_time (float): The timestamp of the first sample\n",
 " |      rate (float): Sampling rate in Hz\n",
@@ -887,7 +887,7 @@
 "source": [
 "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n",
 "    #########################################\n",
-    "    # Create the NWBFile containter\n",
+    "    # Create the NWBFile container\n",
 "    ##########################################\n",
 "    nwbfile = NWBFile(file_name=file_meta['output_filename'],\n",
 "                      session_description=file_meta['description'],\n",
@@ -1001,7 +1001,7 @@
 "source": [
 "## Step 5.3: Convert all files\n",
 "\n",
-    "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file"
+    "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the files"
 ]
 },
{
diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb
index de032a1df..d612c11c9 100644
--- a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb
+++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb
@@ -100,7 +100,7 @@
 "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. \n",
 "\n",
 "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n",
-    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n",
+    "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n",
 "\n",
 "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n",
 "\n",
@@ -110,16 +110,16 @@
 "* Added missing tags and description for epochs\n",
 "* Added /general/devices/... to describe the device\n",
 "* Added neurodata_type and namespace attributes for format compliance\n",
-    "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries diretly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n",
-    "* /general/extracellular_ephys has been resturctured so that alldata about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n",
+    "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries directly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n",
+    "* /general/extracellular_ephys has been restructured so that all data about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n",
 "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n",
 "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n",
-    " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
+    " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n",
 " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n",
 " * /subject, subject/genotype, subject/species : See Issue https://bitbucket.org/lblneuro/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n",
 " * /specifications, /specifications/nwb_core.py : See Issue https://bitbucket.org/lblneuro/pynwb/issues/44 will be added by PyNWB automatically\n",
 "\n",
-    "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
+    "For readability and to ease comparison, we include in Section 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
 ]
 },
 {
@@ -555,7 +555,7 @@
 "# Build the namespace\n",
 "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n",
 "\n",
-    "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n",
+    "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n",
 "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n",
 "                       attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n",
 "                                   NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n",
@@ -590,7 +590,7 @@
 "metadata": {},
 "source": [
 "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n",
-    "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n",
+    "* Our custom attributes have been added to the constructor with appropriate documentation describing the type and purpose we indicated in the spec for our attributes\n",
 "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class"
 ]
 },
@@ -637,7 +637,7 @@
 " |      bits_per_pixel (int): Number of bit per image pixel\n",
 " |      dimension (Iterable): Number of pixels on x, y, (and z) axes.\n",
 " |      resolution (float): The smallest meaningful difference (in specified unit) between values in data\n",
-    " |      conversion (float): Scalar to multiply each element by to conver to volts\n",
+    " |      conversion (float): Scalar to multiply each element by to convert to volts\n",
 " |      timestamps (list or ndarray or TimeSeries): Timestamps for samples stored in data\n",
 " |      starting_time (float): The timestamp of the first sample\n",
 " |      rate (float): Sampling rate in Hz\n",
@@ -871,7 +871,7 @@
 "source": [
 "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n",
 "    #########################################\n",
-    "    # Create the NWBFile containter\n",
+    "    # Create the NWBFile container\n",
 "    ##########################################\n",
 "    nwbfile = NWBFile(file_name=file_meta['output_filename'],\n",
 "                      session_description=file_meta['description'],\n",
@@ -974,7 +974,7 @@
 "source": [
 "## Step 5.3: Convert all files\n",
 "\n",
-    "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file"
+    "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the files"
 ]
 },
{
diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb
index fc7147686..a03f26ff0 100644
--- a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb
+++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb
@@ -105,15 +105,15 @@
 "* Added missing tags and description for epochs\n",
 "* Added /general/devices/... to describe the device\n",
 "* Added neurodata_type and namespace attributes for format compliance\n",
-    "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries diretly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n",
-    "* /general/extracellular_ephys has been resturctured so that alldata about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n",
+    "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries directly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n",
+    "* /general/extracellular_ephys has been restructured so that all data about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n",
 "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n",
 "* This notebook, currently does not store custom metadata fields (i.e., datasets that were added to the file without being part of the NWB specification and without creation of corresponding extensions). The main omitted objects are:\n",
 " * ImageSeries: pixle_size, meister_x, meister_y, meister_dx, meister_dy (the data of those variables is available in this notebook as part of the stimulus_data dict / curr_stimulus)\n",
-    " * /general custom metdata: /subject, subject/genotype, subject/species, /specifications, /specifications/nwb_core.py, /notes, /random_number_generation, /related_publications, \n",
+    " * /general custom metadata: /subject, subject/genotype, subject/species, /specifications, /specifications/nwb_core.py, /notes, /random_number_generation, /related_publications, \n",
 " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times: /processing/Cells/UnitTimes/cell_*/stim_*\n",
 "\n",
-    "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
+    "For readability and to ease comparison, we include in Section 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. "
 ]
 },
 {
@@ -552,7 +552,7 @@
 "source": [
 "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n",
 "    #########################################\n",
-    "    # Create the NWBFile containter\n",
+    "    # Create the NWBFile container\n",
 "    ##########################################\n",
 "    nwbfile = NWBFile(file_name=file_meta['output_filename'],\n",
 "                      session_description=file_meta['description'],\n",
@@ -649,7 +649,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-    "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file"
+    "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the files"
 ]
 },
{
diff --git a/docs/notebooks/read-Allen-Brain-Oservatory.ipynb b/docs/notebooks/read-Allen-Brain-Oservatory.ipynb
index 7f5825a66..84c5288c7 100644
--- a/docs/notebooks/read-Allen-Brain-Oservatory.ipynb
+++ b/docs/notebooks/read-Allen-Brain-Oservatory.ipynb
@@ -185,7 +185,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-    "Each of these three \"ophys experiments\" corresponds to a single 2-photon microscopy recording session. In each container, the particular visual stimulus that was presented is indicated by the `session_type`. Each `session_type` contains several diffferent stimulus sets; for more information, take a look at the [whitepaper](http://help.brain-map.org/display/observatory/Data+-+Visual+Coding).\n",
+    "Each of these three \"ophys experiments\" corresponds to a single 2-photon microscopy recording session. In each container, the particular visual stimulus that was presented is indicated by the `session_type`. Each `session_type` contains several different stimulus sets; for more information, take a look at the [whitepaper](http://help.brain-map.org/display/observatory/Data+-+Visual+Coding).\n",
 "\n",
 "Lets use pynwb to load the data from `three_session_B`. If this is the first time through this notebook this might take a minute to download these files, and will require approximately 300Mb of total disk space for the three nwb files: "
 ]
@@ -215,7 +215,7 @@
 "metadata": {},
 "source": [
 "# Reading legacy nwb files with pynwb:\n",
-    "Now that we have downloaded the nwb files using the `BrainObservatoryCache`, we can use pynwb to load the data and take a peek inside. Because this file was created from [version 1.0 of the NWB Schema](https://github.com/NeurodataWithoutBorders/specification/blob/master/version_1.0.6/NWB_file_format_specification_1.0.6.pdf>), we have to use a object called a type map to help the [NWB 2.0 schema](http://nwb-schema.readthedocs.io/en/latest/format.html) interpret the data in the \"legacy\" file. Alot has changed in the transition from NWB 1.0 to 2.0, including a more modular software architecture, a simplified (and extended) specification language, a mechanism for easy creation of custom schema extensions ([Click here for more information](http://www.nwb.org/2017/09/06/what-is-new-in-nwbn-v2-0/))."
+    "Now that we have downloaded the nwb files using the `BrainObservatoryCache`, we can use pynwb to load the data and take a peek inside. Because this file was created from [version 1.0 of the NWB Schema](https://github.com/NeurodataWithoutBorders/specification/blob/master/version_1.0.6/NWB_file_format_specification_1.0.6.pdf>), we have to use an object called a type map to help the [NWB 2.0 schema](http://nwb-schema.readthedocs.io/en/latest/format.html) interpret the data in the \"legacy\" file. A lot has changed in the transition from NWB 1.0 to 2.0, including a more modular software architecture, a simplified (and extended) specification language, a mechanism for easy creation of custom schema extensions ([Click here for more information](http://www.nwb.org/2017/09/06/what-is-new-in-nwbn-v2-0/))."
 ]
 },
 {
@@ -423,7 +423,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-    "Lets use the allensdk to plot one of these image templates from one of the natural movies that was shown during this session. The template is has an original shape of 304x608 pixels, however this source image is stretched to fit on a 1200x1920 monitor, and warped so that scene appears flat from the perspective of the viewer, whos eye is close to the screen. For more information about the Brain Observatory Stimulus, check out the [stimulus whitepaper](http://help.brain-map.org/download/attachments/10616846/VisualCoding_VisualStimuli.pdf)"
+    "Let's use the allensdk to plot one of these image templates from one of the natural movies that was shown during this session. The template has an original shape of 304x608 pixels, however this source image is stretched to fit on a 1200x1920 monitor, and warped so that the scene appears flat from the perspective of the viewer, whose eye is close to the screen.
For more information about the Brain Observatory Stimulus, check out the [stimulus whitepaper](http://help.brain-map.org/download/attachments/10616846/VisualCoding_VisualStimuli.pdf)" ] }, { diff --git a/docs/source/_static/demo_hdfview.png b/docs/source/_static/demo_hdfview.png deleted file mode 100644 index e87750ab0..000000000 Binary files a/docs/source/_static/demo_hdfview.png and /dev/null differ diff --git a/docs/source/_static/demo_nwbfile_stimulus_plot_1.png b/docs/source/_static/demo_nwbfile_stimulus_plot_1.png deleted file mode 100644 index 708240366..000000000 Binary files a/docs/source/_static/demo_nwbfile_stimulus_plot_1.png and /dev/null differ diff --git a/docs/source/_static/demo_nwbfile_stimulus_plot_2.png b/docs/source/_static/demo_nwbfile_stimulus_plot_2.png deleted file mode 100644 index 6bf9e2ced..000000000 Binary files a/docs/source/_static/demo_nwbfile_stimulus_plot_2.png and /dev/null differ diff --git a/docs/source/_static/demo_nwbfile_units_plot.png b/docs/source/_static/demo_nwbfile_units_plot.png deleted file mode 100644 index d51e0f413..000000000 Binary files a/docs/source/_static/demo_nwbfile_units_plot.png and /dev/null differ diff --git a/docs/source/_static/demo_nwbwidgets.png b/docs/source/_static/demo_nwbwidgets.png deleted file mode 100644 index edb844402..000000000 Binary files a/docs/source/_static/demo_nwbwidgets.png and /dev/null differ diff --git a/docs/source/_static/trials_example.png b/docs/source/_static/trials_example.png index 294b50eb2..8f3b82221 100644 Binary files a/docs/source/_static/trials_example.png and b/docs/source/_static/trials_example.png differ diff --git a/docs/source/conf.py b/docs/source/conf.py index 3b21fe457..143d9d2c6 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -75,7 +75,7 @@ class CustomSphinxGallerySectionSortKey(ExampleTitleSortKey): # listed here will be added in alphabetical order based on title after the # explicitly listed galleries GALLERY_ORDER = { - 'general': ['file.py'], + 'general': ['plot_file.py'], # Sort domain-specific tutorials based on domain to group tutorials belonging to the same domain 'domain': [ "ecephys.py", @@ -134,6 +134,7 @@ def __call__(self, filename): 'min_reported_time': 5, 'remove_config_comments': True, 'within_subsection_order': CustomSphinxGallerySectionSortKey, + 'nested_sections': False, # See issue https://github.com/sphinx-gallery/sphinx-gallery/issues/1152 } intersphinx_mapping = { @@ -145,6 +146,8 @@ def __call__(self, filename): 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'dandi': ('https://dandi.readthedocs.io/en/stable/', None), 'fsspec': ("https://filesystem-spec.readthedocs.io/en/latest/", None), + 'nwbwidgets': ("https://nwb-widgets.readthedocs.io/en/latest/", None), + 'nwb-overview': ("https://nwb-overview.readthedocs.io/en/latest/", None), } extlinks = { diff --git a/docs/source/figures/gallery_thumbnails.pptx b/docs/source/figures/gallery_thumbnails.pptx index 6850d8819..51ebe79d0 100644 Binary files a/docs/source/figures/gallery_thumbnails.pptx and b/docs/source/figures/gallery_thumbnails.pptx differ diff --git a/docs/source/figures/gallery_thumbnails_ogen.png b/docs/source/figures/gallery_thumbnails_ogen.png new file mode 100644 index 000000000..8ad652070 Binary files /dev/null and b/docs/source/figures/gallery_thumbnails_ogen.png differ diff --git a/docs/source/install_developers.rst b/docs/source/install_developers.rst index a83866325..b2c2d18e7 100644 --- a/docs/source/install_developers.rst +++ 
b/docs/source/install_developers.rst @@ -6,7 +6,7 @@ Installing PyNWB for Developers PyNWB has the following minimum requirements, which must be installed before you can get started using PyNWB. -#. Python 3.7, 3.8, 3.9, 3.10, or 3.11 +#. Python 3.8, 3.9, 3.10, or 3.11 #. pip diff --git a/docs/source/install_users.rst b/docs/source/install_users.rst index 6da8f733a..6e33c2035 100644 --- a/docs/source/install_users.rst +++ b/docs/source/install_users.rst @@ -6,7 +6,7 @@ Installing PyNWB PyNWB has the following minimum requirements, which must be installed before you can get started using PyNWB. -#. Python 3.7, 3.8, 3.9, 3.10, or 3.11 +#. Python 3.8, 3.9, 3.10, or 3.11 #. pip .. note:: If you are a developer then please see the :ref:`install_developers` installation instructions instead. diff --git a/docs/source/make_a_release.rst b/docs/source/make_a_release.rst index 7d3a5bc64..f88b627ea 100644 --- a/docs/source/make_a_release.rst +++ b/docs/source/make_a_release.rst @@ -220,8 +220,8 @@ In order to release a new version on conda-forge manually, follow the steps below. 6. Modify ``meta.yaml``. - Update the `version string `_ and - `sha256 `_. + Update the `version string (line 2) `_ and + `sha256 (line 3) `_. We have to modify the sha and the version string in the ``meta.yaml`` file. diff --git a/docs/source/testing/mock.rst b/docs/source/testing/mock.rst index fe0f761a3..c640c2c14 100644 --- a/docs/source/testing/mock.rst +++ b/docs/source/testing/mock.rst @@ -7,9 +7,9 @@ neurodata objects. However, this can be quite laborious for some types. For instance, creating an :py:class:`~pynwb.ophys.RoiResponseSeries` requires the creation of a :py:class:`~hdmf.common.table.DynamicTableRegion` of a :py:class:`~pynwb.ophys.PlaneSegmentation` table with the appropriate rows. This object in turn requires input of an :py:class:`~pynwb.ophys.ImageSegmentation` object, which in turn requires a :py:class:`~pynwb.device.Device` and an :py:class:`~pynwb.ophys.OpticalChannel` object. In -the end, creating a single neurodata object in this case requires the creation of 5 other objects. ``testing.mock`` -is a module that creates boilerplate objects with a single line of code that can be used for testing. In this case, you -could simply run +the end, creating a single neurodata object in this case requires the creation of 5 other objects. +:py:mod:`.testing.mock` is a module that creates boilerplate objects with a single line of code that can be used for +testing. In this case, you could simply run .. code-block:: python @@ -28,6 +28,44 @@ necessary neurodata types. You can customize any of these fields just as you would roi_response_series = mock_RoiResponseSeries(data=[[1,2,3], [1,2,3]]) + +If you want to create objects and automatically add them to an :py:class:`~pynwb.file.NWBFile`, create an +:py:class:`~pynwb.file.NWBFile` and pass it into the mock function: + +.. code-block:: python + + from pynwb.testing.mock.file import mock_NWBFile + from pynwb.testing.mock.ophys import mock_RoiResponseSeries + + nwbfile = mock_NWBFile() + mock_RoiResponseSeries(nwbfile=nwbfile) + +Now this NWBFile contains an :py:class:`~pynwb.ophys.RoiResponseSeries` and all the upstream classes: + +.. 
code-block:: + + >>> print(nwbfile) + + root pynwb.file.NWBFile at 0x4335131760 + Fields: + devices: { + Device , + Device2 + } + file_create_date: [datetime.datetime(2023, 6, 26, 21, 56, 44, 322249, tzinfo=tzlocal())] + identifier: 3c13e816-a50f-49a9-85ec-93b9944c3e79 + imaging_planes: { + ImagingPlane , + ImagingPlane2 + } + processing: { + ophys + } + session_description: session_description + session_start_time: 1970-01-01 00:00:00-05:00 + timestamps_reference_time: 1970-01-01 00:00:00-05:00 + + Name generator -------------- Two neurodata objects stored in the same location within an NWB file must have unique names. This can cause an error diff --git a/environment-ros3.yml b/environment-ros3.yml index 83e9d37d2..ae15e985c 100644 --- a/environment-ros3.yml +++ b/environment-ros3.yml @@ -12,4 +12,7 @@ dependencies: - pandas==2.0.0 - python-dateutil==2.8.2 - setuptools - - dandi==0.52.0 # NOTE: dandi does not support osx-arm64 + - dandi==0.55.1 # NOTE: dandi does not support osx-arm64 + - fsspec==2023.6.0 + - requests==2.28.1 + - aiohttp==3.8.3 diff --git a/requirements-dev.txt b/requirements-dev.txt index 4bc50e943..a19b50bd3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,13 +5,10 @@ black==23.3.0 codespell==2.2.4 coverage==7.2.2 -flake8==6.0.0; python_version >= "3.8" -flake8==5.0.4; python_version < "3.8" +flake8==6.0.0 flake8-debugger==4.1.2 flake8-print==5.0.0 -isort==5.12.0; python_version >= "3.8" -isort==5.11.5; python_version < "3.8" +isort==5.12.0 pytest==7.1.2 pytest-cov==4.0.0 -tox==4.4.8; python_version >= "3.8" -tox==3.28.0; python_version < "3.8" +tox==4.4.8 diff --git a/requirements-doc.txt b/requirements-doc.txt index f2a82da73..2050f4439 100644 --- a/requirements-doc.txt +++ b/requirements-doc.txt @@ -11,5 +11,4 @@ sphinx-copybutton dataframe_image # used to render large dataframe as image in the sphinx gallery to improve html display lxml # used by dataframe_image when using the matplotlib backend hdf5plugin -importlib-metadata<4.3; python_version < "3.8" # TODO: remove when minimum python version is 3.8 - +dandi>=0.46.6 diff --git a/requirements-min.txt b/requirements-min.txt index 46a79dce0..8f52348f1 100644 --- a/requirements-min.txt +++ b/requirements-min.txt @@ -1,7 +1,7 @@ # minimum versions of package dependencies for installing PyNWB h5py==2.10 # support for selection of datasets with list of indices added in 2.10 -hdmf==3.5.4 -numpy==1.16 +hdmf==3.9.0 +numpy==1.18 pandas==1.1.5 python-dateutil==2.7.3 setuptools diff --git a/requirements.txt b/requirements.txt index 1b20665ca..2ad7b813e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,7 @@ # pinned dependencies to reproduce an entire development environment to use PyNWB h5py==3.8.0 -hdmf==3.5.4 -numpy==1.24.2; python_version >= "3.8" -numpy==1.21.5; python_version < "3.8" # note that numpy 1.22 dropped python 3.7 support -pandas==2.0.0; python_version >= "3.8" -pandas==1.3.5; python_version < "3.8" # note that pandas 1.4 dropped python 3.7 support +hdmf==3.9.0 +numpy==1.24.2 +pandas==2.0.0 python-dateutil==2.8.2 setuptools==65.5.1 diff --git a/setup.cfg b/setup.cfg index 3c492df25..cccacf048 100644 --- a/setup.cfg +++ b/setup.cfg @@ -28,6 +28,7 @@ per-file-ignores = tests/integration/__init__.py:F401 src/pynwb/testing/__init__.py:F401 src/pynwb/validate.py:T201 + tests/read_dandi/test_read_dandi.py:T201 setup.py:T201 test.py:T201 scripts/*:T201 diff --git a/setup.py b/setup.py index 2522618fc..90aebf55f 100755 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ reqs = [ 
'h5py>=2.10', - 'hdmf>=3.5.4', + 'hdmf>=3.9.0', 'numpy>=1.16', 'pandas>=1.1.5', 'python-dateutil>=2.7.3', @@ -44,10 +44,9 @@ 'packages': pkgs, 'package_dir': {'': 'src'}, 'package_data': {'pynwb': ["%s/*.yaml" % schema_dir, "%s/*.json" % schema_dir]}, - 'python_requires': '>=3.7', + 'python_requires': '>=3.8', 'classifiers': [ "Programming Language :: Python", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", diff --git a/src/pynwb/__init__.py b/src/pynwb/__init__.py index fc23b615c..0034d8ad5 100644 --- a/src/pynwb/__init__.py +++ b/src/pynwb/__init__.py @@ -253,10 +253,13 @@ def can_read(path: str): {'name': 'file', 'type': [h5py.File, 'S3File'], 'doc': 'a pre-existing h5py.File object', 'default': None}, {'name': 'comm', 'type': "Intracomm", 'doc': 'the MPI communicator to use for parallel I/O', 'default': None}, - {'name': 'driver', 'type': str, 'doc': 'driver for h5py to use when opening HDF5 file', 'default': None}) + {'name': 'driver', 'type': str, 'doc': 'driver for h5py to use when opening HDF5 file', 'default': None}, + {'name': 'herd_path', 'type': str, 'doc': 'The path to the HERD', + 'default': None},) def __init__(self, **kwargs): - path, mode, manager, extensions, load_namespaces, file_obj, comm, driver =\ - popargs('path', 'mode', 'manager', 'extensions', 'load_namespaces', 'file', 'comm', 'driver', kwargs) + path, mode, manager, extensions, load_namespaces, file_obj, comm, driver, herd_path =\ + popargs('path', 'mode', 'manager', 'extensions', 'load_namespaces', + 'file', 'comm', 'driver', 'herd_path', kwargs) # Define the BuildManager to use if load_namespaces: if manager is not None: @@ -285,7 +288,8 @@ def __init__(self, **kwargs): elif manager is None: manager = get_manager() # Open the file - super().__init__(path, manager=manager, mode=mode, file=file_obj, comm=comm, driver=driver) + super().__init__(path, manager=manager, mode=mode, file=file_obj, comm=comm, + driver=driver, herd_path=herd_path) @property def nwb_version(self): @@ -319,7 +323,8 @@ def read(self, **kwargs): raise TypeError("NWB version %s not supported. PyNWB supports NWB files version 2 and above." % str(file_version_str)) # read the file - return super().read(**kwargs) + file = super().read(**kwargs) + return file @docval({'name': 'src_io', 'type': HDMFIO, 'doc': 'the HDMFIO object (such as NWBHDF5IO) that was used to read the data to export'}, @@ -371,7 +376,6 @@ def export(self, **kwargs): from .core import NWBContainer, NWBData # noqa: F401,E402 from .base import TimeSeries, ProcessingModule # noqa: F401,E402 from .file import NWBFile # noqa: F401,E402 - from . import behavior # noqa: F401,E402 from . import device # noqa: F401,E402 from . 
import ecephys # noqa: F401,E402 diff --git a/src/pynwb/base.py b/src/pynwb/base.py index 41f163c70..bec8903d5 100644 --- a/src/pynwb/base.py +++ b/src/pynwb/base.py @@ -281,6 +281,15 @@ def __add_link(self, links_key, link): def time_unit(self): return self.__time_unit + def get_timestamps(self): + if self.fields.get('timestamps'): + return self.timestamps + else: + return np.arange(len(self.data)) / self.rate + self.starting_time + + def get_data_in_units(self): + return np.asarray(self.data) * self.conversion + self.offset + @register_class('Image', CORE_NAMESPACE) class Image(NWBData): diff --git a/src/pynwb/file.py b/src/pynwb/file.py index 31b2d8e1e..b473e571a 100644 --- a/src/pynwb/file.py +++ b/src/pynwb/file.py @@ -8,6 +8,7 @@ import pandas as pd from hdmf.common import DynamicTableRegion, DynamicTable +from hdmf.container import HERDManager from hdmf.utils import docval, getargs, get_docval, popargs, popargs_to_dict, AllowPositional from . import register_class, CORE_NAMESPACE @@ -149,7 +150,7 @@ def __init__(self, **kwargs): @register_class('NWBFile', CORE_NAMESPACE) -class NWBFile(MultiContainerInterface): +class NWBFile(MultiContainerInterface, HERDManager): """ A representation of an NWB file. """ diff --git a/src/pynwb/misc.py b/src/pynwb/misc.py index 098fce1de..4d977b4f2 100644 --- a/src/pynwb/misc.py +++ b/src/pynwb/misc.py @@ -75,7 +75,7 @@ def __init__(self, **kwargs): {'name': 'features', 'type': (list, np.ndarray), 'doc': 'the feature values for this time point'}) def add_features(self, **kwargs): time, features = getargs('time', 'features', kwargs) - if type(self.timestamps) == list and type(self.data) is list: + if isinstance(self.timestamps, list) and isinstance(self.data, list): self.timestamps.append(time) self.data.append(features) else: diff --git a/src/pynwb/resources.py b/src/pynwb/resources.py new file mode 100644 index 000000000..acdc22b12 --- /dev/null +++ b/src/pynwb/resources.py @@ -0,0 +1,14 @@ +from hdmf.common import HERD as hdmf_HERD +from . import get_type_map as tm +from hdmf.utils import docval, get_docval + + +class HERD(hdmf_HERD): + """ + HDMF External Resources Data Structure. + A table for mapping user terms (i.e. keys) to resource entities. + """ + @docval(*get_docval(hdmf_HERD.__init__)) + def __init__(self, **kwargs): + kwargs['type_map'] = tm() + super().__init__(**kwargs) diff --git a/src/pynwb/testing/mock/base.py b/src/pynwb/testing/mock/base.py index 37271dd8f..45b95fc08 100644 --- a/src/pynwb/testing/mock/base.py +++ b/src/pynwb/testing/mock/base.py @@ -1,25 +1,30 @@ +from typing import Optional + import numpy as np +from ... 
import NWBFile from ...base import TimeSeries from .utils import name_generator def mock_TimeSeries( - name=None, + name: Optional[str] = None, data=None, - unit="volts", - resolution=-1.0, - conversion=1.0, + unit: str = "volts", + resolution: float = -1.0, + conversion: float = 1.0, timestamps=None, - starting_time=None, - rate=10.0, - comments="no comments", - description="no description", + starting_time: Optional[float] = None, + rate: Optional[float] = 10.0, + comments: str = "no comments", + description: str = "no description", control=None, control_description=None, continuity=None, -): - return TimeSeries( + nwbfile: Optional[NWBFile] = None, + offset=0., +) -> TimeSeries: + time_series = TimeSeries( name=name or name_generator("TimeSeries"), data=data if data is not None else np.array([1, 2, 3, 4]), unit=unit, @@ -33,4 +38,10 @@ def mock_TimeSeries( control=control, control_description=control_description, continuity=continuity, + offset=offset, ) + + if nwbfile is not None: + nwbfile.add_acquisition(time_series) + + return time_series diff --git a/src/pynwb/testing/mock/behavior.py b/src/pynwb/testing/mock/behavior.py index b0aba5eb1..b76d21dc8 100644 --- a/src/pynwb/testing/mock/behavior.py +++ b/src/pynwb/testing/mock/behavior.py @@ -1,5 +1,8 @@ +from typing import Optional + import numpy as np +from ... import NWBFile, TimeSeries from ...behavior import ( PupilTracking, Position, @@ -11,21 +14,22 @@ def mock_SpatialSeries( - name=None, + name: Optional[str] = None, data=None, - reference_frame="lower left is (0, 0)", - unit="meters", + reference_frame: str = "lower left is (0, 0)", + unit: str = "meters", conversion=1.0, resolution=-1.0, timestamps=None, - starting_time=None, - rate=10.0, - comments="no comments", - description="no description", + starting_time: Optional[float] = None, + rate: Optional[float] = 10.0, + comments: str = "no comments", + description: str = "no description", control=None, control_description=None, -): - return SpatialSeries( + nwbfile: Optional[NWBFile] = None, +) -> SpatialSeries: + spatial_series = SpatialSeries( name=name or name_generator("SpatialSeries"), data=data if data is not None else np.array([1, 2, 3, 4]), reference_frame=reference_frame, @@ -41,21 +45,48 @@ def mock_SpatialSeries( control_description=control_description, ) + if nwbfile is not None: + nwbfile.add_acquisition(spatial_series) + + return spatial_series + def mock_Position( - name=None, spatial_series=None, -): - return Position(name=name or name_generator("Position"), spatial_series=spatial_series or [mock_SpatialSeries()]) + name: Optional[str] = None, spatial_series: Optional[SpatialSeries] = None, nwbfile: Optional[NWBFile] = None, +) -> Position: + + position = Position( + name=name or name_generator("Position"), spatial_series=spatial_series or [mock_SpatialSeries(nwbfile=nwbfile)] + ) + + if nwbfile is not None: + nwbfile.add_acquisition(position) + return position def mock_PupilTracking( - name=None, time_series=None, -): - return PupilTracking(name=name or name_generator("PupilTracking"), time_series=time_series or [mock_TimeSeries()]) + name: Optional[str] = None, time_series: Optional[TimeSeries] = None, nwbfile: Optional[NWBFile] = None +) -> PupilTracking: + pupil_tracking = PupilTracking( + name=name or name_generator("PupilTracking"), time_series=time_series or [mock_TimeSeries(nwbfile=nwbfile)] + ) + + if nwbfile is not None: + nwbfile.add_acquisition(pupil_tracking) + + return pupil_tracking -def mock_CompassDirection(name=None, spatial_series=None): - 
return CompassDirection( +def mock_CompassDirection( + name: Optional[str] = None, spatial_series: Optional[SpatialSeries] = None, nwbfile: Optional[NWBFile] = None +) -> CompassDirection: + + compass_direction = CompassDirection( name=name or name_generator("CompassDirection"), - spatial_series=spatial_series or [mock_SpatialSeries(unit="radians")], + spatial_series=spatial_series or [mock_SpatialSeries(unit="radians", nwbfile=nwbfile)], ) + + if nwbfile is not None: + nwbfile.add_acquisition(compass_direction) + + return compass_direction diff --git a/src/pynwb/testing/mock/device.py b/src/pynwb/testing/mock/device.py index b1ad960cc..06ac628e8 100644 --- a/src/pynwb/testing/mock/device.py +++ b/src/pynwb/testing/mock/device.py @@ -1,15 +1,24 @@ +from typing import Optional + +from ... import NWBFile from ...device import Device from .utils import name_generator def mock_Device( - name=None, - description="description", - manufacturer=None, -): - return Device( + name: Optional[str] = None, + description: str = "description", + manufacturer: Optional[str] = None, + nwbfile: Optional[NWBFile] = None, +) -> Device: + device = Device( name=name or name_generator("Device"), description=description, manufacturer=manufacturer, ) + + if nwbfile is not None: + nwbfile.add_device(device) + + return device diff --git a/src/pynwb/testing/mock/ecephys.py b/src/pynwb/testing/mock/ecephys.py index 3e9e2d960..888f19962 100644 --- a/src/pynwb/testing/mock/ecephys.py +++ b/src/pynwb/testing/mock/ecephys.py @@ -1,42 +1,62 @@ +from typing import Optional + import numpy as np -from hdmf.common.table import DynamicTableRegion +from hdmf.common.table import DynamicTableRegion, DynamicTable -from ...file import ElectrodeTable +from ...device import Device +from ...file import ElectrodeTable, NWBFile from ...ecephys import ElectricalSeries, ElectrodeGroup, SpikeEventSeries from .device import mock_Device from .utils import name_generator def mock_ElectrodeGroup( - name=None, - description="description", - location="location", - device=None, - position=None, -): - return ElectrodeGroup( + name: Optional[str] = None, + description: str = "description", + location: str = "location", + device: Optional[Device] = None, + position: Optional[str] = None, + nwbfile: Optional[NWBFile] = None, +) -> ElectrodeGroup: + + electrode_group = ElectrodeGroup( name=name or name_generator("ElectrodeGroup"), description=description, location=location, - device=device or mock_Device(), + device=device or mock_Device(nwbfile=nwbfile), position=position, ) + if nwbfile is not None: + nwbfile.add_electrode_group(electrode_group) + + return electrode_group -def mock_ElectrodeTable(n_rows=5, group=None): - table = ElectrodeTable() - group = group if group is not None else mock_ElectrodeGroup() + +def mock_ElectrodeTable( + n_rows: int = 5, group: Optional[ElectrodeGroup] = None, nwbfile: Optional[NWBFile] = None +) -> DynamicTable: + electrodes_table = ElectrodeTable() + group = group if group is not None else mock_ElectrodeGroup(nwbfile=nwbfile) for i in range(n_rows): - table.add_row( + electrodes_table.add_row( location="CA1", group=group, group_name=group.name, ) - return table + + if nwbfile is not None: + nwbfile.electrodes = electrodes_table + + return electrodes_table -def mock_electrodes(n_electrodes=5, table=mock_ElectrodeTable(n_rows=5)): +def mock_electrodes( + n_electrodes: int = 5, table: Optional[DynamicTable] = None, nwbfile: Optional[NWBFile] = None +) -> DynamicTableRegion: + + table = table or 
mock_ElectrodeTable(n_rows=5, nwbfile=nwbfile) return DynamicTableRegion( name="electrodes", data=list(range(n_electrodes)), @@ -46,36 +66,48 @@ def mock_electrodes(n_electrodes=5, table=mock_ElectrodeTable(n_rows=5)): def mock_ElectricalSeries( - name=None, - description="description", + name: Optional[str] = None, + description: str = "description", data=None, - rate=30000.0, + rate: float = 30000.0, timestamps=None, - electrodes=None, - filtering="filtering", -): - return ElectricalSeries( + electrodes: Optional[DynamicTableRegion] = None, + filtering: str = "filtering", + nwbfile: Optional[NWBFile] = None +) -> ElectricalSeries: + electrical_series = ElectricalSeries( name=name or name_generator("ElectricalSeries"), description=description, data=data if data is not None else np.ones((10, 5)), rate=rate, timestamps=timestamps, - electrodes=electrodes or mock_electrodes(), + electrodes=electrodes or mock_electrodes(nwbfile=nwbfile), filtering=filtering, ) + if nwbfile is not None: + nwbfile.add_acquisition(electrical_series) + + return electrical_series + def mock_SpikeEventSeries( - name=None, - description="description", + name: Optional[str] = None, + description: str = "description", data=None, timestamps=np.arange(10).astype(float), - electrodes=None, -): - return SpikeEventSeries( + electrodes: Optional[DynamicTableRegion] = None, + nwbfile: Optional[NWBFile] = None, +) -> SpikeEventSeries: + spike_event_series = SpikeEventSeries( name=name or name_generator("SpikeEventSeries"), description=description, data=data if data is not None else np.ones((10, 5)), timestamps=timestamps if timestamps is not None else np.arange(10).astype(float), - electrodes=electrodes if electrodes is not None else mock_electrodes(), + electrodes=electrodes if electrodes is not None else mock_electrodes(nwbfile=nwbfile), ) + + if nwbfile is not None: + nwbfile.add_acquisition(spike_event_series) + + return spike_event_series diff --git a/src/pynwb/testing/mock/file.py b/src/pynwb/testing/mock/file.py index 1447b97cc..943f86dcb 100644 --- a/src/pynwb/testing/mock/file.py +++ b/src/pynwb/testing/mock/file.py @@ -1,3 +1,4 @@ +from typing import Optional from uuid import uuid4 from datetime import datetime from dateutil.tz import tzlocal @@ -7,31 +8,35 @@ def mock_NWBFile( - session_description='session_description', - identifier=None, - session_start_time=datetime(1970, 1, 1, tzinfo=tzlocal()), - subject=None, + session_description: str = 'session_description', + identifier: Optional[str] = None, + session_start_time: datetime = datetime(1970, 1, 1, tzinfo=tzlocal()), **kwargs ): return NWBFile( session_description=session_description, identifier=identifier or str(uuid4()), session_start_time=session_start_time, - subject=subject or mock_Subject(), **kwargs ) def mock_Subject( - age="P50D", - description="this is a mock mouse.", - sex="F", - subject_id=None, + age: Optional[str] = "P50D", + description: str = "this is a mock mouse.", + sex: Optional[str] = "F", + subject_id: Optional[str] = None, + nwbfile: Optional[NWBFile] = None, ): - return Subject( + subject = Subject( age=age, description=description, sex=sex, subject_id=subject_id or name_generator("subject"), ) + + if nwbfile is not None: + nwbfile.subject = subject + + return subject diff --git a/src/pynwb/testing/mock/icephys.py b/src/pynwb/testing/mock/icephys.py index bd3a0f1b5..2f13323e8 100644 --- a/src/pynwb/testing/mock/icephys.py +++ b/src/pynwb/testing/mock/icephys.py @@ -1,3 +1,5 @@ +from typing import Optional + import numpy as np from 
pynwb.icephys import ( @@ -12,90 +14,113 @@ from .utils import name_generator from .device import mock_Device +from ... import NWBFile +from ...device import Device def mock_IntracellularElectrode( - name=None, description="description", device=None, -): - return IntracellularElectrode( + name: Optional[str] = None, + description: str = "description", + device: Optional[Device] = None, + nwbfile: Optional[NWBFile] = None, +) -> IntracellularElectrode: + intracellular_electrode = IntracellularElectrode( name=name or name_generator("IntracellularElectrode"), description=description, - device=device or mock_Device(), + device=device or mock_Device(nwbfile=nwbfile), ) + if nwbfile is not None: + nwbfile.add_icephys_electrode(intracellular_electrode) + + return intracellular_electrode + def mock_VoltageClampStimulusSeries( - name=None, + name: Optional[str] = None, data=None, - rate=100_000., - electrode=None, - gain=0.02, + rate: float = 100_000., + electrode: Optional[IntracellularElectrode] = None, + gain: float = 0.02, timestamps=None, - starting_time=None, -): - return VoltageClampStimulusSeries( + starting_time: Optional[float] = None, + nwbfile: Optional[NWBFile] = None, +) -> VoltageClampStimulusSeries: + voltage_clamp_stimulus_series = VoltageClampStimulusSeries( name=name or name_generator("VoltageClampStimulusSeries"), data=data or np.ones((30,)), rate=None if timestamps else rate, - electrode=electrode or mock_IntracellularElectrode(), + electrode=electrode or mock_IntracellularElectrode(nwbfile=nwbfile), gain=gain, timestamps=timestamps, starting_time=starting_time, ) + if nwbfile is not None: + nwbfile.add_stimulus(voltage_clamp_stimulus_series) + + return voltage_clamp_stimulus_series + def mock_VoltageClampSeries( - name=None, + name: Optional[str] = None, data=None, - conversion=1.0, - resolution=np.nan, - starting_time=None, - rate=100_000.0, - electrode=None, - gain=0.02, - capacitance_slow=100e-12, - resistance_comp_correction=70.0, -): - return VoltageClampSeries( + conversion: float = 1.0, + resolution: float = np.nan, + starting_time: Optional[float] = None, + rate: Optional[float] = 100_000.0, + electrode: Optional[IntracellularElectrode] = None, + gain: float = 0.02, + capacitance_slow: float = 100e-12, + resistance_comp_correction: float = 70.0, + nwbfile: Optional[NWBFile] = None, +) -> VoltageClampSeries: + voltage_clamp_series = VoltageClampSeries( name=name if name is not None else name_generator("VoltageClampSeries"), data=data if data is not None else np.ones((30,)), conversion=conversion, resolution=resolution, starting_time=starting_time, rate=rate, - electrode=electrode or mock_IntracellularElectrode(), + electrode=electrode or mock_IntracellularElectrode(nwbfile=nwbfile), gain=gain, capacitance_slow=capacitance_slow, resistance_comp_correction=resistance_comp_correction, ) + if nwbfile is not None: + nwbfile.add_acquisition(voltage_clamp_series) + + return voltage_clamp_series + def mock_CurrentClampSeries( - name=None, + name: Optional[str] = None, data=None, - electrode=None, - gain=0.02, - stimulus_description="N/A", + electrode: Optional[IntracellularElectrode] = None, + gain: float = 0.02, + stimulus_description: str = "N/A", bias_current=None, bridge_balance=None, capacitance_compensation=None, resolution=-1.0, conversion=1.0, timestamps=None, - starting_time=None, - rate=100_000., - comments="no comments", - description="no description", + starting_time: Optional[float] = None, + rate: Optional[float] = 100_000., + comments: str = "no comments", + 
description: str = "no description", control=None, control_description=None, sweep_number=None, offset=0.0, - unit="volts", -): - return CurrentClampSeries( + unit: str = "volts", + nwbfile: Optional[NWBFile] = None, +) -> CurrentClampSeries: + current_clamp_series = CurrentClampSeries( name=name if name is not None else name_generator("CurrentClampSeries"), data=data if data is not None else np.ones((30,)), - electrode=electrode or mock_IntracellularElectrode(), + electrode=electrode or mock_IntracellularElectrode(nwbfile=nwbfile), gain=gain, stimulus_description=stimulus_description, bias_current=bias_current, @@ -115,9 +140,14 @@ def mock_CurrentClampSeries( unit=unit, ) + if nwbfile is not None: + nwbfile.add_acquisition(current_clamp_series) + + return current_clamp_series + def mock_CurrentClampStimulusSeries( - name=None, + name: Optional[str] = None, data=None, electrode=None, gain=0.02, @@ -134,11 +164,12 @@ def mock_CurrentClampStimulusSeries( sweep_number=None, offset=0.0, unit="amperes", -): - return CurrentClampStimulusSeries( + nwbfile: Optional[NWBFile] = None, +) -> CurrentClampStimulusSeries: + current_clamp_stimulus_series = CurrentClampStimulusSeries( name=name or name_generator("CurrentClampStimulusSeries"), data=data if data is not None else np.ones((30,)), - electrode=electrode or mock_IntracellularElectrode(), + electrode=electrode or mock_IntracellularElectrode(nwbfile=nwbfile), gain=gain, stimulus_description=stimulus_description, resolution=resolution, @@ -155,11 +186,16 @@ def mock_CurrentClampStimulusSeries( unit=unit, ) + if nwbfile is not None: + nwbfile.add_stimulus(current_clamp_stimulus_series) + + return current_clamp_stimulus_series + def mock_IZeroClampSeries( - name=None, + name: Optional[str] = None, data=None, - electrode=None, + electrode: Optional[IntracellularElectrode] = None, gain=.02, stimulus_description="N/A", resolution=-1.0, @@ -174,11 +210,12 @@ def mock_IZeroClampSeries( sweep_number=None, offset=0.0, unit="volts", -): - return IZeroClampSeries( + nwbfile: Optional[NWBFile] = None, +) -> IZeroClampSeries: + izero_clamp_series = IZeroClampSeries( name=name or name_generator("IZeroClampSeries"), data=data if data is not None else np.ones((30,)), - electrode=electrode or mock_IntracellularElectrode(), + electrode=electrode or mock_IntracellularElectrode(nwbfile=nwbfile), gain=gain, stimulus_description=stimulus_description, resolution=resolution, @@ -195,13 +232,25 @@ def mock_IZeroClampSeries( unit=unit, ) + if nwbfile is not None: + nwbfile.add_acquisition(izero_clamp_series) -def mock_IntracellularRecordingsTable(n_rows=5): + return izero_clamp_series + + +def mock_IntracellularRecordingsTable( + n_rows: int = 5, nwbfile: Optional[NWBFile] = None +) -> IntracellularRecordingsTable: irt = IntracellularRecordingsTable() for _ in range(n_rows): - electrode = mock_IntracellularElectrode() + electrode = mock_IntracellularElectrode(nwbfile=nwbfile) irt.add_recording( electrode=electrode, - stimulus=mock_VoltageClampStimulusSeries(electrode=electrode), - response=mock_VoltageClampSeries(electrode=electrode), + stimulus=mock_VoltageClampStimulusSeries(electrode=electrode, nwbfile=nwbfile), + response=mock_VoltageClampSeries(electrode=electrode, nwbfile=nwbfile), ) + + if nwbfile is not None: + nwbfile.intracellular_recordings = irt + + return irt diff --git a/src/pynwb/testing/mock/ogen.py b/src/pynwb/testing/mock/ogen.py index ceed36a43..43155d800 100644 --- a/src/pynwb/testing/mock/ogen.py +++ b/src/pynwb/testing/mock/ogen.py @@ -1,5 +1,9 @@ 
+from typing import Optional + import numpy as np +from ... import NWBFile +from ...device import Device from ...ogen import OptogeneticStimulusSite, OptogeneticSeries from .device import mock_Device @@ -7,39 +11,46 @@ def mock_OptogeneticStimulusSite( - name=None, - device=None, - description="optogenetic stimulus site", - excitation_lambda=500., - location="part of the brain", -): - return OptogeneticStimulusSite( + name: Optional[str] = None, + device: Optional[Device] = None, + description: str = "optogenetic stimulus site", + excitation_lambda: float = 500., + location: str = "part of the brain", + nwbfile: Optional[NWBFile] = None, +) -> OptogeneticStimulusSite: + optogenetic_stimulus_site = OptogeneticStimulusSite( name=name or name_generator("OptogeneticStimulusSite"), - device=device or mock_Device(), + device=device or mock_Device(nwbfile=nwbfile), description=description, excitation_lambda=excitation_lambda, location=location ) + if nwbfile is not None: + nwbfile.add_ogen_site(optogenetic_stimulus_site) + + return optogenetic_stimulus_site + def mock_OptogeneticSeries( - name=None, + name: Optional[str] = None, data=None, - site=None, - resolution=-1.0, - conversion=1.0, + site: Optional[OptogeneticStimulusSite] = None, + resolution: float = -1.0, + conversion: float = 1.0, timestamps=None, - starting_time=None, - rate=10.0, - comments="no comments", - description="no description", + starting_time: Optional[float] = None, + rate: Optional[float] = 10.0, + comments: str = "no comments", + description: str = "no description", control=None, control_description=None, -): - return OptogeneticSeries( + nwbfile: Optional[NWBFile] = None, +) -> OptogeneticSeries: + optogenetic_series = OptogeneticSeries( name=name or name_generator("OptogeneticSeries"), data=data if data is not None else np.array([1, 2, 3, 4]), - site=site or mock_OptogeneticStimulusSite(), + site=site or mock_OptogeneticStimulusSite(nwbfile=nwbfile), resolution=resolution, conversion=conversion, timestamps=timestamps, @@ -50,3 +61,8 @@ def mock_OptogeneticSeries( control=control, control_description=control_description, ) + + if nwbfile is not None: + nwbfile.add_acquisition(optogenetic_series) + + return optogenetic_series diff --git a/src/pynwb/testing/mock/ophys.py b/src/pynwb/testing/mock/ophys.py index f35d19720..d9ba02572 100644 --- a/src/pynwb/testing/mock/ophys.py +++ b/src/pynwb/testing/mock/ophys.py @@ -1,6 +1,11 @@ +from typing import Optional, Sequence + import numpy as np from hdmf.common.table import DynamicTableRegion + +from ... 
import NWBFile, ProcessingModule +from ...device import Device from ...ophys import ( RoiResponseSeries, OpticalChannel, @@ -17,38 +22,45 @@ def mock_OpticalChannel( - name=None, - description="description", - emission_lambda=500.0, -): - return OpticalChannel( + name: Optional[str] = None, + description: str = "description", + emission_lambda: float = 500.0, + nwbfile: Optional[NWBFile] = None, +) -> OpticalChannel: + optical_channel = OpticalChannel( name=name or name_generator("OpticalChannel"), description=description, emission_lambda=emission_lambda, ) + if nwbfile is not None: + mock_ImagingPlane(nwbfile=nwbfile, optical_channel=optical_channel) + + return optical_channel + def mock_ImagingPlane( - name=None, - optical_channel=None, - description="description", - device=None, - excitation_lambda=500.0, - indicator="indicator", - location="unknown", - imaging_rate=30.0, + name: Optional[str] = None, + optical_channel: Optional[OpticalChannel] = None, + description: str = "description", + device: Optional[Device] = None, + excitation_lambda: float = 500.0, + indicator: str = "indicator", + location: str = "unknown", + imaging_rate: float = 30.0, manifold=None, - conversion=1.0, - unit="meters", + conversion: float = 1.0, + unit: str = "meters", reference_frame=None, origin_coords=None, - origin_coords_unit="meters", + origin_coords_unit: str = "meters", grid_spacing=None, - grid_spacing_unit="meters", -): - return ImagingPlane( + grid_spacing_unit: str = "meters", + nwbfile: Optional[NWBFile] = None, +) -> ImagingPlane: + imaging_plane = ImagingPlane( name=name or name_generator("ImagingPlane"), - optical_channel=optical_channel or mock_OpticalChannel(), + optical_channel=optical_channel or mock_OpticalChannel(nwbfile=nwbfile), description=description, - device=device or mock_Device(), + device=device or mock_Device(nwbfile=nwbfile), excitation_lambda=excitation_lambda, indicator=indicator, location=location, @@ -63,13 +75,20 @@ def mock_ImagingPlane( grid_spacing_unit=grid_spacing_unit, ) + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + nwbfile.add_imaging_plane(imaging_plane) + + return imaging_plane + def mock_OnePhotonSeries( - name=None, - imaging_plane=None, + name: Optional[str] = None, + imaging_plane: Optional[ImagingPlane] = None, data=None, - rate=50.0, - unit="n.a.", + rate: Optional[float] = 50.0, + unit: str = "n.a.", exposure_time=None, binning=None, power=None, @@ -91,10 +110,11 @@ def mock_OnePhotonSeries( control=None, control_description=None, device=None, -): - return OnePhotonSeries( + nwbfile: Optional[NWBFile] = None, +) -> OnePhotonSeries: + one_photon_series = OnePhotonSeries( name=name if name is not None else name_generator("OnePhotonSeries"), - imaging_plane=imaging_plane or mock_ImagingPlane(), + imaging_plane=imaging_plane or mock_ImagingPlane(nwbfile=nwbfile), data=data if data is not None else np.ones((20, 5, 5)), unit=unit, exposure_time=exposure_time, @@ -120,10 +140,15 @@ def mock_OnePhotonSeries( device=device, ) + if nwbfile is not None: + nwbfile.add_acquisition(one_photon_series) + + return one_photon_series + def mock_TwoPhotonSeries( - name=None, - imaging_plane=None, + name: Optional[str] = None, + imaging_plane: Optional[ImagingPlane] = None, data=None, rate=50.0, unit="n.a.", @@ -144,10 +169,11 @@ def mock_TwoPhotonSeries( control=None, control_description=None, device=None, -): - return TwoPhotonSeries( + nwbfile: Optional[NWBFile] = None, +) -> TwoPhotonSeries: + 
two_photon_series = TwoPhotonSeries( name=name if name is not None else name_generator("TwoPhotonSeries"), - imaging_plane=imaging_plane or mock_ImagingPlane(), + imaging_plane=imaging_plane or mock_ImagingPlane(nwbfile=nwbfile), data=data if data is not None else np.ones((20, 5, 5)), unit=unit, format=format, @@ -170,17 +196,23 @@ def mock_TwoPhotonSeries( device=device, ) + if nwbfile is not None: + nwbfile.add_acquisition(two_photon_series) + + return two_photon_series + def mock_PlaneSegmentation( - description="no description", - imaging_plane=None, - name=None, + description: str = "no description", + imaging_plane: Optional[ImagingPlane] = None, + name: Optional[str] = None, reference_images=None, - n_rois=5, -): + n_rois: int = 5, + nwbfile: Optional[NWBFile] = None, +) -> PlaneSegmentation: plane_segmentation = PlaneSegmentation( description=description, - imaging_plane=imaging_plane or mock_ImagingPlane(), + imaging_plane=imaging_plane or mock_ImagingPlane(nwbfile=nwbfile), name=name if name is not None else name_generator("PlaneSegmentation"), reference_images=reference_images, ) @@ -188,22 +220,37 @@ def mock_PlaneSegmentation( for _ in range(n_rois): plane_segmentation.add_roi(image_mask=np.zeros((10, 10))) + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + nwbfile.processing["ophys"].add(plane_segmentation) + return plane_segmentation def mock_ImageSegmentation( - plane_segmentations=None, name=None, -): - return ImageSegmentation( - plane_segmentations=plane_segmentations or [mock_PlaneSegmentation()], + plane_segmentations: Optional[Sequence[PlaneSegmentation]] = None, + name: Optional[str] = None, + nwbfile: Optional[NWBFile] = None +) -> ImageSegmentation: + image_segmentation = ImageSegmentation( + plane_segmentations=plane_segmentations or [mock_PlaneSegmentation(nwbfile=nwbfile)], name=name or name_generator("ImageSegmentation"), ) + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + + nwbfile.processing["ophys"].add(image_segmentation) + + return image_segmentation + def mock_RoiResponseSeries( - name=None, + name: Optional[str] = None, data=None, - unit="n.a.", + unit: str = "n.a.", rois=None, resolution=-1.0, conversion=1.0, @@ -215,7 +262,9 @@ def mock_RoiResponseSeries( control=None, control_description=None, n_rois=None, -): + plane_segmentation: Optional[PlaneSegmentation] = None, + nwbfile: Optional[NWBFile] = None, +) -> RoiResponseSeries: if data is not None: if n_rois is not None and n_rois != data.shape[1]: raise ValueError("Argument conflict: n_rois does not match second dimension of data.") @@ -223,7 +272,9 @@ def mock_RoiResponseSeries( else: n_rois = 5 - return RoiResponseSeries( + plane_seg = plane_segmentation or mock_PlaneSegmentation(n_rois=n_rois, nwbfile=nwbfile) + + roi_response_series = RoiResponseSeries( name=name if name is not None else name_generator("RoiResponseSeries"), data=data if data is not None else np.ones((30, n_rois)), unit=unit, @@ -231,7 +282,7 @@ def mock_RoiResponseSeries( or DynamicTableRegion( name="rois", description="rois", - table=mock_PlaneSegmentation(n_rois=n_rois), + table=plane_seg, data=list(range(n_rois)), ), resolution=resolution, @@ -245,16 +296,67 @@ def mock_RoiResponseSeries( control_description=control_description, ) + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + + if plane_seg.name not in 
nwbfile.processing["ophys"].data_interfaces: + nwbfile.processing["ophys"].add(plane_seg) + + nwbfile.processing["ophys"].add(roi_response_series) + + return roi_response_series -def mock_DfOverF(roi_response_series=None, name=None): - return DfOverF( - roi_response_series=roi_response_series or [mock_RoiResponseSeries()], + +def mock_DfOverF( + roi_response_series: Optional[RoiResponseSeries] = None, + name: Optional[str] = None, + nwbfile: Optional[NWBFile] = None +) -> DfOverF: + df_over_f = DfOverF( name=name if name is not None else name_generator("DfOverF"), ) + plane_seg = mock_PlaneSegmentation(nwbfile=nwbfile) + + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + + nwbfile.processing["ophys"].add(df_over_f) + + else: + pm = ProcessingModule(name="ophys", description="ophys") + pm.add(plane_seg) + pm.add(df_over_f) + + df_over_f.add_roi_response_series( + roi_response_series or mock_RoiResponseSeries(nwbfile=nwbfile, plane_segmentation=plane_seg) + ) + return df_over_f -def mock_Fluorescence(roi_response_series=None, name=None): - return Fluorescence( - roi_response_series=roi_response_series or [mock_RoiResponseSeries()], +def mock_Fluorescence( + roi_response_series: Optional[Sequence[RoiResponseSeries]] = None, + name: Optional[str] = None, + nwbfile: Optional[NWBFile] = None, +) -> Fluorescence: + fluorescence = Fluorescence( name=name if name is not None else name_generator("Fluorescence"), ) + plane_seg = mock_PlaneSegmentation(nwbfile=nwbfile) + + if nwbfile is not None: + if "ophys" not in nwbfile.processing: + nwbfile.create_processing_module("ophys", "ophys") + + nwbfile.processing["ophys"].add(fluorescence) + else: + pm = ProcessingModule(name="ophys", description="ophys") + pm.add(plane_seg) + pm.add(fluorescence) + + fluorescence.add_roi_response_series( + roi_response_series or mock_RoiResponseSeries(nwbfile=nwbfile, plane_segmentation=plane_seg) + ) + + return fluorescence diff --git a/src/pynwb/testing/testh5io.py b/src/pynwb/testing/testh5io.py index 08626f943..b45407bfb 100644 --- a/src/pynwb/testing/testh5io.py +++ b/src/pynwb/testing/testh5io.py @@ -107,7 +107,7 @@ def roundtripContainer(self, cache_spec=False): BrokenLinkWarning)): raise Exception('%s: %s' % (w.category.__name__, w.message)) else: - warnings.warn(w.message, w.category) + warnings.showwarning(w.message, w.category, w.filename, w.lineno, w.file, w.line) try: return self.getContainer(self.read_nwbfile) @@ -141,7 +141,7 @@ def roundtripExportContainer(self, cache_spec=False): BrokenLinkWarning)): raise Exception('%s: %s' % (w.category.__name__, w.message)) else: - warnings.warn(w.message, w.category) + warnings.showwarning(w.message, w.category, w.filename, w.lineno, w.file, w.line) try: return self.getContainer(self.read_exported_nwbfile) diff --git a/test.py b/test.py index ba9eebe47..16191ae3f 100755 --- a/test.py +++ b/test.py @@ -12,7 +12,16 @@ import traceback import unittest -flags = {'pynwb': 2, 'integration': 3, 'example': 4, 'backwards': 5, 'validation': 6, 'ros3': 7, 'example-ros3': 8} +flags = { + 'pynwb': 2, + 'integration': 3, + 'example': 4, + 'backwards': 5, + 'validate-examples': 6, + 'ros3': 7, + 'example-ros3': 8, + 'validation-module': 9 +} TOTAL = 0 FAILURES = 0 @@ -154,7 +163,7 @@ def validate_nwbs(): def get_namespaces(nwbfile): comp = run(["python", "-m", "pynwb.validate", - "--list-namespaces", "--cached-namespace", nwb], + "--list-namespaces", nwbfile], stdout=PIPE, stderr=STDOUT, universal_newlines=True, 
timeout=30) if comp.returncode != 0: @@ -170,14 +179,13 @@ def get_namespaces(nwbfile): cmds = [] cmds += [["python", "-m", "pynwb.validate", nwb]] - cmds += [["python", "-m", "pynwb.validate", "--cached-namespace", nwb]] cmds += [["python", "-m", "pynwb.validate", "--no-cached-namespace", nwb]] for ns in namespaces: # for some reason, this logging command is necessary to correctly printing the namespace in the # next logging command logging.info("Namespace found: %s" % ns) - cmds += [["python", "-m", "pynwb.validate", "--cached-namespace", "--ns", ns, nwb]] + cmds += [["python", "-m", "pynwb.validate", "--ns", ns, nwb]] for cmd in cmds: logging.info("Validating with \"%s\"." % (" ".join(cmd[:-1]))) @@ -233,9 +241,6 @@ def run_integration_tests(verbose=True): run_test_suite("tests/integration/utils", "integration utils tests", verbose=verbose) - # also test the validation script - run_test_suite("tests/validation", "validation tests", verbose=verbose) - def clean_up_tests(): # remove files generated from running example files @@ -298,18 +303,21 @@ def main(): help='run example tests with ros3 streaming') parser.add_argument('-b', '--backwards', action='append_const', const=flags['backwards'], dest='suites', help='run backwards compatibility tests') - parser.add_argument('-w', '--validation', action='append_const', const=flags['validation'], dest='suites', - help='run example tests and validation tests on example NWB files') + parser.add_argument('-w', '--validate-examples', action='append_const', const=flags['validate-examples'], + dest='suites', help='run example tests and validation tests on example NWB files') parser.add_argument('-r', '--ros3', action='append_const', const=flags['ros3'], dest='suites', help='run ros3 streaming tests') + parser.add_argument('-x', '--validation-module', action='append_const', const=flags['validation-module'], + dest='suites', help='run tests on pynwb.validate') args = parser.parse_args() if not args.suites: args.suites = list(flags.values()) # remove from test suites run by default args.suites.pop(args.suites.index(flags['example'])) args.suites.pop(args.suites.index(flags['example-ros3'])) - args.suites.pop(args.suites.index(flags['validation'])) + args.suites.pop(args.suites.index(flags['validate-examples'])) args.suites.pop(args.suites.index(flags['ros3'])) + args.suites.pop(args.suites.index(flags['validation-module'])) # set up logger root = logging.getLogger() @@ -332,8 +340,10 @@ def main(): run_test_suite("tests/unit", "pynwb unit tests", verbose=args.verbosity) # Run example tests - if flags['example'] in args.suites or flags['validation'] in args.suites: + is_run_example_tests = False + if flags['example'] in args.suites or flags['validate-examples'] in args.suites: run_example_tests() + is_run_example_tests = True # Run example tests with ros3 streaming examples # NOTE this requires h5py to be built with ROS3 support and the dandi package to be installed @@ -342,13 +352,17 @@ def main(): run_example_ros3_tests() # Run validation tests on the example NWB files generated above - if flags['validation'] in args.suites: + if flags['validate-examples'] in args.suites: validate_nwbs() # Run integration tests if flags['integration'] in args.suites: run_integration_tests(verbose=args.verbosity) + # Run validation module tests, requires coverage to be installed + if flags['validation-module'] in args.suites: + run_test_suite("tests/validation", "validation tests", verbose=args.verbosity) + # Run backwards compatibility tests if flags['backwards'] in 
args.suites: run_test_suite("tests/back_compat", "pynwb backwards compatibility tests", verbose=args.verbosity) @@ -358,7 +372,7 @@ def main(): run_test_suite("tests/integration/ros3", "pynwb ros3 streaming tests", verbose=args.verbosity) # Delete files generated from running example tests above - if flags['example'] in args.suites or flags['validation'] in args.suites: + if is_run_example_tests: clean_up_tests() final_message = 'Ran %s tests' % TOTAL diff --git a/tests/integration/hdf5/test_ecephys.py b/tests/integration/hdf5/test_ecephys.py index 9d810270c..ff67d27c9 100644 --- a/tests/integration/hdf5/test_ecephys.py +++ b/tests/integration/hdf5/test_ecephys.py @@ -1,10 +1,21 @@ from hdmf.common import DynamicTableRegion - -from pynwb.ecephys import ElectrodeGroup, ElectricalSeries, FilteredEphys, LFP, Clustering, ClusterWaveforms,\ - SpikeEventSeries, EventWaveform, EventDetection, FeatureExtraction +from pynwb import NWBFile + +from pynwb.ecephys import ( + ElectrodeGroup, + ElectricalSeries, + FilteredEphys, + LFP, + Clustering, + ClusterWaveforms, + SpikeEventSeries, + EventWaveform, + EventDetection, + FeatureExtraction, +) from pynwb.device import Device from pynwb.file import ElectrodeTable as get_electrode_table -from pynwb.testing import NWBH5IOMixin, AcquisitionH5IOMixin, TestCase +from pynwb.testing import NWBH5IOMixin, AcquisitionH5IOMixin, NWBH5IOFlexMixin, TestCase class TestElectrodeGroupIO(NWBH5IOMixin, TestCase): @@ -28,27 +39,36 @@ def getContainer(self, nwbfile): return nwbfile.get_electrode_group(self.container.name) -class TestElectricalSeriesIO(AcquisitionH5IOMixin, TestCase): +def setup_electrode_table(): + table = get_electrode_table() + dev1 = Device(name='dev1') + group = ElectrodeGroup( + name='tetrode1', + description='tetrode description', + location='tetrode location', + device=dev1 + ) + for i in range(4): + table.add_row(location='CA1', group=group, group_name='tetrode1') + return table, group, dev1 - @staticmethod - def make_electrode_table(self): - """ Make an electrode table, electrode group, and device """ - self.table = get_electrode_table() - self.dev1 = Device(name='dev1') - self.group = ElectrodeGroup(name='tetrode1', - description='tetrode description', - location='tetrode location', - device=self.dev1) - for i in range(4): - self.table.add_row(location='CA1', group=self.group, group_name='tetrode1') - def setUpContainer(self): - """ Return the test ElectricalSeries to read/write """ - self.make_electrode_table(self) +class TestElectricalSeriesIO(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "ElectricalSeries" + + def addContainer(self): + """ Add the test ElectricalSeries and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) + region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) data = list(zip(range(10), range(10, 20))) timestamps = list(map(lambda x: x/10., range(10))) channel_conversion = [1., 2., 3., 4.] 
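A minimal sketch of the NWBH5IOFlexMixin roundtrip pattern these tests migrate to; the three method names come from the hunks in this file, while the mixin's internals (building self.nwbfile and round-tripping it through NWBHDF5IO) are an assumption rather than something this diff shows:

from pynwb import NWBFile
from pynwb.testing import NWBH5IOFlexMixin, TestCase
from pynwb.testing.mock.base import mock_TimeSeries


class TestTimeSeriesRoundtrip(NWBH5IOFlexMixin, TestCase):

    def getContainerType(self):
        # human-readable name of the container type under test
        return "TimeSeries"

    def addContainer(self):
        # the mixin provides self.nwbfile; the mock adds the series to its acquisitions
        mock_TimeSeries(name="test_ts", nwbfile=self.nwbfile)

    def getContainer(self, nwbfile: NWBFile):
        # fetch the container back from the file that was read from disk
        return nwbfile.acquisition["test_ts"]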
@@ -61,14 +81,11 @@ def setUpContainer(self): filtering=filtering, timestamps=timestamps ) - return es - def addContainer(self, nwbfile): - """ Add the test ElectricalSeries and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(es) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['test_eS'] def test_eg_ref(self): """ @@ -82,58 +99,70 @@ def test_eg_ref(self): self.assertIsInstance(row2.iloc[0]['group'], ElectrodeGroup) -class MultiElectricalSeriesIOMixin(AcquisitionH5IOMixin): - """ - Mixin class for methods to run a roundtrip test writing an NWB file with multiple ElectricalSeries. +class TestLFPIO(NWBH5IOFlexMixin, TestCase): - The abstract method setUpContainer needs to be implemented by classes that include this mixin. - def setUpContainer(self): - # return a test Container to read/write - """ + def getContainerType(self): + return "LFP" + + def addContainer(self): + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpTwoElectricalSeries(self): - """ Return two test ElectricalSeries to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) region1 = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) region2 = DynamicTableRegion(name='electrodes', data=[1, 3], description='the second and fourth electrodes', - table=self.table) + table=table) data1 = list(zip(range(10), range(10, 20))) data2 = list(zip(reversed(range(10)), reversed(range(10, 20)))) timestamps = list(map(lambda x: x/10., range(10))) es1 = ElectricalSeries(name='test_eS1', data=data1, electrodes=region1, timestamps=timestamps) es2 = ElectricalSeries(name='test_eS2', data=data2, electrodes=region2, channel_conversion=[4., .4], timestamps=timestamps) - return es1, es2 + lfp = LFP() + self.nwbfile.add_acquisition(lfp) + lfp.add_electrical_series([es1, es2]) - def addContainer(self, nwbfile): - """ Add the test ElectricalSeries and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['LFP'] -class TestLFPIO(MultiElectricalSeriesIOMixin, TestCase): +class TestFilteredEphysIO(NWBH5IOFlexMixin, TestCase): - def setUpContainer(self): - """ Return a test LFP to read/write """ - es = self.setUpTwoElectricalSeries() - lfp = LFP(es) - return lfp + def getContainerType(self): + return "FilteredEphys" + def addContainer(self): + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) -class TestFilteredEphysIO(MultiElectricalSeriesIOMixin, TestCase): + region1 = DynamicTableRegion(name='electrodes', + data=[0, 2], + description='the first and third electrodes', + table=table) + region2 = DynamicTableRegion(name='electrodes', + data=[1, 3], + description='the second and fourth electrodes', + table=table) + data1 = list(zip(range(10), range(10, 20))) + data2 = list(zip(reversed(range(10)), reversed(range(10, 20)))) + timestamps = list(map(lambda x: x/10., range(10))) + es1 = ElectricalSeries(name='test_eS1', 
data=data1, electrodes=region1, timestamps=timestamps) + es2 = ElectricalSeries(name='test_eS2', data=data2, electrodes=region2, channel_conversion=[4., .4], + timestamps=timestamps) + fe = FilteredEphys() + self.nwbfile.add_acquisition(fe) + fe.add_electrical_series([es1, es2]) - def setUpContainer(self): - """ Return a test FilteredEphys to read/write """ - es = self.setUpTwoElectricalSeries() - fe = FilteredEphys(es) - return fe + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['FilteredEphys'] class TestClusteringIO(AcquisitionH5IOMixin, TestCase): @@ -155,28 +184,35 @@ def roundtripExportContainer(self, cache_spec=False): return super().roundtripExportContainer(cache_spec) -class EventWaveformConstructor(AcquisitionH5IOMixin, TestCase): +class EventWaveformConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "SpikeEventSeries" + + def addContainer(self): + """ Add the test SpikeEventSeries and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpContainer(self): - """ Return a test EventWaveform to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) - sES = SpikeEventSeries(name='test_sES', - data=((1, 1), (2, 2), (3, 3)), - timestamps=[0., 1., 2.], - electrodes=region) - ew = EventWaveform(sES) - return ew + table=table) + ses = SpikeEventSeries( + name='test_sES', + data=((1, 1), (2, 2), (3, 3)), + timestamps=[0., 1., 2.], + electrodes=region + ) - def addContainer(self, nwbfile): - """ Add the test EventWaveform and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + ew = EventWaveform() + self.nwbfile.add_acquisition(ew) + ew.add_spike_event_series(ses) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['EventWaveform'] class ClusterWaveformsConstructor(AcquisitionH5IOMixin, TestCase): @@ -210,51 +246,66 @@ def roundtripExportContainer(self, cache_spec=False): return super().roundtripExportContainer(cache_spec) -class FeatureExtractionConstructor(AcquisitionH5IOMixin, TestCase): +class FeatureExtractionConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "FeatureExtraction" + + def addContainer(self): + """ Add the test FeatureExtraction and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpContainer(self): - """ Return a test FeatureExtraction to read/write """ event_times = [1.9, 3.5] - TestElectricalSeriesIO.make_electrode_table(self) region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) description = ['desc1', 'desc2', 'desc3'] features = [[[0., 1., 2.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]] fe = FeatureExtraction(electrodes=region, description=description, times=event_times, features=features) - return fe - def addContainer(self, nwbfile): - """ Add the test FeatureExtraction and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - 
nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(fe) + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['FeatureExtraction'] -class EventDetectionConstructor(AcquisitionH5IOMixin, TestCase): - def setUpContainer(self): - """ Return a test EventDetection to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) +class EventDetectionConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "EventDetection" + + def addContainer(self): + """ Add the test EventDetection and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) + region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) data = list(range(10)) ts = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] - self.eS = ElectricalSeries(name='test_eS', data=data, electrodes=region, timestamps=ts) - eD = EventDetection(detection_method='detection_method', - source_electricalseries=self.eS, - source_idx=(1, 2, 3), - times=(0.1, 0.2, 0.3)) - return eD + eS = ElectricalSeries( + name='test_eS', + data=data, + electrodes=region, + timestamps=ts + ) + eD = EventDetection( + detection_method='detection_method', + source_electricalseries=eS, + source_idx=(1, 2, 3), + times=(0.1, 0.2, 0.3) + ) - def addContainer(self, nwbfile): - """ Add the test EventDetection and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.eS) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(eS) + self.nwbfile.add_acquisition(eD) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['EventDetection'] diff --git a/tests/read_dandi/test_read_dandi.py b/tests/read_dandi/test_read_dandi.py index 84e9f3f62..0e0698d77 100644 --- a/tests/read_dandi/test_read_dandi.py +++ b/tests/read_dandi/test_read_dandi.py @@ -1,52 +1,62 @@ +"""Test reading NWB files from the DANDI Archive using ROS3.""" from dandi.dandiapi import DandiAPIClient +import random import sys import traceback from pynwb import NWBHDF5IO -from pynwb.testing import TestCase -class TestReadNWBDandisets(TestCase): - """Test reading NWB files from the DANDI Archive using ROS3.""" + +# NOTE: do not name the function with "test_" prefix, otherwise pytest +# will try to run it as a test + +def read_first_nwb_asset(): + """Test reading the first NWB asset from a random selection of 50 dandisets that use NWB.""" + num_dandisets_to_read = 50 + client = DandiAPIClient() + dandisets = list(client.get_dandisets()) + random.shuffle(dandisets) + dandisets_to_read = dandisets[:num_dandisets_to_read] + print("Reading NWB files from the following dandisets:") + print([d.get_raw_metadata()["identifier"] for d in dandisets_to_read]) + + failed_reads = dict() + for i, dandiset in enumerate(dandisets_to_read): + dandiset_metadata = dandiset.get_raw_metadata() + + # skip any dandisets that do not use NWB + if not any( + data_standard["identifier"] == "RRID:SCR_015242" # this is the RRID for NWB + for data_standard in dandiset_metadata["assetsSummary"].get("dataStandard", []) + ): + continue + + dandiset_identifier = dandiset_metadata["identifier"] + print("--------------") 
+ print(f"{i}: {dandiset_identifier}") + + # iterate through assets until we get an NWB file (it could be MP4) + assets = dandiset.get_assets() + first_asset = next(assets) + while first_asset.path.split(".")[-1] != "nwb": + first_asset = next(assets) + if first_asset.path.split(".")[-1] != "nwb": + print("No NWB files?!") + continue - def test_read_first_nwb_asset(self): - """Test reading the first NWB asset from each dandiset that uses NWB.""" - client = DandiAPIClient() - dandisets = client.get_dandisets() + s3_url = first_asset.get_content_url(follow_redirects=1, strip_query=True) - failed_reads = dict() - for i, dandiset in enumerate(dandisets): - dandiset_metadata = dandiset.get_raw_metadata() + try: + with NWBHDF5IO(path=s3_url, load_namespaces=True, driver="ros3") as io: + io.read() + except Exception as e: + print(traceback.format_exc()) + failed_reads[dandiset] = e - # skip any dandisets that do not use NWB - if not any( - data_standard["identifier"] == "RRID:SCR_015242" # this is the RRID for NWB - for data_standard in dandiset_metadata["assetsSummary"].get("dataStandard", []) - ): - continue + if failed_reads: + print(failed_reads) + sys.exit(1) - dandiset_identifier = dandiset_metadata["identifier"] - print("--------------") - print(f"{i}: {dandiset_identifier}") - # iterate through assets until we get an NWB file (it could be MP4) - assets = dandiset.get_assets() - first_asset = next(assets) - while first_asset.path.split(".")[-1] != "nwb": - first_asset = next(assets) - if first_asset.path.split(".")[-1] != "nwb": - print("No NWB files?!") - continue - - s3_url = first_asset.get_content_url(follow_redirects=1, strip_query=True) - - try: - with NWBHDF5IO(path=s3_url, load_namespaces=True, driver="ros3") as io: - io.read() - except Exception as e: - print(traceback.format_exc()) - failed_reads[dandiset] = e - - if failed_reads: - print(failed_reads) - sys.exit(1) +if __name__ == "__main__": + read_first_nwb_asset() diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py index a27a90e96..805f946ec 100644 --- a/tests/unit/test_base.py +++ b/tests/unit/test_base.py @@ -1,4 +1,5 @@ import numpy as np +from numpy.testing import assert_array_equal from pynwb.base import ( ProcessingModule, @@ -10,6 +11,7 @@ ImageReferences ) from pynwb.testing import TestCase +from pynwb.testing.mock.base import mock_TimeSeries from hdmf.data_utils import DataChunkIterator from hdmf.backends.hdf5 import H5DataIO @@ -386,6 +388,23 @@ def test_dimension_warning(self): timestamps=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8], ) + def test_get_timestamps(self): + time_series = mock_TimeSeries(data=[1, 2, 3], rate=40.0, starting_time=30.0) + assert_array_equal(time_series.get_timestamps(), [30, 30+1/40, 30+2/40]) + + time_series = mock_TimeSeries(data=[1, 2, 3], timestamps=[3, 4, 5], rate=None) + assert_array_equal(time_series.get_timestamps(), [3, 4, 5]) + + def test_get_data_in_units(self): + ts = mock_TimeSeries(data=[1., 2., 3.], conversion=2., offset=3.) + assert_array_equal(ts.get_data_in_units(), [5., 7., 9.]) + + ts = mock_TimeSeries(data=[1., 2., 3.], conversion=2.) 
diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py
index a27a90e96..805f946ec 100644
--- a/tests/unit/test_base.py
+++ b/tests/unit/test_base.py
@@ -1,4 +1,5 @@
 import numpy as np
+from numpy.testing import assert_array_equal
 
 from pynwb.base import (
     ProcessingModule,
@@ -10,6 +11,7 @@
     ImageReferences
 )
 from pynwb.testing import TestCase
+from pynwb.testing.mock.base import mock_TimeSeries
 
 from hdmf.data_utils import DataChunkIterator
 from hdmf.backends.hdf5 import H5DataIO
@@ -386,6 +388,23 @@ def test_dimension_warning(self):
                 timestamps=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
             )
 
+    def test_get_timestamps(self):
+        time_series = mock_TimeSeries(data=[1, 2, 3], rate=40.0, starting_time=30.0)
+        assert_array_equal(time_series.get_timestamps(), [30, 30+1/40, 30+2/40])
+
+        time_series = mock_TimeSeries(data=[1, 2, 3], timestamps=[3, 4, 5], rate=None)
+        assert_array_equal(time_series.get_timestamps(), [3, 4, 5])
+
+    def test_get_data_in_units(self):
+        ts = mock_TimeSeries(data=[1., 2., 3.], conversion=2., offset=3.)
+        assert_array_equal(ts.get_data_in_units(), [5., 7., 9.])
+
+        ts = mock_TimeSeries(data=[1., 2., 3.], conversion=2.)
+        assert_array_equal(ts.get_data_in_units(), [2., 4., 6.])
+
+        ts = mock_TimeSeries(data=[1., 2., 3.])
+        assert_array_equal(ts.get_data_in_units(), [1., 2., 3.])
+
 
 class TestImage(TestCase):
     def test_init(self):
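The two helpers tested above also compose: get_timestamps() falls back to starting_time + i / rate when no explicit timestamps are stored, and get_data_in_units() applies data * conversion + offset. A combined sketch using the same mock:

    from numpy.testing import assert_array_equal
    from pynwb.testing.mock.base import mock_TimeSeries

    ts = mock_TimeSeries(data=[1., 2., 3.], rate=40.0, starting_time=30.0,
                         conversion=2., offset=3.)
    assert_array_equal(ts.get_timestamps(), [30, 30 + 1/40, 30 + 2/40])
    assert_array_equal(ts.get_data_in_units(), [5., 7., 9.])  # 1*2+3, 2*2+3, 3*2+3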
diff --git a/tests/unit/test_ecephys.py b/tests/unit/test_ecephys.py
index 6cdfcd59e..6f76a5e8c 100644
--- a/tests/unit/test_ecephys.py
+++ b/tests/unit/test_ecephys.py
@@ -2,8 +2,19 @@
 
 import numpy as np
 
-from pynwb.ecephys import ElectricalSeries, SpikeEventSeries, EventDetection, Clustering, EventWaveform,\
-    ClusterWaveforms, LFP, FilteredEphys, FeatureExtraction, ElectrodeGroup
+from pynwb.base import ProcessingModule
+from pynwb.ecephys import (
+    ElectricalSeries,
+    SpikeEventSeries,
+    EventDetection,
+    Clustering,
+    EventWaveform,
+    ClusterWaveforms,
+    LFP,
+    FilteredEphys,
+    FeatureExtraction,
+    ElectrodeGroup,
+)
 from pynwb.device import Device
 from pynwb.file import ElectrodeTable
 from pynwb.testing import TestCase
@@ -207,7 +218,11 @@ def test_init(self):
         table, region = self._create_table_and_region()
         sES = SpikeEventSeries('test_sES', list(range(10)), list(range(10)), region)
 
-        ew = EventWaveform(sES)
+        pm = ProcessingModule(name='test_module', description='a test module')
+        ew = EventWaveform()
+        pm.add(table)
+        pm.add(ew)
+        ew.add_spike_event_series(sES)
         self.assertEqual(ew.spike_event_series['test_sES'], sES)
         self.assertEqual(ew['test_sES'], ew.spike_event_series['test_sES'])
 
@@ -264,10 +279,25 @@ def _create_table_and_region(self):
         )
         return table, region
 
+    def test_init(self):
+        _, region = self._create_table_and_region()
+        eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        msg = (
+            "The linked table for DynamicTableRegion 'electrodes' does not share "
+            "an ancestor with the DynamicTableRegion."
+        )
+        with self.assertWarnsRegex(UserWarning, msg):
+            lfp = LFP(eS)
+        self.assertEqual(lfp.electrical_series.get('test_eS'), eS)
+        self.assertEqual(lfp['test_eS'], lfp.electrical_series.get('test_eS'))
+
     def test_add_electrical_series(self):
         lfp = LFP()
         table, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        pm = ProcessingModule(name='test_module', description='a test module')
+        pm.add(table)
+        pm.add(lfp)
         lfp.add_electrical_series(eS)
         self.assertEqual(lfp.electrical_series.get('test_eS'), eS)
 
@@ -285,16 +315,24 @@ def _create_table_and_region(self):
         return table, region
 
     def test_init(self):
-        table, region = self._create_table_and_region()
+        _, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
-        fe = FilteredEphys(eS)
+        msg = (
+            "The linked table for DynamicTableRegion 'electrodes' does not share "
+            "an ancestor with the DynamicTableRegion."
+        )
+        with self.assertWarnsRegex(UserWarning, msg):
+            fe = FilteredEphys(eS)
         self.assertEqual(fe.electrical_series.get('test_eS'), eS)
         self.assertEqual(fe['test_eS'], fe.electrical_series.get('test_eS'))
 
     def test_add_electrical_series(self):
-        fe = FilteredEphys()
         table, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        pm = ProcessingModule(name='test_module', description='a test module')
+        fe = FilteredEphys()
+        pm.add(table)
+        pm.add(fe)
         fe.add_electrical_series(eS)
         self.assertEqual(fe.electrical_series.get('test_eS'), eS)
         self.assertEqual(fe['test_eS'], fe.electrical_series.get('test_eS'))
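The warning asserted in the new test_init cases comes from hdmf: a DynamicTableRegion whose target table does not share an ancestor with it is flagged, since the table may then not be written to the same file. The updated add-series tests sidestep it by rooting the table and the container under a common parent; in isolation (with table and eS as built by _create_table_and_region() above):

    from pynwb.base import ProcessingModule
    from pynwb.ecephys import LFP

    pm = ProcessingModule(name='test_module', description='a test module')
    pm.add(table)  # the electrodes table and ...
    lfp = LFP()
    pm.add(lfp)    # ... the container now share `pm` as an ancestor
    lfp.add_electrical_series(eS)  # no ancestry warning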
diff --git a/tests/unit/test_epoch_legacy.py b/tests/unit/test_epoch_legacy.py
index d204c94e3..1f6c50f38 100644
--- a/tests/unit/test_epoch_legacy.py
+++ b/tests/unit/test_epoch_legacy.py
@@ -3,7 +3,6 @@
 from pynwb import NWBFile, NWBHDF5IO
 from pynwb.base import TimeSeries, TimeSeriesReference, TimeSeriesReferenceVectorData
 import numpy as np
-import warnings
 
 import h5py
 
@@ -66,41 +65,35 @@ def test_legacy_format(self):
         nwbfile = NWBFile(description, identifier, self.start_time, file_create_date=self.create_date)
         self.addContainer(nwbfile)
 
-        with warnings.catch_warnings(record=True) as ws:
-            # write the file
-            with NWBHDF5IO(self.filename, mode='w') as write_io:
-                write_io.write(nwbfile, cache_spec=False)
-            # Modify the HDF5 file to look like NWB 2.4 and earlier. This simply means
-            # modifying the neurodata_type on the TimeIntervals.timeseries column
-            with h5py.File(self.filename, mode='a') as infile:
-                infile['/intervals/epochs/timeseries'].attrs['neurodata_type'] = 'VectorData'
-                infile.attrs['nwb_version'] = '2.3.0'
-            # Make sure we didn't have warnings
-            self.assertEqual(len(ws), 0)
+        # write the file
+        with NWBHDF5IO(self.filename, mode='w') as write_io:
+            write_io.write(nwbfile, cache_spec=False)
+        # Modify the HDF5 file to look like NWB 2.4 and earlier. This simply means
+        # modifying the neurodata_type on the TimeIntervals.timeseries column
+        with h5py.File(self.filename, mode='a') as infile:
+            infile['/intervals/epochs/timeseries'].attrs['neurodata_type'] = 'VectorData'
+            infile.attrs['nwb_version'] = '2.3.0'
 
         # Read the file back
-        with warnings.catch_warnings(record=True) as ws:
-            self.reader = NWBHDF5IO(self.filename, mode='r')
-            self.read_nwbfile = self.reader.read()
+        self.reader = NWBHDF5IO(self.filename, mode='r')
+        self.read_nwbfile = self.reader.read()
 
-            # Test that the VectorData column for timeseries has been converted to TimeSeriesReferenceVectorData
-            self.assertIsInstance(self.read_nwbfile.epochs.timeseries, TimeSeriesReferenceVectorData)
+        # Test that the VectorData column for timeseries has been converted to TimeSeriesReferenceVectorData
+        self.assertIsInstance(self.read_nwbfile.epochs.timeseries, TimeSeriesReferenceVectorData)
 
-            # Test that slicing into epochs.timeseries works as expected
-            re = self.read_nwbfile.epochs.timeseries[0]
-            self.assertIsInstance(re, TimeSeriesReference)
-            self.assertTupleEqual((re[0], re[1], re[2].object_id), (0, 5, nwbfile.get_acquisition('a').object_id))
+        # Test that slicing into epochs.timeseries works as expected
+        re = self.read_nwbfile.epochs.timeseries[0]
+        self.assertIsInstance(re, TimeSeriesReference)
+        self.assertTupleEqual((re[0], re[1], re[2].object_id), (0, 5, nwbfile.get_acquisition('a').object_id))
 
-            # Test that slicing into epochs works as expected
-            re = self.read_nwbfile.epochs[0:1]
-            self.assertListEqual(re.columns.tolist(), ['start_time', 'stop_time', 'temperature', 'tags', 'timeseries'])
-            for i in re.loc[0, 'timeseries']:
-                self.assertIsInstance(i, TimeSeriesReference)
-            self.assertTupleEqual(
-                (re.loc[0, 'timeseries'][0][0], re.loc[0, 'timeseries'][0][1], re.loc[0, 'timeseries'][0][2].object_id),
-                (0, 5, nwbfile.get_acquisition('a').object_id))
-            self.assertTupleEqual(
-                (re.loc[0, 'timeseries'][1][0], re.loc[0, 'timeseries'][1][1], re.loc[0, 'timeseries'][1][2].object_id),
-                (0, 3, nwbfile.get_acquisition('b').object_id))
-            # Make sure we didn't have warnings
-            self.assertEqual(len(ws), 0)
+        # Test that slicing into epochs works as expected
+        re = self.read_nwbfile.epochs[0:1]
+        self.assertListEqual(re.columns.tolist(), ['start_time', 'stop_time', 'temperature', 'tags', 'timeseries'])
+        for i in re.loc[0, 'timeseries']:
+            self.assertIsInstance(i, TimeSeriesReference)
+        self.assertTupleEqual(
+            (re.loc[0, 'timeseries'][0][0], re.loc[0, 'timeseries'][0][1], re.loc[0, 'timeseries'][0][2].object_id),
+            (0, 5, nwbfile.get_acquisition('a').object_id))
+        self.assertTupleEqual(
+            (re.loc[0, 'timeseries'][1][0], re.loc[0, 'timeseries'][1][1], re.loc[0, 'timeseries'][1][2].object_id),
+            (0, 3, nwbfile.get_acquisition('b').object_id))
diff --git a/tests/unit/test_file.py b/tests/unit/test_file.py
index bb5c9c1e1..c9bd98ad0 100644
--- a/tests/unit/test_file.py
+++ b/tests/unit/test_file.py
@@ -563,9 +563,8 @@ def test_simple(self):
         with NWBHDF5IO(self.path, 'w') as io:
             io.write(nwbfile, cache_spec=False)
 
-        with self.assertWarnsWith(UserWarning, "No cached namespaces found in %s" % self.path):
-            with NWBHDF5IO(self.path, 'r', load_namespaces=True) as reader:
-                nwbfile = reader.read()
+        with NWBHDF5IO(self.path, 'r', load_namespaces=True) as reader:
+            nwbfile = reader.read()
 
 
 class TestTimestampsRefDefault(TestCase):
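Context for the tuple-style assertions in the legacy-format test above: a TimeSeriesReference behaves as an (idx_start, count, timeseries) triple, so re[0], re[1], and re[2] select the start index, the number of samples, and the referenced TimeSeries. A sketch against a file read as in that test:

    # assumes `read_nwbfile` was read back as in test_legacy_format above
    ref = read_nwbfile.epochs.timeseries[0]
    idx_start, count, ts = ref[0], ref[1], ref[2]
    # here: 5 samples starting at index 0 of acquisition 'a'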
diff --git a/tests/unit/test_mock.py b/tests/unit/test_mock.py
index 6f59c2007..d24e47551 100644
--- a/tests/unit/test_mock.py
+++ b/tests/unit/test_mock.py
@@ -1,3 +1,5 @@
+from pynwb import NWBHDF5IO
+
 from pynwb.testing.mock.file import mock_Subject, mock_NWBFile
 
 from pynwb.testing.mock.base import mock_TimeSeries
@@ -49,45 +51,57 @@
 
 from pynwb.testing.mock.utils import name_generator, name_generator_registry
 
+mock_functions = [
+    mock_ImagingPlane,
+    mock_OnePhotonSeries,
+    mock_TwoPhotonSeries,
+    mock_RoiResponseSeries,
+    mock_PlaneSegmentation,
+    mock_OpticalChannel,
+    mock_Fluorescence,
+    mock_DfOverF,
+    mock_ImageSegmentation,
+    mock_OptogeneticStimulusSite,
+    mock_OptogeneticSeries,
+    mock_Device,
+    mock_Position,
+    mock_PupilTracking,
+    mock_CompassDirection,
+    mock_SpatialSeries,
+    mock_ElectrodeGroup,
+    mock_ElectrodeTable,
+    mock_ElectricalSeries,
+    mock_SpikeEventSeries,
+    mock_Subject,
+    mock_NWBFile,
+    mock_TimeSeries,
+    mock_CurrentClampSeries,
+    mock_IZeroClampSeries,
+    mock_VoltageClampSeries,
+    mock_VoltageClampStimulusSeries,
+    mock_IntracellularElectrode,
+    mock_CurrentClampStimulusSeries,
+    mock_IntracellularRecordingsTable,
+]
 
-@pytest.mark.parametrize(
-    "mock_function", [
-        mock_ImagingPlane,
-        mock_OnePhotonSeries,
-        mock_TwoPhotonSeries,
-        mock_RoiResponseSeries,
-        mock_PlaneSegmentation,
-        mock_OpticalChannel,
-        mock_Fluorescence,
-        mock_DfOverF,
-        mock_ImageSegmentation,
-        mock_OptogeneticStimulusSite,
-        mock_OptogeneticSeries,
-        mock_Device,
-        mock_Position,
-        mock_PupilTracking,
-        mock_CompassDirection,
-        mock_SpatialSeries,
-        mock_ElectrodeGroup,
-        mock_ElectrodeTable,
-        mock_ElectricalSeries,
-        mock_SpikeEventSeries,
-        mock_Subject,
-        mock_NWBFile,
-        mock_TimeSeries,
-        mock_CurrentClampSeries,
-        mock_IZeroClampSeries,
-        mock_VoltageClampSeries,
-        mock_VoltageClampStimulusSeries,
-        mock_IntracellularElectrode,
-        mock_CurrentClampStimulusSeries,
-        mock_IntracellularRecordingsTable,
-    ],
-)
+
+@pytest.mark.parametrize("mock_function", mock_functions)
 def test_mock(mock_function):
     mock_function()
 
 
+@pytest.mark.parametrize("mock_function", mock_functions)
+def test_mock_write(mock_function, tmp_path):
+    if mock_function is mock_NWBFile:
+        return
+    nwbfile = mock_NWBFile()
+    assert mock_function(nwbfile=nwbfile) is not None
+
+    test_file = tmp_path / (mock_function.__name__ + ".nwb")
+    with NWBHDF5IO(test_file, "w") as io:
+        io.write(nwbfile)
+
+
 def test_name_generator():
     name_generator_registry.clear()  # reset registry
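The new test_mock_write relies on the mocks' nwbfile= argument: passing a file makes each mock attach itself, along with any upstream objects it auto-creates, so the result is writable as-is. A standalone sketch with one of the parametrized mocks (import paths as used by the test module):

    from pynwb import NWBHDF5IO
    from pynwb.testing.mock.file import mock_NWBFile
    from pynwb.testing.mock.ophys import mock_TwoPhotonSeries

    nwbfile = mock_NWBFile()
    mock_TwoPhotonSeries(nwbfile=nwbfile)  # also attaches its auto-created ImagingPlane and Device
    with NWBHDF5IO("mock_TwoPhotonSeries.nwb", "w") as io:
        io.write(nwbfile)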
diff --git a/tests/unit/test_ophys.py b/tests/unit/test_ophys.py
index 1ebb7c640..88bd24535 100644
--- a/tests/unit/test_ophys.py
+++ b/tests/unit/test_ophys.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from pynwb.base import TimeSeries
+from pynwb.base import TimeSeries, ProcessingModule
 from pynwb.device import Device
 from pynwb.image import ImageSeries
 from pynwb.ophys import (
@@ -398,9 +398,15 @@ def test_warnings(self):
 
 class DfOverFConstructor(TestCase):
     def test_init(self):
+        pm = ProcessingModule(name='ophys', description="Optical physiology")
+
         ps = create_plane_segmentation()
-        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        pm.add(ps)
+
+        dof = DfOverF()
+        pm.add(dof)
 
+        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
         rrs = RoiResponseSeries(
             name='test_ts',
             data=[1, 2, 3],
@@ -408,26 +414,32 @@ def test_init(self):
             unit='unit',
             timestamps=[0.1, 0.2, 0.3]
         )
+        dof.add_roi_response_series(rrs)
 
-        dof = DfOverF(rrs)
         self.assertEqual(dof.roi_response_series['test_ts'], rrs)
 
 
 class FluorescenceConstructor(TestCase):
     def test_init(self):
+        pm = ProcessingModule(name='ophys', description="Optical physiology")
+
         ps = create_plane_segmentation()
-        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        pm.add(ps)
 
-        ts = RoiResponseSeries(
+        ff = Fluorescence()
+        pm.add(ff)
+
+        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        rrs = RoiResponseSeries(
             name='test_ts',
             data=[1, 2, 3],
             rois=rt_region,
             unit='unit',
             timestamps=[0.1, 0.2, 0.3]
         )
+        ff.add_roi_response_series(rrs)
 
-        ff = Fluorescence(ts)
-        self.assertEqual(ff.roi_response_series['test_ts'], ts)
+        self.assertEqual(ff.roi_response_series['test_ts'], rrs)
 
 
 class ImageSegmentationConstructor(TestCase):
diff --git a/tests/unit/test_resources.py b/tests/unit/test_resources.py
new file mode 100644
index 000000000..108a7fd84
--- /dev/null
+++ b/tests/unit/test_resources.py
@@ -0,0 +1,19 @@
+import warnings
+
+from pynwb.resources import HERD
+from pynwb.testing import TestCase
+
+
+class TestNWBContainer(TestCase):
+    def test_constructor(self):
+        """
+        Test constructor
+        """
+        with warnings.catch_warnings(record=True):
+            warnings.filterwarnings(
+                "ignore",
+                message=r"HERD is experimental .*",
+                category=UserWarning,
+            )
+            er = HERD()
+            self.assertIsInstance(er, HERD)
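Both this new file and the validation tests below use the same narrowly scoped suppression: catch_warnings(record=True) limits the filter change to the block, and filterwarnings("ignore", message=...) silences only the one expected UserWarning. The pattern in isolation:

    import warnings

    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            "ignore",
            message=r"HERD is experimental .*",  # match only the expected warning
            category=UserWarning,
        )
        ...  # code expected to emit exactly that warning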
" r"Falling back to PyNWB namespace information\.\s*" r"The namespace 'notfound' could not be found in PyNWB namespace information as only " @@ -222,26 +219,44 @@ def test_validate_io_cached(self): def test_validate_io_cached_extension(self): """Test that validating a file with cached spec against its cached namespaces succeeds.""" - with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: - errors = validate(io) - self.assertEqual(errors, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: + errors = validate(io) + self.assertEqual(errors, []) def test_validate_io_cached_extension_pass_ns(self): """Test that validating a file with cached extension spec against the extension namespace succeeds.""" - with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: - errors = validate(io, 'ndx-testextension') - self.assertEqual(errors, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: + errors = validate(io, 'ndx-testextension') + self.assertEqual(errors, []) def test_validate_io_cached_core_with_io(self): """ For back-compatability, test that validating a file with cached extension spec against the core namespace succeeds when using the `io` + `namespace` keywords. """ - with NWBHDF5IO( - path='tests/back_compat/2.1.0_nwbfile_with_extension.nwb', mode='r', load_namespaces=True - ) as io: - results = validate(io=io, namespace="core") - self.assertEqual(results, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO( + path='tests/back_compat/2.1.0_nwbfile_with_extension.nwb', mode='r', load_namespaces=True + ) as io: + results = validate(io=io, namespace="core") + self.assertEqual(results, []) def test_validate_file_cached_extension(self): """ diff --git a/tox.ini b/tox.ini index 9caaccbb5..10b1e0df4 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. 
[tox]
-envlist = py37, py38, py39, py310, py311
+envlist = py38, py39, py310, py311
 requires = pip >= 22.0
 
 [testenv]
@@ -61,9 +61,9 @@ deps =
 ;    -rrequirements-opt.txt
 commands = {[testenv]commands}
 
-# Test with python 3.7; pinned dev reqs; minimum run reqs
-[testenv:py37-minimum]
-basepython = python3.7
+# Test with python 3.8; pinned dev reqs; minimum run reqs
+[testenv:py38-minimum]
+basepython = python3.8
 deps =
     -rrequirements-dev.txt
    -rrequirements-min.txt
@@ -75,10 +75,6 @@ commands =
     python -m pip install --upgrade build
     python -m build
 
-[testenv:build-py37]
-basepython = python3.7
-commands = {[testenv:build]commands}
-
 [testenv:build-py38]
 basepython = python3.8
 commands = {[testenv:build]commands}
@@ -120,8 +116,8 @@ deps =
 ;    -rrequirements-opt.txt
 commands = {[testenv:build]commands}
 
-[testenv:build-py37-minimum]
-basepython = python3.7
+[testenv:build-py38-minimum]
+basepython = python3.8
 deps =
     -rrequirements-dev.txt
     -rrequirements-min.txt
@@ -146,11 +142,6 @@ commands =
     python -m pip list
     python test.py --example
 
-[testenv:gallery-py37]
-basepython = python3.7
-deps = {[testenv:gallery]deps}
-commands = {[testenv:gallery]commands}
-
 [testenv:gallery-py38]
 basepython = python3.8
 deps = {[testenv:gallery]deps}
@@ -195,9 +186,9 @@ commands =
     python -m pip list
     python test.py --example
 
-# Test with python 3.7; pinned dev and doc reqs; minimum run reqs
-[testenv:gallery-py37-minimum]
-basepython = python3.7
+# Test with python 3.8; pinned dev and doc reqs; minimum run reqs
+[testenv:gallery-py38-minimum]
+basepython = python3.8
 deps =
     -rrequirements-min.txt
-commands = {[testenv:gallery]commands}
+commands = {[testenv:gallery]commands}
\ No newline at end of file
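With the py37 environments gone, the minimum-version configuration is now exercised with "tox -e py38-minimum" (or "tox -e gallery-py38-minimum" for the gallery run), matching the renamed CI environments.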