diff --git a/.codespellrc b/.codespellrc index ad57c4b81..a38689dfe 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,7 +1,8 @@ [codespell] # in principle .ipynb can be corrected -- a good number of typos there # nwb-schema -- excluding since submodule, should have its own fixes/checks -skip = .git,*.pdf,*.svg,venvs,env,*.ipynb,nwb-schema +skip = .git,*.pdf,*.svg,venvs,env,nwb-schema +ignore-regex = ^\s*"image/\S+": ".* # it is optin in a url # potatos - demanded to be left alone, autogenerated ignore-words-list = optin,potatos diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 7a1e8dc04..7aa79c9e7 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -16,4 +16,4 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Codespell - uses: codespell-project/actions-codespell@v1 + uses: codespell-project/actions-codespell@v2 diff --git a/.github/workflows/run_all_tests.yml b/.github/workflows/run_all_tests.yml index 0e9d9b131..c47941c21 100644 --- a/.github/workflows/run_all_tests.yml +++ b/.github/workflows/run_all_tests.yml @@ -196,9 +196,9 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } - - { name: windows-python3.11-ros3, python-ver: "3.11", os: windows-latest } - - { name: macos-python3.11-ros3 , python-ver: "3.11", os: macos-latest } + - { name: conda-linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-windows-python3.11-ros3, python-ver: "3.11", os: windows-latest } + - { name: conda-macos-python3.11-ros3 , python-ver: "3.11", os: macos-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -243,9 +243,9 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } - - { name: windows-gallery-python3.11-ros3, python-ver: "3.11", os: windows-latest } - - { name: macos-gallery-python3.11-ros3 , python-ver: "3.11", os: macos-latest } + - { name: conda-linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-windows-gallery-python3.11-ros3, python-ver: "3.11", os: windows-latest } + - { name: conda-macos-gallery-python3.11-ros3 , python-ver: "3.11", os: macos-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 diff --git a/.github/workflows/run_dandi_read_tests.yml b/.github/workflows/run_dandi_read_tests.yml index ec8cc2e84..857b32c9a 100644 --- a/.github/workflows/run_dandi_read_tests.yml +++ b/.github/workflows/run_dandi_read_tests.yml @@ -47,4 +47,4 @@ jobs: - name: Run DANDI read tests run: | - pytest -rP tests/read_dandi/ + python tests/read_dandi/test_read_dandi.py diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 89793901d..e4479a554 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -174,7 +174,7 @@ jobs: fail-fast: false matrix: include: - - { name: linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-linux-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } steps: - name: Cancel non-latest runs uses: styfle/cancel-workflow-action@0.11.0 @@ -219,7 +219,7 @@ jobs: fail-fast: false matrix: include: - - { name: linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-linux-gallery-python3.11-ros3 , python-ver: "3.11", os: ubuntu-latest } steps: - name: Cancel non-latest runs uses: 
styfle/cancel-workflow-action@0.11.0 diff --git a/docs/gallery/general/scratch.py b/docs/gallery/general/scratch.py index 50d97339a..0e00c5e96 100644 --- a/docs/gallery/general/scratch.py +++ b/docs/gallery/general/scratch.py @@ -12,7 +12,7 @@ .. note:: The scratch space is explicitly for non-standardized data that is not intended for reuse - by others. Standard NWB:N types, and extension if required, should always be used for any data that you + by others. Standard NWB types, and extension if required, should always be used for any data that you intend to share. As such, published data should not include scratch data and a user should be able to ignore any data stored in scratch to use a file. @@ -127,7 +127,7 @@ # # You may end up wanting to store results from some one-off analysis, and writing an extension # to get your data into an NWBFile is too much over head. This is facilitated by the scratch space -# in NWB:N. [#]_ +# in NWB. [#]_ # # First, lets read our processed data and then make a copy diff --git a/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb b/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb index c348c4bd0..65dc34188 100644 --- a/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb +++ b/docs/notebooks/convert-crcns-ret-1-meisterlab-compare-nwb-1.0.6.ipynb @@ -39,7 +39,7 @@ "source": [ "This notebook uses the convert script and API for NWB v.1.0.6 (not the current NWB 2.0 and PyNWB) to generate NWB v1.0.6 data files and compare with the current format. This notebook is mainly for comparison purposes. The corresponding notebook for converting the MeisterLab example data to NWB 2.x is available here: https://github.com/NeurodataWithoutBorders/pynwb/blob/dev/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb .\n", "\n", - "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate (i.e., the orignal write API for NWB v1.x). A tar file with the example data is available for download from: https://portal.nersc.gov/project/crcns/download/nwb-1/example_script_data/source_data_2.tar.gz Please download and uncompress the data file and update the paths in the *Settings* section if you want to run the notebook. " + "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate (i.e., the original write API for NWB v1.x). A tar file with the example data is available for download from: https://portal.nersc.gov/project/crcns/download/nwb-1/example_script_data/source_data_2.tar.gz Please download and uncompress the data file and update the paths in the *Settings* section if you want to run the notebook. " ] }, { @@ -71,7 +71,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 3 Exectute convert using the original H5Gate API" + "# 3 Execute convert using the original H5Gate API" ] }, { @@ -1259,7 +1259,7 @@ "source": [ "Compared to the convert using NWB v1.0.x shown above, the NWB 2 convert example makes the following main changes:\n", "\n", - "* NWB 2.x uses the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification. E.g., in the original script for NWB 1.0.x, pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. 
For NWB 2 we create an extensions MeisterImageSeries which extens ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. For NWB 2 we chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n", + "* NWB 2.x uses the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification. E.g., in the original script for NWB 1.0.x, pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. For NWB 2 we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. For NWB 2 we chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n", "* Change si_unit attribute to unit for compliance with the spec of ImageSeries \n", "* Moved 'source' attribute from the Module to the Interface as source is not defined in the spec for modules but only for Interface\n", "* Added missing 'source' for SpikeUnit\n", @@ -1269,7 +1269,7 @@ "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n", "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n", "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n", - " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", + " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n", " * /subject, subject/genotype, subject/species : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n", " * /specifications, /specifications/nwb_core.py : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/44 will be added by PyNWB automatically" ] }, { diff --git a/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb b/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb index d66a0644e..0107d42aa 100644 --- a/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb +++ b/docs/notebooks/convert-crcns-ret-1-meisterlab.ipynb @@ -109,7 +109,7 @@ "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. 
\n", "\n", "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n", - "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n", + "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n", "\n", "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n", "\n", @@ -123,7 +123,7 @@ "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n", "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n", "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n", - " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", + " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). 
This will require an extension for SpikeUnit.\n", " * /subject, subject/genotype, subject/species : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n", " * /specifications, /specifications/nwb_core.py : See Issue https://github.com/NeurodataWithoutBorders/pynwb/issues/44 will be added by PyNWB automatically\n", @@ -591,7 +591,7 @@ "# Build the namespace\n", "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n", "\n", - "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n", + "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n", "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n", " attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n", " NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n", @@ -697,7 +697,7 @@ "metadata": {}, "source": [ "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n", - "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n", + "* Our custom attributes have been added to the constructor with appropriate documentation describing the type and purpose we indicated in the spec for our attributes\n", "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class" ] }, @@ -746,7 +746,7 @@ " | bits_per_pixel (int): Number of bit per image pixel\n", " | dimension (Iterable): Number of pixels on x, y, (and z) axes.\n", " | resolution (float): The smallest meaningful difference (in specified unit) between values in data\n", - " | conversion (float): Scalar to multiply each element by to conver to volts\n", + " | conversion (float): Scalar to multiply each element by to convert to volts\n", " | timestamps (ndarray or list or tuple or Dataset or DataChunkIterator or DataIO or TimeSeries): Timestamps for samples stored in data\n", " | starting_time (float): The timestamp of the first sample\n", " | rate (float): Sampling rate in Hz\n", @@ -885,7 +885,7 @@ "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n", " import h5py\n", " #########################################\n", - " # Create the NWBFile containter\n", + " # Create the NWBFile container\n", " ##########################################\n", " nwbfile = NWBFile(session_description=file_meta['description'],\n", " identifier=file_meta['identifier'],\n", @@ -1004,7 +1004,7 @@ "source": [ "## Step 5.2: Convert all files\n", "\n", - "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file" + "Convert all the files by iterating over the files and calling the `convert_single_file` function for each of the files" ] }, { diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb index 73090873a..4d081f7ab 100644 --- 
a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb +++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions-and-external-stimulus.ipynb @@ -100,7 +100,7 @@ "This example is based on https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. \n", "\n", "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n", - "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n", + "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n", "\n", "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n", "\n", @@ -113,12 +113,12 @@ "* NWBContainer is now a base type of all core neurodata_types and as such `help` and `source` attributes have been added to all core types\n", "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n", "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n", - " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", + " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). This will require an extension for SpikeUnit.\n", " * /subject, subject/genotype, subject/species : See Issue https://bitbucket.org/lblneuro/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n", " * /specifications, /specifications/nwb_core.py : See Issue https://bitbucket.org/lblneuro/pynwb/issues/44 will be added by PyNWB automatically\n", "\n", - "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. 
" + "For readability and to ease comparison, we include in Sectoin 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. " ] }, { @@ -570,7 +570,7 @@ "# Build the namespace\n", "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n", "\n", - "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n", + "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n", "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n", " attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n", " NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n", @@ -605,7 +605,7 @@ "metadata": {}, "source": [ "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n", - "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n", + "* Our custom attributes have been added to the constructor with approbriate documentation describing the type and purpose we indicated in the spec for our attributes\n", "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class" ] }, @@ -652,7 +652,7 @@ " | bits_per_pixel (int): Number of bit per image pixel\n", " | dimension (Iterable): Number of pixels on x, y, (and z) axes.\n", " | resolution (float): The smallest meaningful difference (in specified unit) between values in data\n", - " | conversion (float): Scalar to multiply each element by to conver to volts\n", + " | conversion (float): Scalar to multiply each element by to convert to volts\n", " | timestamps (list or ndarray or TimeSeries): Timestamps for samples stored in data\n", " | starting_time (float): The timestamp of the first sample\n", " | rate (float): Sampling rate in Hz\n", @@ -887,7 +887,7 @@ "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n", " import h5py\n", " #########################################\n", - " # Create the NWBFile containter\n", + " # Create the NWBFile container\n", " ##########################################\n", " nwbfile = NWBFile(file_name=file_meta['output_filename'],\n", " session_description=file_meta['description'],\n", @@ -1001,7 +1001,7 @@ "source": [ "## Step 5.3: Convert all files\n", "\n", - "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file" + "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the file" ] }, { diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb index de032a1df..d612c11c9 100644 --- a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb +++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-with-custom-extensions.ipynb @@ -100,7 +100,7 @@ "This example is based on 
https://github.com/NeurodataWithoutBorders/api-python/blob/master/examples/create_scripts/crcns_ret-1.py from H5Gate. \n", "\n", "Compared to the NWB files generated by the original example we here use the extension mechanism to add custom data fields rather than adding unspecified custom data directly to the file, i.e., all objects (datasets, attributes, groups etc.) are governed by a formal specification.\n", - "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extensions MeisterImageSeries which extens ImageSeries and stores that values as attributes pixel_size, x, y, dx, dy. We here chosse attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more approbirate.\n", + "* Previously pixle_size, meister_x, meister_y, meister_dx, meister_dy were stored as custom datasets in ImageSeries. Here we create an extension MeisterImageSeries which extends ImageSeries and stores those values as attributes pixel_size, x, y, dx, dy. We here chose attributes instead of datasets simply because these are small, single int and float metadata values for which attributes are more appropriate.\n", "\n", "Compared to the NWB files generated by the original example the files generated here contain the following additional main changes:\n", "\n", @@ -110,16 +110,16 @@ "* Added missing tags and description for epochs\n", "* Added /general/devices/... to describe the device\n", "* Added neurodata_type and namespace attributes for format compliance\n", - "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries diretly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n", - "* /general/extracellular_ephys has been resturctured so that alldata about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n", + "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries directly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n", + "* /general/extracellular_ephys has been restructured so that all data about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n", "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n", "* The following custom metadata fields---i.e., datasets that were originally added to the file without being part of the NWB specification and without creation of corresponding extensions---have not yet been integrated with the NWB files:\n", - " * /general custom metdata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", + " * /general custom metadata: /notes, /random_number_generation, /related_publications. This will require extension of NWBFile to extend the spec of /general. Improvements to make this easier have been proposed for discussion at the upcoming hackathon.\n", " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times (i.e., /processing/Cells/UnitTimes/cell_*/stim_* in the original version). 
This will require an extension for SpikeUnit.\n", " * /subject, subject/genotype, subject/species : See Issue https://bitbucket.org/lblneuro/pynwb/issues/45 support for subject metadata is upcoming in PyNWB \n", " * /specifications, /specifications/nwb_core.py : See Issue https://bitbucket.org/lblneuro/pynwb/issues/44 will be added by PyNWB automatically\n", "\n", - "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. " + "For readability and to ease comparison, we include in Section 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. " ] }, { @@ -555,7 +555,7 @@ "# Build the namespace\n", "ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', ns_name)\n", "\n", - "# Create a custom ImageSeries to add our custom attributes and add our extenions to the namespace\n", + "# Create a custom ImageSeries to add our custom attributes and add our extensions to the namespace\n", "mis_ext = NWBGroupSpec('A custom ImageSeries to add MeisterLab custom metadata',\n", " attributes=[NWBAttributeSpec('x' , 'int', 'meister x', required=False),\n", " NWBAttributeSpec('y' , 'int', 'meister y', required=False),\n", @@ -590,7 +590,7 @@ "metadata": {}, "source": [ "We can now inspect our container class using the usual mechanisms, e.g., help. For illustration purposes, let's call help on our class. Here we can see that:\n", - "* Our custom attributes have been added to the constructor with approbriate documention describing the type and purpose we indicated in the spec for our attributes\n", + "* Our custom attributes have been added to the constructor with appropriate documentation describing the type and purpose we indicated in the spec for our attributes\n", "* From the \"Method resolution order\" documentationw we can see that our MeisterImageSeries inherits from pynwb.image.ImageSeries so that interaction mechanism from the base class are also available in our class" ] }, @@ -637,7 +637,7 @@ " | bits_per_pixel (int): Number of bit per image pixel\n", " | dimension (Iterable): Number of pixels on x, y, (and z) axes.\n", " | resolution (float): The smallest meaningful difference (in specified unit) between values in data\n", - " | conversion (float): Scalar to multiply each element by to conver to volts\n", + " | conversion (float): Scalar to multiply each element by to convert to volts\n", " | timestamps (list or ndarray or TimeSeries): Timestamps for samples stored in data\n", " | starting_time (float): The timestamp of the first sample\n", " | rate (float): Sampling rate in Hz\n", @@ -871,7 +871,7 @@ "source": [ "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n", " #########################################\n", - " # Create the NWBFile containter\n", + " # Create the NWBFile container\n", " ##########################################\n", " nwbfile = NWBFile(file_name=file_meta['output_filename'],\n", " session_description=file_meta['description'],\n", @@ -974,7 +974,7 @@ "source": [ "## Step 5.3: Convert all files\n", "\n", - "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file" + "Convert all the files by iterating over the files and calling the `convert_single_file` function for each of the files"
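The convert-all cell above boils down to one call per recording. A minimal sketch of that driver loop, assuming the per-file inputs were collected into four parallel lists (the list names here are hypothetical; only the `convert_single_file` signature is taken from the notebook):

```python
# Hypothetical collections of per-file inputs gathered earlier in the notebook.
all_stimulus_data = []   # placeholder: one stimulus dict per file
all_file_meta = []       # placeholder: one file-metadata dict per file
all_spike_units = []     # placeholder: spike-unit data per file
all_electrode_meta = []  # placeholder: electrode metadata per file

# One call to the notebook's convert_single_file per recording.
for stim, meta, units, electrodes in zip(
    all_stimulus_data, all_file_meta, all_spike_units, all_electrode_meta
):
    convert_single_file(stim, meta, units, electrodes)
```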
] }, { diff --git a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb index fc7147686..a03f26ff0 100644 --- a/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb +++ b/docs/notebooks/convert-crcns-ret-1-old/convert-crcns-ret-1-meisterlab-without-custom-extensions.ipynb @@ -105,15 +105,15 @@ "* Added missing tags and description for epochs\n", "* Added /general/devices/... to describe the device\n", "* Added neurodata_type and namespace attributes for format compliance\n", - "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries diretly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n", - "* /general/extracellular_ephys has been resturctured so that alldata about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n", + "* Instead of storing stimulus data in external HDF5 files we here store all data in the same NWB file. Added stimulus data to ImageSeries directly and added corresponding conversion, resolution, unit, etc. attributes to ensure format compliance\n", + "* /general/extracellular_ephys has been restructured so that all data about the probe is now in /general/extracellular_ephys/61-channel_probe/device\n", "* The original script reused iterator variables in nested loops. We have updated those occurrence to avoid consusion and avoid possible errors. \n", "* This notebook, currently does not store custom metadata fields (i.e., datasets that were added to the file without being part of the NWB specification and without creation of corresponding extensions). The main omitted objects are:\n", " * ImageSeries: pixle_size, meister_x, meister_y, meister_dx, meister_dy (the data of those variables is available in this notebook as part of the stimulus_data dict / curr_stimulus)\n", - " * /general custom metdata: /subject, subject/genotype, subject/species, /specifications, /specifications/nwb_core.py, /notes, /random_number_generation, /related_publications, \n", + " * /general custom metadata: /subject, subject/genotype, subject/species, /specifications, /specifications/nwb_core.py, /notes, /random_number_generation, /related_publications, \n", " * SpikeUnit custom datasets with additional copies of the per-stimulus spike times: /processing/Cells/UnitTimes/cell_*/stim_*\n", "\n", - "For readability and to ease comparison, we include in Sectoin 6 the original example scrip from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. " + "For readability and to ease comparison, we include in Section 6 the original example script from H5Gate. Note, the files generated by the original script are omitting a few required datasets/attributes and as such do not actually validate. 
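Since the cell above notes that the original H5Gate files do not validate, it is worth showing how a converted file can be checked. A minimal sketch using pynwb's `validate` function, the same call used in the validation tests later in this diff (`output.nwb` is a placeholder path):

```python
from pynwb import NWBHDF5IO, validate

# Open the converted file with its cached namespaces and run the validator.
with NWBHDF5IO("output.nwb", mode="r", load_namespaces=True) as io:
    errors = validate(io)

# An empty list means the file passes validation.
print(errors if errors else "no validation errors")
```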
" ] }, { @@ -552,7 +552,7 @@ "source": [ "def convert_single_file(file_stimulus_data, file_meta, spike_units, electrode_meta):\n", " #########################################\n", - " # Create the NWBFile containter\n", + " # Create the NWBFile container\n", " ##########################################\n", " nwbfile = NWBFile(file_name=file_meta['output_filename'],\n", " session_description=file_meta['description'],\n", @@ -649,7 +649,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Convert all the files by iteating over the files and calling `convert_single_file` function for each of the file" + "Convert all the files by iterating over the files and calling `convert_single_file` function for each of the file" ] }, { diff --git a/docs/notebooks/read-Allen-Brain-Oservatory.ipynb b/docs/notebooks/read-Allen-Brain-Oservatory.ipynb index 7f5825a66..84c5288c7 100644 --- a/docs/notebooks/read-Allen-Brain-Oservatory.ipynb +++ b/docs/notebooks/read-Allen-Brain-Oservatory.ipynb @@ -185,7 +185,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Each of these three \"ophys experiments\" corresponds to a single 2-photon microscopy recording session. In each container, the particular visual stimulus that was presented is indicated by the `session_type`. Each `session_type` contains several diffferent stimulus sets; for more information, take a look at the [whitepaper](http://help.brain-map.org/display/observatory/Data+-+Visual+Coding).\n", + "Each of these three \"ophys experiments\" corresponds to a single 2-photon microscopy recording session. In each container, the particular visual stimulus that was presented is indicated by the `session_type`. Each `session_type` contains several different stimulus sets; for more information, take a look at the [whitepaper](http://help.brain-map.org/display/observatory/Data+-+Visual+Coding).\n", "\n", "Lets use pynwb to load the data from `three_session_B`. If this is the first time through this notebook this might take a minute to download these files, and will require approximately 300Mb of total disk space for the three nwb files: " ] @@ -215,7 +215,7 @@ "metadata": {}, "source": [ "# Reading legacy nwb files with pynwb:\n", - "Now that we have downloaded the nwb files using the `BrainObservatoryCache`, we can use pynwb to load the data and take a peek inside. Because this file was created from [version 1.0 of the NWB Schema](https://github.com/NeurodataWithoutBorders/specification/blob/master/version_1.0.6/NWB_file_format_specification_1.0.6.pdf>), we have to use a object called a type map to help the [NWB 2.0 schema](http://nwb-schema.readthedocs.io/en/latest/format.html) interpret the data in the \"legacy\" file. Alot has changed in the transition from NWB 1.0 to 2.0, including a more modular software architecture, a simplified (and extended) specification language, a mechanism for easy creation of custom schema extensions ([Click here for more information](http://www.nwb.org/2017/09/06/what-is-new-in-nwbn-v2-0/))." + "Now that we have downloaded the nwb files using the `BrainObservatoryCache`, we can use pynwb to load the data and take a peek inside. Because this file was created from [version 1.0 of the NWB Schema](https://github.com/NeurodataWithoutBorders/specification/blob/master/version_1.0.6/NWB_file_format_specification_1.0.6.pdf>), we have to use a object called a type map to help the [NWB 2.0 schema](http://nwb-schema.readthedocs.io/en/latest/format.html) interpret the data in the \"legacy\" file. 
A lot has changed in the transition from NWB 1.0 to 2.0, including a more modular software architecture, a simplified (and extended) specification language, a mechanism for easy creation of custom schema extensions ([Click here for more information](http://www.nwb.org/2017/09/06/what-is-new-in-nwbn-v2-0/))." ] }, { @@ -423,7 +423,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Lets use the allensdk to plot one of these image templates from one of the natural movies that was shown during this session. The template is has an original shape of 304x608 pixels, however this source image is stretched to fit on a 1200x1920 monitor, and warped so that scene appears flat from the perspective of the viewer, whos eye is close to the screen. For more information about the Brain Observatory Stimulus, check out the [stimulus whitepaper](http://help.brain-map.org/download/attachments/10616846/VisualCoding_VisualStimuli.pdf)" + "Lets use the allensdk to plot one of these image templates from one of the natural movies that was shown during this session. The template has an original shape of 304x608 pixels, however this source image is stretched to fit on a 1200x1920 monitor, and warped so that the scene appears flat from the perspective of the viewer, whose eye is close to the screen. For more information about the Brain Observatory Stimulus, check out the [stimulus whitepaper](http://help.brain-map.org/download/attachments/10616846/VisualCoding_VisualStimuli.pdf)" ] }, { diff --git a/setup.cfg b/setup.cfg index 3c492df25..cccacf048 100644 --- a/setup.cfg +++ b/setup.cfg @@ -28,6 +28,7 @@ per-file-ignores = tests/integration/__init__.py:F401 src/pynwb/testing/__init__.py:F401 src/pynwb/validate.py:T201 + tests/read_dandi/test_read_dandi.py:T201 setup.py:T201 test.py:T201 scripts/*:T201 diff --git a/src/pynwb/testing/mock/ophys.py b/src/pynwb/testing/mock/ophys.py index 5b43828fa..d9ba02572 100644 --- a/src/pynwb/testing/mock/ophys.py +++ b/src/pynwb/testing/mock/ophys.py @@ -4,7 +4,7 @@ from hdmf.common.table import DynamicTableRegion -from ... import NWBFile +from ... 
import NWBFile, ProcessingModule from ...device import Device from ...ophys import ( RoiResponseSeries, @@ -272,6 +272,8 @@ def mock_RoiResponseSeries( else: n_rois = 5 + plane_seg = plane_segmentation or mock_PlaneSegmentation(n_rois=n_rois, nwbfile=nwbfile) + roi_response_series = RoiResponseSeries( name=name if name is not None else name_generator("RoiResponseSeries"), data=data if data is not None else np.ones((30, n_rois)), @@ -280,7 +282,7 @@ def mock_RoiResponseSeries( or DynamicTableRegion( name="rois", description="rois", - table=plane_segmentation or mock_PlaneSegmentation(n_rois=n_rois, nwbfile=nwbfile), + table=plane_seg, data=list(range(n_rois)), ), resolution=resolution, @@ -298,6 +300,9 @@ def mock_RoiResponseSeries( if "ophys" not in nwbfile.processing: nwbfile.create_processing_module("ophys", "ophys") + if plane_seg.name not in nwbfile.processing["ophys"].data_interfaces: + nwbfile.processing["ophys"].add(plane_seg) + nwbfile.processing["ophys"].add(roi_response_series) return roi_response_series @@ -309,9 +314,9 @@ def mock_DfOverF( nwbfile: Optional[NWBFile] = None ) -> DfOverF: df_over_f = DfOverF( - roi_response_series=roi_response_series or [mock_RoiResponseSeries(nwbfile=nwbfile)], name=name if name is not None else name_generator("DfOverF"), ) + plane_seg = mock_PlaneSegmentation(nwbfile=nwbfile) if nwbfile is not None: if "ophys" not in nwbfile.processing: @@ -319,6 +324,14 @@ def mock_DfOverF( nwbfile.processing["ophys"].add(df_over_f) + else: + pm = ProcessingModule(name="ophys", description="ophys") + pm.add(plane_seg) + pm.add(df_over_f) + + df_over_f.add_roi_response_series( + roi_response_series or mock_RoiResponseSeries(nwbfile=nwbfile, plane_segmentation=plane_seg) + ) return df_over_f @@ -328,13 +341,22 @@ def mock_Fluorescence( nwbfile: Optional[NWBFile] = None, ) -> Fluorescence: fluorescence = Fluorescence( - roi_response_series=roi_response_series or [mock_RoiResponseSeries(nwbfile=nwbfile)], name=name if name is not None else name_generator("Fluorescence"), ) + plane_seg = mock_PlaneSegmentation(nwbfile=nwbfile) if nwbfile is not None: if "ophys" not in nwbfile.processing: nwbfile.create_processing_module("ophys", "ophys") + nwbfile.processing["ophys"].add(fluorescence) + else: + pm = ProcessingModule(name="ophys", description="ophys") + pm.add(plane_seg) + pm.add(fluorescence) + + fluorescence.add_roi_response_series( + roi_response_series or mock_RoiResponseSeries(nwbfile=nwbfile, plane_segmentation=plane_seg) + ) return fluorescence diff --git a/test.py b/test.py index 96b33445c..16191ae3f 100755 --- a/test.py +++ b/test.py @@ -163,7 +163,7 @@ def validate_nwbs(): def get_namespaces(nwbfile): comp = run(["python", "-m", "pynwb.validate", - "--list-namespaces", "--cached-namespace", nwb], + "--list-namespaces", nwbfile], stdout=PIPE, stderr=STDOUT, universal_newlines=True, timeout=30) if comp.returncode != 0: @@ -179,14 +179,13 @@ def get_namespaces(nwbfile): cmds = [] cmds += [["python", "-m", "pynwb.validate", nwb]] - cmds += [["python", "-m", "pynwb.validate", "--cached-namespace", nwb]] cmds += [["python", "-m", "pynwb.validate", "--no-cached-namespace", nwb]] for ns in namespaces: # for some reason, this logging command is necessary to correctly printing the namespace in the # next logging command logging.info("Namespace found: %s" % ns) - cmds += [["python", "-m", "pynwb.validate", "--cached-namespace", "--ns", ns, nwb]] + cmds += [["python", "-m", "pynwb.validate", "--ns", ns, nwb]] for cmd in cmds: logging.info("Validating with \"%s\"." 
% (" ".join(cmd[:-1]))) diff --git a/tests/integration/hdf5/test_ecephys.py b/tests/integration/hdf5/test_ecephys.py index df6e81dfa..ff67d27c9 100644 --- a/tests/integration/hdf5/test_ecephys.py +++ b/tests/integration/hdf5/test_ecephys.py @@ -1,4 +1,5 @@ from hdmf.common import DynamicTableRegion +from pynwb import NWBFile from pynwb.ecephys import ( ElectrodeGroup, @@ -14,7 +15,7 @@ ) from pynwb.device import Device from pynwb.file import ElectrodeTable as get_electrode_table -from pynwb.testing import NWBH5IOMixin, AcquisitionH5IOMixin, TestCase +from pynwb.testing import NWBH5IOMixin, AcquisitionH5IOMixin, NWBH5IOFlexMixin, TestCase class TestElectrodeGroupIO(NWBH5IOMixin, TestCase): @@ -38,27 +39,36 @@ def getContainer(self, nwbfile): return nwbfile.get_electrode_group(self.container.name) -class TestElectricalSeriesIO(AcquisitionH5IOMixin, TestCase): +def setup_electrode_table(): + table = get_electrode_table() + dev1 = Device(name='dev1') + group = ElectrodeGroup( + name='tetrode1', + description='tetrode description', + location='tetrode location', + device=dev1 + ) + for i in range(4): + table.add_row(location='CA1', group=group, group_name='tetrode1') + return table, group, dev1 - @staticmethod - def make_electrode_table(self): - """ Make an electrode table, electrode group, and device """ - self.table = get_electrode_table() - self.dev1 = Device(name='dev1') - self.group = ElectrodeGroup(name='tetrode1', - description='tetrode description', - location='tetrode location', - device=self.dev1) - for i in range(4): - self.table.add_row(location='CA1', group=self.group, group_name='tetrode1') - def setUpContainer(self): - """ Return the test ElectricalSeries to read/write """ - self.make_electrode_table(self) +class TestElectricalSeriesIO(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "ElectricalSeries" + + def addContainer(self): + """ Add the test ElectricalSeries and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) + region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) data = list(zip(range(10), range(10, 20))) timestamps = list(map(lambda x: x/10., range(10))) channel_conversion = [1., 2., 3., 4.] @@ -71,14 +81,11 @@ def setUpContainer(self): filtering=filtering, timestamps=timestamps ) - return es - def addContainer(self, nwbfile): - """ Add the test ElectricalSeries and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(es) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['test_eS'] def test_eg_ref(self): """ @@ -92,58 +99,70 @@ def test_eg_ref(self): self.assertIsInstance(row2.iloc[0]['group'], ElectrodeGroup) -class MultiElectricalSeriesIOMixin(AcquisitionH5IOMixin): - """ - Mixin class for methods to run a roundtrip test writing an NWB file with multiple ElectricalSeries. +class TestLFPIO(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "LFP" - The abstract method setUpContainer needs to be implemented by classes that include this mixin. 
- def setUpContainer(self): - # return a test Container to read/write - """ + def addContainer(self): + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpTwoElectricalSeries(self): - """ Return two test ElectricalSeries to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) region1 = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) region2 = DynamicTableRegion(name='electrodes', data=[1, 3], description='the second and fourth electrodes', - table=self.table) + table=table) data1 = list(zip(range(10), range(10, 20))) data2 = list(zip(reversed(range(10)), reversed(range(10, 20)))) timestamps = list(map(lambda x: x/10., range(10))) es1 = ElectricalSeries(name='test_eS1', data=data1, electrodes=region1, timestamps=timestamps) es2 = ElectricalSeries(name='test_eS2', data=data2, electrodes=region2, channel_conversion=[4., .4], timestamps=timestamps) - return es1, es2 + lfp = LFP() + self.nwbfile.add_acquisition(lfp) + lfp.add_electrical_series([es1, es2]) - def addContainer(self, nwbfile): - """ Add the test ElectricalSeries and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['LFP'] -class TestLFPIO(MultiElectricalSeriesIOMixin, TestCase): +class TestFilteredEphysIO(NWBH5IOFlexMixin, TestCase): - def setUpContainer(self): - """ Return a test LFP to read/write """ - es = self.setUpTwoElectricalSeries() - lfp = LFP(es) - return lfp + def getContainerType(self): + return "FilteredEphys" + def addContainer(self): + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) -class TestFilteredEphysIO(MultiElectricalSeriesIOMixin, TestCase): + region1 = DynamicTableRegion(name='electrodes', + data=[0, 2], + description='the first and third electrodes', + table=table) + region2 = DynamicTableRegion(name='electrodes', + data=[1, 3], + description='the second and fourth electrodes', + table=table) + data1 = list(zip(range(10), range(10, 20))) + data2 = list(zip(reversed(range(10)), reversed(range(10, 20)))) + timestamps = list(map(lambda x: x/10., range(10))) + es1 = ElectricalSeries(name='test_eS1', data=data1, electrodes=region1, timestamps=timestamps) + es2 = ElectricalSeries(name='test_eS2', data=data2, electrodes=region2, channel_conversion=[4., .4], + timestamps=timestamps) + fe = FilteredEphys() + self.nwbfile.add_acquisition(fe) + fe.add_electrical_series([es1, es2]) - def setUpContainer(self): - """ Return a test FilteredEphys to read/write """ - es = self.setUpTwoElectricalSeries() - fe = FilteredEphys(es) - return fe + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['FilteredEphys'] class TestClusteringIO(AcquisitionH5IOMixin, TestCase): @@ -165,28 +184,35 @@ def roundtripExportContainer(self, cache_spec=False): return super().roundtripExportContainer(cache_spec) -class EventWaveformConstructor(AcquisitionH5IOMixin, TestCase): +class EventWaveformConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "SpikeEventSeries" + + def addContainer(self): + """ Add the test SpikeEventSeries and related objects to the 
given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpContainer(self): - """ Return a test EventWaveform to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) - sES = SpikeEventSeries(name='test_sES', - data=((1, 1), (2, 2), (3, 3)), - timestamps=[0., 1., 2.], - electrodes=region) - ew = EventWaveform(sES) - return ew + table=table) + ses = SpikeEventSeries( + name='test_sES', + data=((1, 1), (2, 2), (3, 3)), + timestamps=[0., 1., 2.], + electrodes=region + ) - def addContainer(self, nwbfile): - """ Add the test EventWaveform and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + ew = EventWaveform() + self.nwbfile.add_acquisition(ew) + ew.add_spike_event_series(ses) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['EventWaveform'] class ClusterWaveformsConstructor(AcquisitionH5IOMixin, TestCase): @@ -220,51 +246,66 @@ def roundtripExportContainer(self, cache_spec=False): return super().roundtripExportContainer(cache_spec) -class FeatureExtractionConstructor(AcquisitionH5IOMixin, TestCase): +class FeatureExtractionConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "FeatureExtraction" + + def addContainer(self): + """ Add the test FeatureExtraction and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) - def setUpContainer(self): - """ Return a test FeatureExtraction to read/write """ event_times = [1.9, 3.5] - TestElectricalSeriesIO.make_electrode_table(self) region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) description = ['desc1', 'desc2', 'desc3'] features = [[[0., 1., 2.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]] fe = FeatureExtraction(electrodes=region, description=description, times=event_times, features=features) - return fe - def addContainer(self, nwbfile): - """ Add the test FeatureExtraction and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(fe) + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['FeatureExtraction'] -class EventDetectionConstructor(AcquisitionH5IOMixin, TestCase): - def setUpContainer(self): - """ Return a test EventDetection to read/write """ - TestElectricalSeriesIO.make_electrode_table(self) +class EventDetectionConstructor(NWBH5IOFlexMixin, TestCase): + + def getContainerType(self): + return "EventDetection" + + def addContainer(self): + """ Add the test EventDetection and related objects to the given NWBFile """ + table, group, dev1 = setup_electrode_table() + self.nwbfile.add_device(dev1) + self.nwbfile.add_electrode_group(group) + self.nwbfile.set_electrode_table(table) + region = DynamicTableRegion(name='electrodes', data=[0, 2], description='the first and third electrodes', - table=self.table) + table=table) data = list(range(10)) 
ts = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] - self.eS = ElectricalSeries(name='test_eS', data=data, electrodes=region, timestamps=ts) - eD = EventDetection(detection_method='detection_method', - source_electricalseries=self.eS, - source_idx=(1, 2, 3), - times=(0.1, 0.2, 0.3)) - return eD + eS = ElectricalSeries( + name='test_eS', + data=data, + electrodes=region, + timestamps=ts + ) + eD = EventDetection( + detection_method='detection_method', + source_electricalseries=eS, + source_idx=(1, 2, 3), + times=(0.1, 0.2, 0.3) + ) - def addContainer(self, nwbfile): - """ Add the test EventDetection and related objects to the given NWBFile """ - nwbfile.add_device(self.dev1) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.eS) - nwbfile.add_acquisition(self.container) + self.nwbfile.add_acquisition(eS) + self.nwbfile.add_acquisition(eD) + + def getContainer(self, nwbfile: NWBFile): + return nwbfile.acquisition['EventDetection'] diff --git a/tests/read_dandi/test_read_dandi.py b/tests/read_dandi/test_read_dandi.py index 84e9f3f62..0e0698d77 100644 --- a/tests/read_dandi/test_read_dandi.py +++ b/tests/read_dandi/test_read_dandi.py @@ -1,52 +1,62 @@ +"""Test reading NWB files from the DANDI Archive using ROS3.""" from dandi.dandiapi import DandiAPIClient +import random import sys import traceback from pynwb import NWBHDF5IO -from pynwb.testing import TestCase -class TestReadNWBDandisets(TestCase): - """Test reading NWB files from the DANDI Archive using ROS3.""" +# NOTE: do not name the function with "test_" prefix, otherwise pytest +# will try to run it as a test + +def read_first_nwb_asset(): + """Test reading the first NWB asset from a random selection of 50 dandisets that uses NWB.""" + num_dandisets_to_read = 50 + client = DandiAPIClient() + dandisets = list(client.get_dandisets()) + random.shuffle(dandisets) + dandisets_to_read = dandisets[:num_dandisets_to_read] + print("Reading NWB files from the following dandisets:") + print([d.get_raw_metadata()["identifier"] for d in dandisets_to_read]) + + failed_reads = dict() + for i, dandiset in enumerate(dandisets_to_read): + dandiset_metadata = dandiset.get_raw_metadata() + + # skip any dandisets that do not use NWB + if not any( + data_standard["identifier"] == "RRID:SCR_015242" # this is the RRID for NWB + for data_standard in dandiset_metadata["assetsSummary"].get("dataStandard", []) + ): + continue + + dandiset_identifier = dandiset_metadata["identifier"] + print("--------------") + print(f"{i}: {dandiset_identifier}") + + # iterate through assets until we get an NWB file (it could be MP4) + assets = dandiset.get_assets() + first_asset = next(assets) + while first_asset.path.split(".")[-1] != "nwb": + first_asset = next(assets) + if first_asset.path.split(".")[-1] != "nwb": + print("No NWB files?!") + continue - def test_read_first_nwb_asset(self): - """Test reading the first NWB asset from each dandiset that uses NWB.""" - client = DandiAPIClient() - dandisets = client.get_dandisets() + s3_url = first_asset.get_content_url(follow_redirects=1, strip_query=True) - failed_reads = dict() - for i, dandiset in enumerate(dandisets): - dandiset_metadata = dandiset.get_raw_metadata() + try: + with NWBHDF5IO(path=s3_url, load_namespaces=True, driver="ros3") as io: + io.read() + except Exception as e: + print(traceback.format_exc()) + failed_reads[dandiset] = e - # skip any dandisets that do not use NWB - if not any( - data_standard["identifier"] == "RRID:SCR_015242" # this is 
the RRID for NWB - for data_standard in dandiset_metadata["assetsSummary"].get("dataStandard", []) - ): - continue + if failed_reads: + print(failed_reads) + sys.exit(1) - dandiset_identifier = dandiset_metadata["identifier"] - print("--------------") - print(f"{i}: {dandiset_identifier}") - # iterate through assets until we get an NWB file (it could be MP4) - assets = dandiset.get_assets() - first_asset = next(assets) - while first_asset.path.split(".")[-1] != "nwb": - first_asset = next(assets) - if first_asset.path.split(".")[-1] != "nwb": - print("No NWB files?!") - continue - - s3_url = first_asset.get_content_url(follow_redirects=1, strip_query=True) - - try: - with NWBHDF5IO(path=s3_url, load_namespaces=True, driver="ros3") as io: - io.read() - except Exception as e: - print(traceback.format_exc()) - failed_reads[dandiset] = e - - if failed_reads: - print(failed_reads) - sys.exit(1) +if __name__ == "__main__": + read_first_nwb_asset() diff --git a/tests/unit/test_ecephys.py b/tests/unit/test_ecephys.py index 26320394b..6f76a5e8c 100644 --- a/tests/unit/test_ecephys.py +++ b/tests/unit/test_ecephys.py @@ -2,6 +2,7 @@ import numpy as np +from pynwb.base import ProcessingModule from pynwb.ecephys import ( ElectricalSeries, SpikeEventSeries, @@ -217,7 +218,11 @@ def test_init(self): table, region = self._create_table_and_region() sES = SpikeEventSeries('test_sES', list(range(10)), list(range(10)), region) - ew = EventWaveform(sES) + pm = ProcessingModule(name='test_module', description='a test module') + ew = EventWaveform() + pm.add(table) + pm.add(ew) + ew.add_spike_event_series(sES) self.assertEqual(ew.spike_event_series['test_sES'], sES) self.assertEqual(ew['test_sES'], ew.spike_event_series['test_sES']) @@ -274,10 +279,25 @@ def _create_table_and_region(self): ) return table, region + def test_init(self): + _, region = self._create_table_and_region() + eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4]) + msg = ( + "The linked table for DynamicTableRegion 'electrodes' does not share " + "an ancestor with the DynamicTableRegion." + ) + with self.assertWarnsRegex(UserWarning, msg): + lfp = LFP(eS) + self.assertEqual(lfp.electrical_series.get('test_eS'), eS) + self.assertEqual(lfp['test_eS'], lfp.electrical_series.get('test_eS')) + def test_add_electrical_series(self): lfp = LFP() table, region = self._create_table_and_region() eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4]) + pm = ProcessingModule(name='test_module', description='a test module') + pm.add(table) + pm.add(lfp) lfp.add_electrical_series(eS) self.assertEqual(lfp.electrical_series.get('test_eS'), eS) @@ -295,16 +315,24 @@ def _create_table_and_region(self): return table, region def test_init(self): - table, region = self._create_table_and_region() + _, region = self._create_table_and_region() eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4]) - fe = FilteredEphys(eS) + msg = ( + "The linked table for DynamicTableRegion 'electrodes' does not share " + "an ancestor with the DynamicTableRegion." 
diff --git a/tests/unit/test_ecephys.py b/tests/unit/test_ecephys.py
index 26320394b..6f76a5e8c 100644
--- a/tests/unit/test_ecephys.py
+++ b/tests/unit/test_ecephys.py
@@ -2,6 +2,7 @@
 
 import numpy as np
 
+from pynwb.base import ProcessingModule
 from pynwb.ecephys import (
     ElectricalSeries,
     SpikeEventSeries,
@@ -217,7 +218,11 @@ def test_init(self):
         table, region = self._create_table_and_region()
         sES = SpikeEventSeries('test_sES', list(range(10)), list(range(10)), region)
 
-        ew = EventWaveform(sES)
+        pm = ProcessingModule(name='test_module', description='a test module')
+        ew = EventWaveform()
+        pm.add(table)
+        pm.add(ew)
+        ew.add_spike_event_series(sES)
         self.assertEqual(ew.spike_event_series['test_sES'], sES)
         self.assertEqual(ew['test_sES'], ew.spike_event_series['test_sES'])
 
@@ -274,10 +279,25 @@ def _create_table_and_region(self):
         )
         return table, region
 
+    def test_init(self):
+        _, region = self._create_table_and_region()
+        eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        msg = (
+            "The linked table for DynamicTableRegion 'electrodes' does not share "
+            "an ancestor with the DynamicTableRegion."
+        )
+        with self.assertWarnsRegex(UserWarning, msg):
+            lfp = LFP(eS)
+        self.assertEqual(lfp.electrical_series.get('test_eS'), eS)
+        self.assertEqual(lfp['test_eS'], lfp.electrical_series.get('test_eS'))
+
     def test_add_electrical_series(self):
         lfp = LFP()
         table, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        pm = ProcessingModule(name='test_module', description='a test module')
+        pm.add(table)
+        pm.add(lfp)
         lfp.add_electrical_series(eS)
         self.assertEqual(lfp.electrical_series.get('test_eS'), eS)
@@ -295,16 +315,24 @@ def _create_table_and_region(self):
         return table, region
 
     def test_init(self):
-        table, region = self._create_table_and_region()
+        _, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
-        fe = FilteredEphys(eS)
+        msg = (
+            "The linked table for DynamicTableRegion 'electrodes' does not share "
+            "an ancestor with the DynamicTableRegion."
+        )
+        with self.assertWarnsRegex(UserWarning, msg):
+            fe = FilteredEphys(eS)
         self.assertEqual(fe.electrical_series.get('test_eS'), eS)
         self.assertEqual(fe['test_eS'], fe.electrical_series.get('test_eS'))
 
     def test_add_electrical_series(self):
-        fe = FilteredEphys()
         table, region = self._create_table_and_region()
         eS = ElectricalSeries('test_eS', [0, 1, 2, 3], region, timestamps=[0.1, 0.2, 0.3, 0.4])
+        pm = ProcessingModule(name='test_module', description='a test module')
+        fe = FilteredEphys()
+        pm.add(table)
+        pm.add(fe)
         fe.add_electrical_series(eS)
         self.assertEqual(fe.electrical_series.get('test_eS'), eS)
         self.assertEqual(fe['test_eS'], fe.electrical_series.get('test_eS'))
diff --git a/tests/unit/test_file.py b/tests/unit/test_file.py
index bb5c9c1e1..c9bd98ad0 100644
--- a/tests/unit/test_file.py
+++ b/tests/unit/test_file.py
@@ -563,9 +563,8 @@ def test_simple(self):
         with NWBHDF5IO(self.path, 'w') as io:
             io.write(nwbfile, cache_spec=False)
 
-        with self.assertWarnsWith(UserWarning, "No cached namespaces found in %s" % self.path):
-            with NWBHDF5IO(self.path, 'r', load_namespaces=True) as reader:
-                nwbfile = reader.read()
+        with NWBHDF5IO(self.path, 'r', load_namespaces=True) as reader:
+            nwbfile = reader.read()
 
 
 class TestTimestampsRefDefault(TestCase):
diff --git a/tests/unit/test_ophys.py b/tests/unit/test_ophys.py
index 1ebb7c640..88bd24535 100644
--- a/tests/unit/test_ophys.py
+++ b/tests/unit/test_ophys.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from pynwb.base import TimeSeries
+from pynwb.base import TimeSeries, ProcessingModule
 from pynwb.device import Device
 from pynwb.image import ImageSeries
 from pynwb.ophys import (
@@ -398,9 +398,15 @@ def test_warnings(self):
 
 class DfOverFConstructor(TestCase):
     def test_init(self):
+        pm = ProcessingModule(name='ophys', description="Optical physiology")
+
         ps = create_plane_segmentation()
-        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        pm.add(ps)
+
+        dof = DfOverF()
+        pm.add(dof)
 
+        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
         rrs = RoiResponseSeries(
             name='test_ts',
             data=[1, 2, 3],
@@ -408,26 +414,32 @@ def test_init(self):
             unit='unit',
             timestamps=[0.1, 0.2, 0.3]
         )
+        dof.add_roi_response_series(rrs)
 
-        dof = DfOverF(rrs)
         self.assertEqual(dof.roi_response_series['test_ts'], rrs)
 
 
 class FluorescenceConstructor(TestCase):
     def test_init(self):
+        pm = ProcessingModule(name='ophys', description="Optical physiology")
+
         ps = create_plane_segmentation()
-        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        pm.add(ps)
 
-        ts = RoiResponseSeries(
+        ff = Fluorescence()
+        pm.add(ff)
+
+        rt_region = ps.create_roi_table_region(description='the second ROI', region=[1])
+        rrs = RoiResponseSeries(
             name='test_ts',
             data=[1, 2, 3],
             rois=rt_region,
             unit='unit',
             timestamps=[0.1, 0.2, 0.3]
         )
+        ff.add_roi_response_series(rrs)
 
-        ff = Fluorescence(ts)
-        self.assertEqual(ff.roi_response_series['test_ts'], ts)
+        self.assertEqual(ff.roi_response_series['test_ts'], rrs)
 
 
 class ImageSegmentationConstructor(TestCase):
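The `ProcessingModule` additions in these ecephys and ophys tests all work around the same warning: a `DynamicTableRegion` whose target table does not share an ancestor with the container that holds the series. A minimal sketch of the fix, using a bare `DynamicTable` as a stand-in for the tests' electrodes-table helper:

```python
from hdmf.common import DynamicTable, DynamicTableRegion
from pynwb.base import ProcessingModule
from pynwb.ecephys import ElectricalSeries, LFP

# Stand-in electrodes table; the real tests build one via a helper.
table = DynamicTable(name='electrodes', description='metadata about electrodes')
table.add_column(name='location', description='recording location')
for _ in range(4):
    table.add_row(location='CA1')
region = DynamicTableRegion(
    name='electrodes', data=[0, 1, 2, 3], description='all electrodes', table=table,
)

e_series = ElectricalSeries(
    name='test_eS', data=[0., 1., 2., 3.], electrodes=region,
    timestamps=[0.1, 0.2, 0.3, 0.4],
)

# Adding both the table and the LFP container to one ProcessingModule gives
# the DynamicTableRegion and its target table a shared ancestor, so no
# UserWarning is raised when the series is added.
pm = ProcessingModule(name='test_module', description='a test module')
lfp = LFP()
pm.add(table)
pm.add(lfp)
lfp.add_electrical_series(e_series)
```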
"ignore", + message=r"HERD is experimental .*", + category=UserWarning, + ) + er = HERD() + self.assertIsInstance(er, HERD) diff --git a/tests/validation/test_validate.py b/tests/validation/test_validate.py index 813f8d4e3..74ce0992c 100644 --- a/tests/validation/test_validate.py +++ b/tests/validation/test_validate.py @@ -2,6 +2,7 @@ import re from unittest.mock import patch from io import StringIO +import warnings from pynwb.testing import TestCase from pynwb import validate, NWBHDF5IO @@ -29,8 +30,6 @@ def test_validate_file_no_cache(self): "tests/back_compat/1.0.2_nwbfile.nwb"], capture_output=True) stderr_regex = re.compile( - r".*UserWarning: No cached namespaces found in tests/back_compat/1\.0\.2_nwbfile\.nwb\s*" - r"warnings.warn\(msg\)\s*" r"The file tests/back_compat/1\.0\.2_nwbfile\.nwb has no cached namespace information\. " r"Falling back to PyNWB namespace information\.\s*" ) @@ -47,8 +46,6 @@ def test_validate_file_no_cache_bad_ns(self): "--ns", "notfound"], capture_output=True) stderr_regex = re.compile( - r".*UserWarning: No cached namespaces found in tests/back_compat/1\.0\.2_nwbfile\.nwb\s*" - r"warnings.warn\(msg\)\s*" r"The file tests/back_compat/1\.0\.2_nwbfile\.nwb has no cached namespace information\. " r"Falling back to PyNWB namespace information\.\s*" r"The namespace 'notfound' could not be found in PyNWB namespace information as only " @@ -222,26 +219,44 @@ def test_validate_io_cached(self): def test_validate_io_cached_extension(self): """Test that validating a file with cached spec against its cached namespaces succeeds.""" - with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: - errors = validate(io) - self.assertEqual(errors, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: + errors = validate(io) + self.assertEqual(errors, []) def test_validate_io_cached_extension_pass_ns(self): """Test that validating a file with cached extension spec against the extension namespace succeeds.""" - with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: - errors = validate(io, 'ndx-testextension') - self.assertEqual(errors, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO('tests/back_compat/2.1.0_nwbfile_with_extension.nwb', 'r', load_namespaces=True) as io: + errors = validate(io, 'ndx-testextension') + self.assertEqual(errors, []) def test_validate_io_cached_core_with_io(self): """ For back-compatability, test that validating a file with cached extension spec against the core namespace succeeds when using the `io` + `namespace` keywords. """ - with NWBHDF5IO( - path='tests/back_compat/2.1.0_nwbfile_with_extension.nwb', mode='r', load_namespaces=True - ) as io: - results = validate(io=io, namespace="core") - self.assertEqual(results, []) + with warnings.catch_warnings(record=True): + warnings.filterwarnings( + "ignore", + message=r"Ignoring cached namespace .*", + category=UserWarning, + ) + with NWBHDF5IO( + path='tests/back_compat/2.1.0_nwbfile_with_extension.nwb', mode='r', load_namespaces=True + ) as io: + results = validate(io=io, namespace="core") + self.assertEqual(results, []) def test_validate_file_cached_extension(self): """