diff --git a/.github/workflows/deploy-tests.yml b/.github/workflows/deploy-tests.yml index 6793f0fe1..b41266e10 100644 --- a/.github/workflows/deploy-tests.yml +++ b/.github/workflows/deploy-tests.yml @@ -3,12 +3,11 @@ name: Deploy tests on: pull_request: +concurrency: # Cancel previous workflows on the same pull request + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: - cancel-previous-runs: - runs-on: ubuntu-latest - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 assess-file-changes: uses: catalystneuro/neuroconv/.github/workflows/assess-file-changes.yml@main diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index b1dc6a28a..2aa000307 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -156,7 +156,7 @@ jobs: run: pytest -rsx -n auto --dist loadscope --cov=./ --cov-report xml:./codecov.xml - name: Upload full coverage to Codecov if: ${{ matrix.python-version == '3.9' && matrix.os == 'ubuntu-latest' }} - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./codecov.xml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5dc35fd5c..edafd2755 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/psf/black - rev: 22.12.0 + rev: 23.1.0 hooks: - id: black exclude: ^docs/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bc566b6f..89c54a2ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,20 @@ # Upcoming +### Features +* The `OpenEphysRecordingInterface` is now a wrapper for `OpenEphysBinaryRecordingInterface`. [PR #294](https://github.com/catalystneuro/neuroconv/pull/294) + + +# v0.2.4 + ### Deprecation * All usages of `use_times` have been removed from spikeinterface tools and interfaces. 
The function `add_electrical_series` now determines whether the timestamps of the spikeinterface recording extractor are uniform or not and automatically stores the data according to best practices [PR #40](https://github.com/catalystneuro/neuroconv/pull/40) +* Dropped Python 3.7 support. [PR #237](https://github.com/catalystneuro/neuroconv/pull/237) ### Features * Added a tool for determining rising and falling frames from TTL signals (`parse_rising_frames_from_ttl` and `get_falling_frames_from_ttl`). [PR #244](https://github.com/catalystneuro/neuroconv/pull/244) * Added the `SpikeGLXNIDQInterface` for reading data from `.nidq.bin` files, as well as the ability to parse event times from specific channels via the `get_event_starting_times_from_ttl` method. Also included a `neuroconv.tools.testing.MockSpikeGLXNIDQInterface` for testing purposes. [PR #247](https://github.com/catalystneuro/neuroconv/pull/247) * Improved handling of writing multiple probes to the same `NWB` file [PR #255](https://github.com/catalystneuro/neuroconv/pull/255) +* Added basic temporal alignment methods to all DataInterfaces. These are `get_timestamps`, `align_starting_time`, `align_timestamps`, and `align_by_interpolation`. Added tests that serve as a first demonstration of the intended uses in a variety of cases. [PR #237](https://github.com/catalystneuro/neuroconv/pull/237) ### Pending deprecation * Added `DeprecationWarnings` to all `spikeextractors` backends. [PR #265](https://github.com/catalystneuro/neuroconv/pull/265) @@ -15,6 +23,7 @@ ### Fixes * Temporarily hotfixed the `tensorflow` dependency after the release of `deeplabcut==2.3.0`. [PR #268](https://github.com/catalystneuro/neuroconv/pull/268) * Fixed cleanup of waveform tests in SI tools. [PR #277](https://github.com/catalystneuro/neuroconv/pull/277) +* Fixed metadata structure for the CsvTimeIntervalsInterface, which previously did not pass validation in NWBConverters. 
[PR #237](https://github.com/catalystneuro/neuroconv/pull/237) * Added propagation of the `load_sync_channel` argument for the `SpikeGLXNIDQInterface`. [PR #282](https://github.com/catalystneuro/neuroconv/pull/282) * Fixed the default `es_key` used by stand-alone write using any `RecordingExtractorInterface` or `LFPExtractorInterface`. [PR #288](https://github.com/catalystneuro/neuroconv/pull/288) * Fixed the default `ExtractorName` used to load the spikeinterface extractor of the `SpikeGLXLFPInterface`. [PR #288](https://github.com/catalystneuro/neuroconv/pull/288) diff --git a/setup.py b/setup.py index 7438363f2..b29d524e2 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ setup( name="neuroconv", - version="0.2.4", + version="0.2.5", description="Convert data from proprietary formats to NWB format.", long_description=long_description, long_description_content_type="text/markdown", diff --git a/src/neuroconv/basedatainterface.py b/src/neuroconv/basedatainterface.py index 9761ff55c..bc8df84f0 100644 --- a/src/neuroconv/basedatainterface.py +++ b/src/neuroconv/basedatainterface.py @@ -1,8 +1,9 @@ """Authors: Cody Baker and Ben Dichter.""" -from abc import abstractmethod, ABC import uuid +from abc import abstractmethod, ABC from typing import Optional +import numpy as np from pynwb import NWBFile from .utils import get_base_schema, get_schema_from_method_signature @@ -22,7 +23,7 @@ def get_conversion_options_schema(cls): return get_schema_from_method_signature(cls.run_conversion, exclude=["nwbfile", "metadata"]) def __init__(self, **source_data): - self.source_data = source_data + self.source_data: dict = source_data def get_metadata_schema(self): """Retrieve JSON schema for metadata.""" @@ -46,6 +47,91 @@ def get_metadata(self): return metadata + @abstractmethod + def get_original_timestamps(self) -> np.ndarray: + """ + Retrieve the original unaltered timestamps for the data in this interface. 
+ + This function should retrieve the data on-demand by re-initializing the IO. + + Returns + ------- + timestamps: numpy.ndarray + The timestamps for the data stream. + """ + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + @abstractmethod + def get_timestamps(self) -> np.ndarray: + """ + Retrieve the timestamps for the data in this interface. + + Returns + ------- + timestamps: numpy.ndarray + The timestamps for the data stream. + """ + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + @abstractmethod + def align_timestamps(self, aligned_timestamps: np.ndarray): + """ + Replace all timestamps for this interface with those aligned to the common session start time. + + Must be in units seconds relative to the common 'session_start_time'. + + Parameters + ---------- + aligned_timestamps : numpy.ndarray + The synchronized timestamps for data in this interface. + """ + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" + ) + + def align_starting_time(self, starting_time: float): + """ + Align the starting time for this interface relative to the common session start time. + + Must be in units seconds relative to the common 'session_start_time'. + + Parameters + ---------- + starting_time : float + The starting time for all temporal data in this interface. + """ + self.align_timestamps(aligned_timestamps=self.get_timestamps() + starting_time) + + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + """ + Interpolate the timestamps of this interface using a mapping from some unaligned time basis to its aligned one. 
+ + Use this method if the unaligned timestamps of the data in this interface are not directly tracked by a primary + system, but are known to occur between timestamps that are tracked, then align the timestamps of this interface + by interpolating between the two. + + An example could be a metronomic TTL pulse (e.g., every second) from a secondary data stream to the primary + timing system; if the time references of this interface are recorded within the relative time of the secondary + data stream, then their exact time in the primary system is inferred given the pulse times. + + Must be in units seconds relative to the common 'session_start_time'. + + Parameters + ---------- + unaligned_timestamps : numpy.ndarray + The timestamps of the unaligned secondary time basis. + aligned_timestamps : numpy.ndarray + The timestamps aligned to the primary time basis. + """ + self.align_timestamps( + aligned_timestamps=np.interp(x=self.get_timestamps(), xp=unaligned_timestamps, fp=aligned_timestamps) + ) + def get_conversion_options(self): """Child DataInterface classes should override this to match their conversion options.""" return dict() @@ -78,5 +164,4 @@ def run_conversion( If 'nwbfile_path' is specified, informs user after a successful write operation. The default is True. 
""" - raise NotImplementedError("The run_conversion method for this DataInterface has not been defined!") diff --git a/src/neuroconv/datainterfaces/__init__.py b/src/neuroconv/datainterfaces/__init__.py index 5d4a8f0f0..cafd6153d 100644 --- a/src/neuroconv/datainterfaces/__init__.py +++ b/src/neuroconv/datainterfaces/__init__.py @@ -19,8 +19,8 @@ BlackrockRecordingInterface, BlackrockSortingInterface, ) -from .ecephys.openephys.openephysdatainterface import ( - OpenEphysRecordingInterface, +from .ecephys.openephys.openephysbinarydatainterface import ( + OpenEphysRecordingInterface, # temporary import until transitioning to the renamed interface OpenEphysSortingInterface, ) from .ecephys.axona.axonadatainterface import ( diff --git a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py index c4abb2e20..6bb23fda0 100644 --- a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py +++ b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py @@ -2,6 +2,7 @@ from typing import Optional from warnings import warn +import numpy as np from scipy.io.wavfile import read from neuroconv.basedatainterface import BaseDataInterface @@ -79,7 +80,6 @@ def __init__(self, file_paths: list, verbose: bool = False): super().__init__(file_paths=file_paths) def get_metadata_schema(self): - metadata_schema = super().get_metadata_schema() time_series_metadata_schema = get_schema_from_hdmf_class(TimeSeries) metadata_schema["properties"]["Behavior"] = get_base_schema(tag="Behavior") @@ -112,6 +112,22 @@ def get_metadata(self) -> dict: metadata.update(Behavior=behavior_metadata) return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." 
+ ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" + ) + def run_conversion( self, nwbfile_path: Optional[FilePathType] = None, diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index 8b07f2eb3..603b24706 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -2,6 +2,7 @@ from typing import Optional from pathlib import Path +import numpy as np from pynwb.file import NWBFile from ....basedatainterface import BaseDataInterface @@ -81,6 +82,22 @@ def get_metadata(self): ) return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" 
+ ) + def run_conversion( self, nwbfile_path: OptionalFilePathType = None, diff --git a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index ee03b7505..3a14d8817 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -2,6 +2,7 @@ from typing import Optional from pathlib import Path +import numpy as np from pynwb.file import NWBFile from ....basedatainterface import BaseDataInterface @@ -44,6 +45,22 @@ def __init__( self.verbose = verbose super().__init__(file_path=file_path) + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" 
+ ) + def run_conversion( self, nwbfile_path: OptionalFilePathType = None, @@ -81,7 +98,6 @@ def run_conversion( with make_or_load_nwbfile( nwbfile_path=nwbfile_path, nwbfile=nwbfile, metadata=metadata, overwrite=overwrite, verbose=self.verbose ) as nwbfile_out: - labels = self.sleap_io.load_slp(self.file_path) nwbfile_out = self.sleap_io.io.nwb.append_nwb_data( labels=labels, nwbfile=nwbfile_out, pose_estimation_metadata=pose_estimation_metadata diff --git a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py index 13d2eca08..724b18471 100644 --- a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py @@ -91,7 +91,6 @@ def get_metadata_schema(self): return metadata_schema def get_metadata(self): - metadata = super().get_metadata() behavior_metadata = dict( Movies=[ @@ -103,6 +102,22 @@ def get_metadata(self): return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" 
+ ) + def run_conversion( self, nwbfile_path: Optional[FilePathType] = None, @@ -233,7 +248,6 @@ def run_conversion( nwbfile_path=nwbfile_path, nwbfile=nwbfile, metadata=metadata, overwrite=overwrite, verbose=self.verbose ) as nwbfile_out: for j, (image_series_kwargs, file_list) in enumerate(zip(videos_metadata_unique, file_paths_list)): - with VideoCaptureContext(str(file_list[0])) as vc: fps = vc.get_video_fps() max_frames = stub_frames if stub_test else None diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axona_utils.py b/src/neuroconv/datainterfaces/ecephys/axona/axona_utils.py index 62ed1818b..3517f57f0 100644 --- a/src/neuroconv/datainterfaces/ecephys/axona/axona_utils.py +++ b/src/neuroconv/datainterfaces/ecephys/axona/axona_utils.py @@ -111,7 +111,6 @@ def read_all_eeg_file_lfp_data(file_path: FilePathType) -> np.ndarray: eeg_memmaps = list() sampling_rates = set() for fname in file_path_list: - sampling_rates.add(get_eeg_sampling_frequency(parent_path / fname)) eeg_memmaps.append(read_eeg_file_lfp_data(parent_path / fname)) @@ -397,7 +396,6 @@ def get_position_object(file_path: FilePathType) -> Position: position_timestamps = position_data[:, 0] for ichan in range(position_data.shape[1]): - spatial_series = SpatialSeries( name=position_channel_names[ichan], timestamps=position_timestamps, diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py index 4c6a0e200..81fddfad2 100644 --- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py @@ -32,7 +32,6 @@ def __init__(self, file_path: FilePathType, verbose: bool = True): self.recording_extractor.set_channel_groups(tetrode_id) def extract_nwb_file_metadata(self): - raw_annotations = self.recording_extractor.neo_reader.raw_annotations session_start_time = raw_annotations["blocks"][0]["segments"][0]["rec_datetime"] session_description 
= self.metadata_in_set_file["comments"] @@ -76,7 +75,6 @@ def extract_ecephys_metadata(self): return ecephys_metadata def get_metadata(self): - metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() @@ -124,7 +122,6 @@ def get_source_schema(cls): ) def __init__(self, file_path: FilePathType): - data = read_all_eeg_file_lfp_data(file_path).T sampling_frequency = get_eeg_sampling_frequency(file_path) super().__init__(traces_list=[data], sampling_frequency=sampling_frequency) diff --git a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py index d305454d6..93d184740 100644 --- a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py @@ -1,6 +1,7 @@ """Authors: Cody Baker and Ben Dichter.""" from typing import Optional +import numpy as np from pynwb import NWBFile from pynwb.device import Device from pynwb.ecephys import ElectrodeGroup @@ -80,6 +81,15 @@ def get_metadata(self): return metadata + def get_original_timestamps(self) -> np.ndarray: + return self.Extractor(**self.source_data).get_times() + + def get_timestamps(self) -> np.ndarray: + return self.recording_extractor.get_times() + + def align_timestamps(self, aligned_timestamps: np.ndarray): + self.recording_extractor.set_times(times=aligned_timestamps) + def subset_recording(self, stub_test: bool = False): """ Subset a recording extractor according to stub and channel subset options. 
diff --git a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py index e79a32dad..b7c05b25c 100644 --- a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py @@ -71,6 +71,15 @@ def get_metadata_schema(self): ) return metadata_schema + def get_original_timestamps(self) -> np.ndarray: + return self.Extractor(**self.source_data).get_times() + + def get_timestamps(self) -> np.ndarray: + return self.sorting_extractor.get_times() + + def align_timestamps(self, synchronized_timestamps: np.ndarray): + self.sorting_extractor.set_times(times=synchronized_timestamps) + def subset_sorting(self): from spikeextractors import SortingExtractor, SubSortingExtractor from spikeinterface import BaseSorting diff --git a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py index 1c9b74e53..db0034bf5 100644 --- a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py @@ -28,7 +28,6 @@ def __init__(self, file_path: FilePathType, verbose: bool = True): self.edf_header = self.recording_extractor.neo_reader.edf_header def extract_nwb_file_metadata(self): - nwbfile_metadata = dict( session_start_time=self.edf_header["startdate"], experimenter=self.edf_header["technician"], @@ -40,7 +39,6 @@ def extract_nwb_file_metadata(self): return nwbfile_metadata def extract_subject_metadata(self): - subject_metadata = dict( subject_id=self.edf_header["patientcode"], date_of_birth=self.edf_header["birthdate"], @@ -52,7 +50,6 @@ def extract_subject_metadata(self): return subject_metadata def get_metadata(self): - metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() metadata["NWBFile"].update(nwbfile_metadata) diff --git 
a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py index 7a5e7f7a1..fbfedb682 100644 --- a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py @@ -39,7 +39,6 @@ def extract_electrode_metadata_with_pyintan(file_path): def extract_electrode_metadata(recording_extractor): - channel_name_array = recording_extractor.get_property("channel_name") group_names = [channel.split("-")[0] for channel in channel_name_array] diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py similarity index 89% rename from src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py rename to src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py index 91991f185..d6f348e09 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py @@ -7,8 +7,8 @@ from ....utils import get_schema_from_method_signature, FolderPathType -class OpenEphysRecordingInterface(BaseRecordingExtractorInterface): - """Primary data interface for converting OpenEphys data. Uses +class OpenEphysBinaryRecordingInterface(BaseRecordingExtractorInterface): + """Primary data interface for converting binary OpenEphys data (.dat files). Uses :py:class:`~spikeinterface.extractors.OpenEphysBinaryRecordingExtractor`.""" ExtractorName = "OpenEphysBinaryRecordingExtractor" @@ -19,7 +19,9 @@ def get_source_schema(cls): source_schema = get_schema_from_method_signature( class_method=cls.__init__, exclude=["recording_id", "experiment_id", "stub_test"] ) - source_schema["properties"]["folder_path"]["description"] = "Path to directory containing OpenEphys files." 
+ source_schema["properties"]["folder_path"][ + "description" + ] = "Path to directory containing OpenEphys binary files." return source_schema def __init__( @@ -88,6 +90,11 @@ def get_metadata(self): return metadata +# Temporary solution for safely transitioning to the renamed interface +class OpenEphysRecordingInterface(OpenEphysBinaryRecordingInterface): + ExtractorName = "OpenEphysBinaryRecordingExtractor" + + class OpenEphysSortingInterface(BaseSortingExtractorInterface): """Primary data interface class for converting OpenEphys spiking data.""" diff --git a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py index 68971ccc0..3b2c3d121 100644 --- a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py +++ b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py @@ -23,7 +23,6 @@ def get_start_datetime(neo_reader): class AbfInterface(BaseIcephysInterface): - ExtractorName = "AxonIO" @classmethod diff --git a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py index e8181d13f..ff9765013 100644 --- a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py +++ b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py @@ -2,6 +2,7 @@ from typing import Optional, Tuple from warnings import warn +import numpy as np from pynwb import NWBFile, NWBHDF5IO from ...baseextractorinterface import BaseExtractorInterface @@ -47,7 +48,6 @@ def __init__(self, file_paths: list): self.n_channels = get_number_of_electrodes(neo_reader=self.readers_list[0]) def get_metadata_schema(self) -> dict: - metadata_schema = super().get_metadata_schema() if DandiIcephysMetadata: metadata_schema["properties"]["ndx-dandi-icephys"] = get_schema_from_hdmf_class(DandiIcephysMetadata) @@ -67,6 +67,22 @@ def get_metadata(self) -> dict: ) return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + 
"Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" + ) + def run_conversion( self, nwbfile: NWBFile = None, diff --git a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py index 9e344f624..bd431fd29 100644 --- a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py @@ -1,6 +1,7 @@ """Author: Ben Dichter.""" from typing import Optional +import numpy as np from pynwb import NWBFile from pynwb.device import Device from pynwb.ophys import ImagingPlane, TwoPhotonSeries @@ -72,6 +73,22 @@ def get_metadata(self): two_photon_series["rate"] = float(two_photon_series["rate"]) return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" 
+ ) + def run_conversion( self, nwbfile_path: OptionalFilePathType = None, diff --git a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py index 535d26caa..0c16e81eb 100644 --- a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py @@ -1,6 +1,7 @@ """Authors: Heberto Mayorquin, Cody Baker and Ben Dichter.""" from typing import Optional +import numpy as np from pynwb import NWBFile from pynwb.device import Device from pynwb.ophys import Fluorescence, ImageSegmentation, ImagingPlane, TwoPhotonSeries @@ -58,6 +59,22 @@ def get_metadata(self) -> dict: metadata.update(get_nwb_segmentation_metadata(self.segmentation_extractor)) return metadata + def get_original_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve the original unaltered timestamps for this interface! " + "Define the `get_original_timestamps` method for this interface." + ) + + def get_timestamps(self) -> np.ndarray: + raise NotImplementedError( + "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." + ) + + def align_timestamps(self, aligned_timestamps: np.ndarray): + raise NotImplementedError( + "The protocol for synchronizing the timestamps of this interface has not been specified!" 
+ ) + def run_conversion( self, nwbfile_path: Optional[FilePathType] = None, diff --git a/src/neuroconv/datainterfaces/ophys/cnmfe/cnmfedatainterface.py b/src/neuroconv/datainterfaces/ophys/cnmfe/cnmfedatainterface.py index 3de185de8..0eb709b2f 100644 --- a/src/neuroconv/datainterfaces/ophys/cnmfe/cnmfedatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/cnmfe/cnmfedatainterface.py @@ -6,6 +6,5 @@ class CnmfeSegmentationInterface(BaseSegmentationExtractorInterface): """Data interface for constrained non-negative matrix factorization (CNMFE) segmentation extractor.""" def __init__(self, file_path: FilePathType, verbose: bool = True): - super().__init__(file_path=file_path) self.verbose = verbose diff --git a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py index fc2d0862f..867ffc4a6 100644 --- a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py +++ b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py @@ -1,8 +1,8 @@ from abc import abstractmethod import os - from typing import Dict, Optional +import numpy as np from pynwb import NWBFile from ...basedatainterface import BaseDataInterface @@ -13,9 +13,7 @@ class TimeIntervalsInterface(BaseDataInterface): - """ - Abstract Interface for time intervals. 
- """ + """Abstract Interface for time intervals.""" def __init__( self, @@ -24,7 +22,6 @@ def __init__( verbose: bool = True, ): """ - Parameters ---------- file_path : FilePath @@ -35,7 +32,8 @@ def __init__( super().__init__(file_path=file_path) self.verbose = verbose - self.df = self._read_file(file_path, **read_kwargs) + self._read_kwargs = read_kwargs + self.dataframe = self._read_file(file_path, **read_kwargs) self.time_intervals = None def get_metadata(self): @@ -52,6 +50,67 @@ def get_metadata_schema(self): fpath = os.path.join(os.path.split(__file__)[0], "timeintervals.schema.json") return load_dict_from_file(fpath) + def get_original_timestamps(self, column: str) -> np.ndarray: + if not column.endswith("_time"): + raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") + + return self._read_file(**self.source_data, **self._read_kwargs)[column].values + + def get_timestamps(self, column: str) -> np.ndarray: + if not column.endswith("_time"): + raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") + + return self.dataframe[column].values + + def align_starting_time(self, starting_time: float): + timing_columns = [column for column in self.dataframe.columns if column.endswith("_time")] + + for column in timing_columns: + self.dataframe[column] += starting_time + + def align_timestamps(self, aligned_timestamps: np.ndarray, column: str, interpolate_other_columns: bool = False): + if not column.endswith("_time"): + raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") + + unaligned_timestamps = np.array(self.dataframe[column]) + self.dataframe[column] = aligned_timestamps + + if not interpolate_other_columns: + return + + other_timing_columns = [ + other_column + for other_column in self.dataframe.columns + if other_column.endswith("_time") and other_column != column + ] + for other_timing_column in other_timing_columns: + self.align_by_interpolation( + 
unaligned_timestamps=unaligned_timestamps, + aligned_timestamps=aligned_timestamps, + column=other_timing_column, + ) + + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray, column: str): + current_timestamps = self.get_timestamps(column=column) + assert ( + current_timestamps[1] >= unaligned_timestamps[0] + ), "All current timestamps except for the first must be strictly within the unaligned mapping." + assert ( + current_timestamps[-2] <= unaligned_timestamps[-1] + ), "All current timestamps except for the last must be strictly within the unaligned mapping." + # Assume timing column is ascending otherwise + + self.align_timestamps( + aligned_timestamps=np.interp( + x=current_timestamps, + xp=unaligned_timestamps, + fp=aligned_timestamps, + left=2 * aligned_timestamps[0] - aligned_timestamps[1], # If first or last values are outside alignment + right=2 * aligned_timestamps[-1] - aligned_timestamps[-2], # then use the most recent diff to regress + ), + column=column, + ) + def run_conversion( self, nwbfile_path: Optional[FilePathType] = None, @@ -66,7 +125,7 @@ def run_conversion( nwbfile_path=nwbfile_path, nwbfile=nwbfile, metadata=metadata, overwrite=overwrite, verbose=self.verbose ) as nwbfile_out: self.time_intervals = convert_df_to_time_intervals( - self.df, + self.dataframe, column_name_mapping=column_name_mapping, column_descriptions=column_descriptions, **metadata["TimeIntervals"][tag], diff --git a/src/neuroconv/nwbconverter.py b/src/neuroconv/nwbconverter.py index 16436266b..45fbd0975 100644 --- a/src/neuroconv/nwbconverter.py +++ b/src/neuroconv/nwbconverter.py @@ -176,7 +176,6 @@ def run_conversion( overwrite=overwrite, verbose=self.verbose, ) as nwbfile_out: - for interface_name, data_interface in self.data_interface_objects.items(): data_interface.run_conversion( nwbfile=nwbfile_out, metadata=metadata, **conversion_options_to_run.get(interface_name, dict()) diff --git 
a/src/neuroconv/tools/roiextractors/roiextractors.py b/src/neuroconv/tools/roiextractors/roiextractors.py index ad3d1b1ff..06c68ed0e 100644 --- a/src/neuroconv/tools/roiextractors/roiextractors.py +++ b/src/neuroconv/tools/roiextractors/roiextractors.py @@ -1006,12 +1006,10 @@ def write_segmentation( with make_or_load_nwbfile( nwbfile_path=nwbfile_path, nwbfile=nwbfile, metadata=metadata_base_common, overwrite=overwrite, verbose=verbose ) as nwbfile_out: - _ = get_module(nwbfile=nwbfile_out, name="ophys", description="contains optical physiology processed data") for plane_no_loop, (segmentation_extractor, metadata) in enumerate( zip(segmentation_extractors, metadata_base_list) ): - # Add device: add_devices(nwbfile=nwbfile_out, metadata=metadata) diff --git a/src/neuroconv/tools/signal_processing.py b/src/neuroconv/tools/signal_processing.py index 3f7e9133f..d35e9aa0a 100644 --- a/src/neuroconv/tools/signal_processing.py +++ b/src/neuroconv/tools/signal_processing.py @@ -3,6 +3,8 @@ import numpy as np +from ..utils import ArrayType + def get_rising_frames_from_ttl(trace: np.ndarray, threshold: Optional[float] = None) -> np.ndarray: """ diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py index bdb52c485..865ce7f32 100644 --- a/src/neuroconv/tools/spikeinterface/spikeinterface.py +++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py @@ -48,11 +48,11 @@ def set_dynamic_table_property( raise ValueError("'ids' and 'values' should be lists of same size") if index is False: if property_name in dynamic_table: - for (row_id, value) in zip(row_ids, values): + for row_id, value in zip(row_ids, values): dynamic_table[property_name].data[ids.index(row_id)] = value else: col_data = [default_value] * len(ids) # init with default val - for (row_id, value) in zip(row_ids, values): + for row_id, value in zip(row_ids, values): col_data[ids.index(row_id)] = value dynamic_table.add_column( name=property_name, 
description=description, data=col_data, index=index, table=table @@ -1058,7 +1058,6 @@ def write_recording( with make_or_load_nwbfile( nwbfile_path=nwbfile_path, nwbfile=nwbfile, metadata=metadata, overwrite=overwrite, verbose=verbose ) as nwbfile_out: - # Convenience function to add device, electrode groups and electrodes info add_electrodes_info(recording=recording, nwbfile=nwbfile_out, metadata=metadata) @@ -1541,7 +1540,7 @@ def write_sorting( write_in_processing_module=write_in_processing_module, units_table_name=units_name, unit_table_description=units_description, - write_waveforms=True, + write_waveforms=False, ) return nwbfile_out diff --git a/src/neuroconv/tools/testing/__init__.py b/src/neuroconv/tools/testing/__init__.py index 6bfd977da..4ab322d6c 100644 --- a/src/neuroconv/tools/testing/__init__.py +++ b/src/neuroconv/tools/testing/__init__.py @@ -1,2 +1,2 @@ from .mock_ttl_signals import generate_mock_ttl_signal, regenerate_test_cases -from .mock_interfaces import MockSpikeGLXNIDQInterface +from .mock_interfaces import MockSpikeGLXNIDQInterface, MockBehaviorEventInterface diff --git a/src/neuroconv/tools/testing/mock_interfaces.py b/src/neuroconv/tools/testing/mock_interfaces.py index ccb47df5c..a76814874 100644 --- a/src/neuroconv/tools/testing/mock_interfaces.py +++ b/src/neuroconv/tools/testing/mock_interfaces.py @@ -2,23 +2,68 @@ from typing import Optional, List import numpy as np +from pynwb import NWBFile +from pynwb.base import DynamicTable from .mock_ttl_signals import generate_mock_ttl_signal +from ...basedatainterface import BaseDataInterface from ...datainterfaces import SpikeGLXNIDQInterface +from ...utils import ArrayType, get_schema_from_method_signature + + +class MockBehaviorEventInterface(BaseDataInterface): + @classmethod + def get_source_schema(cls) -> dict: + source_schema = get_schema_from_method_signature(class_method=cls.__init__, exclude=["event_times"]) + source_schema["additionalProperties"] = True + return source_schema + + 
def __init__(self, event_times: Optional[ArrayType] = None): + """ + Define event times for some behavior. + + Parameters + ---------- + event_times : list of floats, optional + The event times to set as timestamps for this interface. + The default is the array [1.2, 2.3, 3.4] for similarity to the timescale of the MockSpikeGLXNIDQInterface. + """ + event_times = event_times or [1.2, 2.3, 3.4] + self.event_times = np.array(event_times) + self.original_event_times = np.array(event_times) # Make a copy of the initial loaded timestamps + + def get_original_timestamps(self) -> np.ndarray: + return self.original_event_times + + def get_timestamps(self) -> np.ndarray: + return self.event_times + + def align_timestamps(self, aligned_timestamps: np.ndarray): + self.event_times = aligned_timestamps + + def run_conversion(self, nwbfile: NWBFile, metadata: dict): + table = DynamicTable(name="BehaviorEvents", description="Times of various classified behaviors.") + table.add_column(name="event_time", description="Time of each event.") + for timestamp in self.get_timestamps(): # adding data by column gives error + table.add_row(event_time=timestamp) + nwbfile.add_acquisition(table) class MockSpikeGLXNIDQInterface(SpikeGLXNIDQInterface): ExtractorName = "NumpyRecording" + @classmethod + def get_source_schema(cls) -> dict: + source_schema = get_schema_from_method_signature(class_method=cls.__init__, exclude=["ttl_times"]) + source_schema["additionalProperties"] = True + return source_schema + def __init__( self, signal_duration: float = 7.0, ttl_times: Optional[List[List[float]]] = None, ttl_duration: float = 1.0 ): """ Define a mock SpikeGLXNIDQInterface by overriding the recording extractor to be a mock TTL signal. - # TODO, allow definition of channel names and more than one TTL, if desired. 
- # TODO, make the metadata of this mock mimic the true thing - Parameters ---------- signal_duration : float, default: 7.0 diff --git a/src/neuroconv/tools/testing/mock_ttl_signals.py b/src/neuroconv/tools/testing/mock_ttl_signals.py index e4c8789af..b87e37614 100644 --- a/src/neuroconv/tools/testing/mock_ttl_signals.py +++ b/src/neuroconv/tools/testing/mock_ttl_signals.py @@ -113,7 +113,8 @@ def generate_mock_ttl_signal( ttl_times = np.array(ttl_times) else: ttl_times = np.arange(start=1.0, stop=signal_duration, step=2.0) - assert not any( + + assert len(ttl_times) == 1 or not any( # np.diff errors out when len(ttl_times) < 2 np.diff(ttl_times) <= ttl_duration ), "There are overlapping TTL 'on' intervals! Please specify disjoint on/off periods." diff --git a/src/neuroconv/utils/json_schema.py b/src/neuroconv/utils/json_schema.py index a8a7ba5a0..789059d0e 100644 --- a/src/neuroconv/utils/json_schema.py +++ b/src/neuroconv/utils/json_schema.py @@ -85,7 +85,9 @@ def get_schema_from_method_signature(class_method: classmethod, exclude: list = if any(valid_args): param_types = [annotation_json_type_map[x.__name__] for x in np.array(args)[valid_args]] else: - raise ValueError("No valid arguments were found in the json type mapping!") + raise ValueError( + f"No valid arguments were found in the json type mapping for parameter {param}" + ) num_params = len(set(param_types)) conflict_message = ( "Conflicting json parameter types were detected from the annotation! 
" @@ -104,7 +106,7 @@ def get_schema_from_method_signature(class_method: classmethod, exclude: list = param_type = annotation_json_type_map[arg.__name__] else: raise ValueError( - f"No valid arguments were found in the json type mapping {arg} for parameter {param}" + f"No valid arguments were found in the json type mapping '{arg}' for parameter {param}" ) if arg == FilePathType: input_schema["properties"].update({param_name: dict(format="file")}) @@ -136,7 +138,13 @@ def fill_defaults(schema: dict, defaults: dict, overwrite: bool = True): defaults: dict overwrite: bool """ - for key, val in schema["properties"].items(): + # patternProperties introduced with the CsvTimeIntervalsInterface + # caused issue with NWBConverter.get_metadata_schema() call leading here + properties_reference = "properties" + if properties_reference not in schema and "patternProperties" in schema: + properties_reference = "patternProperties" + + for key, val in schema[properties_reference].items(): if key in defaults: if val["type"] == "object": fill_defaults(val, defaults[key], overwrite=overwrite) diff --git a/tests/test_behavior/test_video_utils.py b/tests/test_behavior/test_video_utils.py index e89d8adce..cde9515d7 100644 --- a/tests/test_behavior/test_video_utils.py +++ b/tests/test_behavior/test_video_utils.py @@ -22,7 +22,6 @@ @unittest.skipIf(not CV2_INSTALLED, "cv2 not installed") class TestVideoContext(unittest.TestCase): - frame_shape = (100, 200, 3) number_of_frames = 30 fps = 25 @@ -121,7 +120,6 @@ def test_isopened_assertions(self): @unittest.skipIf(not CV2_INSTALLED, "cv2 not installed") class TestMovieInterface(unittest.TestCase): - frame_shape = (800, 600, 3) number_of_frames = 50 fps = 25 diff --git a/tests/test_ecephys/test_mock_interfaces.py b/tests/test_ecephys/test_mock_nidq_interface.py similarity index 100% rename from tests/test_ecephys/test_mock_interfaces.py rename to tests/test_ecephys/test_mock_nidq_interface.py diff --git 
a/tests/test_ecephys/test_tools_spikeinterface.py b/tests/test_ecephys/test_tools_spikeinterface.py index 1434ed629..f25ba3f0a 100644 --- a/tests/test_ecephys/test_tools_spikeinterface.py +++ b/tests/test_ecephys/test_tools_spikeinterface.py @@ -57,7 +57,6 @@ def setUp(self): ) def test_default_values(self): - add_electrical_series(recording=self.test_recording_extractor, nwbfile=self.nwbfile, iterator_type=None) acquisition_module = self.nwbfile.acquisition @@ -177,7 +176,6 @@ def test_write_multiple_electrical_series_from_different_groups(self): self.test_recording_extractor.set_channel_groups(original_groups) def test_invalid_write_as_argument_assertion(self): - write_as = "any_other_string_that_is_not_raw_lfp_or_processed" reg_expression = f"'write_as' should be 'raw', 'processed' or 'lfp', but instead received value {write_as}" @@ -289,7 +287,6 @@ def setUp(self): ) def test_uniform_values(self): - gains = self.gains_default offsets = self.offset_defaults self.test_recording_extractor.set_channel_gains(gains=gains) @@ -315,7 +312,6 @@ def test_uniform_values(self): np.testing.assert_array_almost_equal(data_in_volts, traces_data_in_volts) def test_uniform_non_default(self): - gains = self.gains_uniform offsets = self.offsets_uniform self.test_recording_extractor.set_channel_gains(gains=gains) @@ -341,7 +337,6 @@ def test_uniform_non_default(self): np.testing.assert_array_almost_equal(data_in_volts, traces_data_in_volts) def test_variable_gains(self): - gains = self.gains_variable offsets = self.offsets_uniform self.test_recording_extractor.set_channel_gains(gains=gains) @@ -371,7 +366,6 @@ def test_variable_gains(self): np.testing.assert_array_almost_equal(data_in_volts, traces_data_in_volts) def test_null_offsets_in_recording_extractor(self): - gains = self.gains_default self.test_recording_extractor.set_channel_gains(gains=gains) @@ -398,7 +392,6 @@ def test_null_offsets_in_recording_extractor(self): np.testing.assert_array_almost_equal(data_in_volts, 
traces_data_in_volts) def test_variable_offsets_assertion(self): - gains = self.gains_default offsets = self.offsets_variable self.test_recording_extractor.set_channel_gains(gains=gains) @@ -439,7 +432,6 @@ def setUp(self): ) def test_default_chunking(self): - add_electrical_series(recording=self.test_recording_extractor, nwbfile=self.nwbfile) acquisition_module = self.nwbfile.acquisition @@ -467,7 +459,6 @@ def test_iterator_opts_propagation(self): assert electrical_series_data_iterator.chunk_shape == iterator_opts["chunk_shape"] def test_hdfm_iterator(self): - add_electrical_series(recording=self.test_recording_extractor, nwbfile=self.nwbfile, iterator_type="v1") acquisition_module = self.nwbfile.acquisition @@ -489,7 +480,6 @@ def test_non_iterative_write(self): isinstance(electrical_series.data, np.ndarray) def test_non_iterative_write_assertion(self): - # Estimate num of frames required to exceed memory capabilities dtype = self.test_recording_extractor.get_dtype() element_size_in_bytes = dtype.itemsize @@ -512,7 +502,6 @@ def test_non_iterative_write_assertion(self): check_if_recording_traces_fit_into_memory(recording=mock_recorder) def test_invalid_iterator_type_assertion(self): - iterator_type = "invalid_iterator_type" reg_expression = "iterator_type (.*?)" @@ -570,7 +559,6 @@ def test_default_values_single_segment(self): np.testing.assert_array_almost_equal(expected_data, extracted_data) def test_write_multiple_segments(self): - write_recording(recording=self.multiple_segment_recording_extractor, nwbfile=self.nwbfile, iterator_type=None) acquisition_module = self.nwbfile.acquisition diff --git a/tests/test_minimal/test_converter.py b/tests/test_minimal/test_converter.py index 1741b1a72..bf552dbda 100644 --- a/tests/test_minimal/test_converter.py +++ b/tests/test_minimal/test_converter.py @@ -4,6 +4,7 @@ from datetime import datetime import unittest +import numpy as np from pynwb import NWBFile from neuroconv import NWBConverter, ConverterPipe @@ -23,11 
+24,24 @@ def test_converter(): nwbfile_path = str(test_dir / "extension_test.nwb") class NdxEventsInterface(BaseDataInterface): + def __init__(self): + self.timestamps = np.array([0.0, 0.5, 0.6, 2.0, 2.05, 3.0, 3.5, 3.6, 4.0]) + self.original_timestamps = np.array(self.timestamps) + + def get_original_timestamps(self) -> np.ndarray: + return self.original_timestamps + + def get_timestamps(self) -> np.ndarray: + return self.timestamps + + def align_timestamps(self, aligned_timestamps: np.ndarray): + self.timestamps = aligned_timestamps + def run_conversion(self, nwbfile: NWBFile, metadata: dict): events = LabeledEvents( name="LabeledEvents", description="events from my experiment", - timestamps=[0.0, 0.5, 0.6, 2.0, 2.05, 3.0, 3.5, 3.6, 4.0], + timestamps=self.get_timestamps(), resolution=1e-5, data=[0, 1, 2, 3, 5, 0, 1, 2, 4], labels=["trial_start", "cue_onset", "cue_offset", "response_left", "response_right", "reward"], @@ -52,6 +66,15 @@ class InterfaceA(BaseDataInterface): def __init__(self, **source_data): super().__init__(**source_data) + def get_original_timestamps(self): + pass + + def get_timestamps(self): + pass + + def align_timestamps(self): + pass + def run_conversion(self): pass @@ -61,6 +84,15 @@ class InterfaceB(BaseDataInterface): def __init__(self, **source_data): super().__init__(**source_data) + def get_original_timestamps(self): + pass + + def get_timestamps(self): + pass + + def align_timestamps(self): + pass + def run_conversion(self): pass @@ -80,7 +112,6 @@ class NWBConverterChild(NWBConverter): assert converter.data_interface_classes["InterfaceB"] is self.InterfaceB def test_pipe_list_init(self): - interface_a = self.InterfaceA() interface_b = self.InterfaceB() data_interfaces_list = [interface_a, interface_b] @@ -96,7 +127,6 @@ def test_pipe_list_init(self): assert converter.data_interface_objects["InterfaceB"] is interface_b def test_pipe_list_dict(self): - interface_a = self.InterfaceA() interface_b = self.InterfaceB() 
data_interfaces_dict = dict(InterfaceA=interface_a, InterfaceB=interface_b) @@ -126,7 +156,6 @@ class NWBConverterChild(NWBConverter): assert converter_arguments.data_interface_classes == converter_child_class.data_interface_classes def test_unique_names_with_list_argument(self): - interface_a = self.InterfaceA() interface_a2 = self.InterfaceA() interface_b = self.InterfaceB() diff --git a/tests/test_minimal/test_testing/test_mocks/test_mock_interfaces.py b/tests/test_minimal/test_testing/test_mocks/test_mock_interfaces.py new file mode 100644 index 000000000..54e6912f2 --- /dev/null +++ b/tests/test_minimal/test_testing/test_mocks/test_mock_interfaces.py @@ -0,0 +1,10 @@ +from pathlib import Path + +from pynwb import NWBHDF5IO +from hdmf.testing import TestCase +from numpy.testing import assert_array_equal + +from neuroconv.tools.testing import MockBehaviorEventInterface + + +# TODO diff --git a/tests/test_minimal/test_tools_hdmf.py b/tests/test_minimal/test_tools_hdmf.py index 91b6d2eae..1c787a28c 100644 --- a/tests/test_minimal/test_tools_hdmf.py +++ b/tests/test_minimal/test_tools_hdmf.py @@ -38,7 +38,6 @@ def test_min_axis_too_large(): def test_sliceable_data_chunk_iterator(): - data = np.arange(100).reshape(10, 10) dci = SliceableDataChunkIterator(data=data, buffer_shape=(5, 5), chunk_shape=(5, 5)) diff --git a/tests/test_minimal/test_utils/test_json_schema_utils.py b/tests/test_minimal/test_utils/test_json_schema_utils.py index cdbe46ee2..b222d7a89 100644 --- a/tests/test_minimal/test_utils/test_json_schema_utils.py +++ b/tests/test_minimal/test_utils/test_json_schema_utils.py @@ -133,7 +133,6 @@ def test_dict_deep_update_4(): def test_fill_defaults(): - schema = dict( additionalProperties=False, properties=dict( diff --git a/tests/test_on_data/test_gin_behavior.py b/tests/test_on_data/test_gin_behavior.py index 90dcb66e6..083a95fff 100644 --- a/tests/test_on_data/test_gin_behavior.py +++ b/tests/test_on_data/test_gin_behavior.py @@ -21,7 +21,6 @@ class 
TestSLEAPInterface(unittest.TestCase): - savedir = OUTPUT_PATH @parameterized.expand( diff --git a/tests/test_on_data/test_gin_ecephys/test_aux_interfaces.py b/tests/test_on_data/test_gin_ecephys/test_aux_interfaces.py index 950f65e94..51c4576f4 100644 --- a/tests/test_on_data/test_gin_ecephys/test_aux_interfaces.py +++ b/tests/test_on_data/test_gin_ecephys/test_aux_interfaces.py @@ -76,7 +76,6 @@ class TestConverter(NWBConverter): # NWBRecordingExtractor on spikeinterface does not yet support loading data written from multiple segments. if recording.get_num_segments() == 1: - # Spikeinterface behavior is to load the electrode table channel_name property as a channel_id nwb_recording = NwbRecordingExtractor(file_path=nwbfile_path, electrical_series_name=electrical_series_name) if "channel_name" in recording.get_property_keys(): diff --git a/tests/test_on_data/test_gin_ecephys/test_raw_recordings.py b/tests/test_on_data/test_gin_ecephys/test_raw_recordings.py index 14763d375..54c0fa564 100644 --- a/tests/test_on_data/test_gin_ecephys/test_raw_recordings.py +++ b/tests/test_on_data/test_gin_ecephys/test_raw_recordings.py @@ -256,7 +256,6 @@ class TestConverter(NWBConverter): # NWBRecordingExtractor on spikeinterface does not yet support loading data written from multiple segment. 
if recording.get_num_segments() == 1: - # Spikeinterface behavior is to load the electrode table channel_name property as a channel_id nwb_recording = NwbRecordingExtractor(file_path=nwbfile_path, electrical_series_name=electrical_series_name) if "channel_name" in recording.get_property_keys(): diff --git a/tests/test_on_data/test_metadata/test_edf_metadata.py b/tests/test_on_data/test_metadata/test_edf_metadata.py index 7a93bb194..8a044f969 100644 --- a/tests/test_on_data/test_metadata/test_edf_metadata.py +++ b/tests/test_on_data/test_metadata/test_edf_metadata.py @@ -13,7 +13,6 @@ def setUpClass(cls): cls.interface = EDFRecordingInterface(file_path=file_path) def test_nwb_metadata(self): - nwb_metadata = self.interface.extract_nwb_file_metadata() extracted_session_start_time = nwb_metadata["session_start_time"] diff --git a/tests/test_on_data/test_metadata/test_neuroscope_metadata.py b/tests/test_on_data/test_metadata/test_neuroscope_metadata.py index b6164d64f..74d319143 100644 --- a/tests/test_on_data/test_metadata/test_neuroscope_metadata.py +++ b/tests/test_on_data/test_metadata/test_neuroscope_metadata.py @@ -15,7 +15,6 @@ def test_neuroscope_session_start_time(): def test_get_metadata(): - sx = NeuroScopeSortingInterface( str(NEUROSCOPE_PATH / "dataset_1"), xml_file_path=str(NEUROSCOPE_PATH / "dataset_1" / "YutaMouse42-151117.xml"), diff --git a/tests/test_on_data/test_metadata/test_spikeglx_metadata.py b/tests/test_on_data/test_metadata/test_spikeglx_metadata.py index 6cf937666..62612608d 100644 --- a/tests/test_on_data/test_metadata/test_spikeglx_metadata.py +++ b/tests/test_on_data/test_metadata/test_spikeglx_metadata.py @@ -14,7 +14,6 @@ def test_spikelgx_session_start_time_ap(): - folder_path = SPIKEGLX_PATH / "Noise4Sam_g0" / "Noise4Sam_g0_imec0" stream_id = "imec0.ap" recording = SpikeGLXRecordingExtractor(folder_path=folder_path, stream_id=stream_id) diff --git a/tests/test_on_data/test_temporal_alignment/__init__.py 
b/tests/test_on_data/test_temporal_alignment/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_on_data/test_temporal_alignment/test_temporal_alignment_methods.py b/tests/test_on_data/test_temporal_alignment/test_temporal_alignment_methods.py new file mode 100644 index 000000000..e6bce1a79 --- /dev/null +++ b/tests/test_on_data/test_temporal_alignment/test_temporal_alignment_methods.py @@ -0,0 +1,527 @@ +from tempfile import mkdtemp +from shutil import rmtree +from pathlib import Path +from typing import Union, Dict +from datetime import datetime + +import numpy as np +from numpy.testing import assert_array_equal, assert_array_almost_equal +from hdmf.testing import TestCase +from pandas import DataFrame +from pynwb import NWBHDF5IO + +from neuroconv import NWBConverter, ConverterPipe +from neuroconv.datainterfaces import CsvTimeIntervalsInterface +from neuroconv.tools.testing import MockBehaviorEventInterface, MockSpikeGLXNIDQInterface + + +class TestNIDQInterfacePulseTimesAlignment(TestCase): + @classmethod + def setUpClass(cls): + trial_system_delayed_start = 3.23 # Trial tracking system starts 3.23 seconds after SpikeGLX + trial_system_total_time = 10.0 # Was tracking trials for 10 seconds, according to trial tracking system + cls.regular_trial_length = ( + 1.0 # For simplicity, each trial lasts 1 second according to the trial tracking system + ) + trial_system_average_delay_per_second = 0.0001 # Clock on trial tracking system is slightly slower than NIDQ + + # The drift from the trial tracking system adds up over time + trial_system_total_drift = trial_system_total_time * trial_system_average_delay_per_second + + cls.expected_unaligned_trial_start_times = np.arange( + start=0.0, stop=trial_system_total_time, step=cls.regular_trial_length + ) + cls.aligned_trial_start_times = ( + cls.expected_unaligned_trial_start_times + + trial_system_delayed_start + + np.linspace( # use linspace to match the exact length of timestamps + 
start=0.0, stop=trial_system_total_drift, num=len(cls.expected_unaligned_trial_start_times) + ) + ) + + # Timing of events according to trial system + cls.unaligned_behavior_event_timestamps = [5.6, 7.3, 8.7] # Timing of events according to trial tracking system + + # Timing of events when interpolated by aligned trial times + cls.aligned_behavior_event_timestamps = [8.830632, 10.530796, 11.930964] + + cls.tmpdir = Path(mkdtemp()) + cls.csv_file_path = cls.tmpdir / "testing_nidq_pulse_times_alignment_trial_table.csv" + dataframe = DataFrame( + data=dict( + start_time=cls.expected_unaligned_trial_start_times, + stop_time=cls.expected_unaligned_trial_start_times + cls.regular_trial_length, + ) + ) + dataframe.to_csv(path_or_buf=cls.csv_file_path, index=False) + + @classmethod + def tearDownClass(cls): + rmtree(cls.tmpdir) + + def setUp(self): + self.nidq_interface = MockSpikeGLXNIDQInterface( + signal_duration=23.0, ttl_times=[self.aligned_trial_start_times], ttl_duration=0.1 + ) + self.trial_interface = CsvTimeIntervalsInterface(file_path=self.csv_file_path) + self.behavior_interface = MockBehaviorEventInterface(event_times=self.unaligned_behavior_event_timestamps) + + def assertNWBFileTimesAligned(self, nwbfile_path: Union[str, Path]): + with NWBHDF5IO(path=nwbfile_path) as io: + nwbfile = io.read() + + # High level groups were written to file + assert "BehaviorEvents" in nwbfile.acquisition + assert "ElectricalSeriesNIDQ" in nwbfile.acquisition + assert "trials" in nwbfile.intervals + + # Aligned data was written + assert_array_almost_equal( + x=nwbfile.acquisition["BehaviorEvents"]["event_time"][:], + y=self.aligned_behavior_event_timestamps, + ) + assert_array_almost_equal( + x=nwbfile.intervals["trials"]["start_time"][:], y=self.aligned_trial_start_times, decimal=4 + ) + assert_array_almost_equal( + x=nwbfile.intervals["trials"]["stop_time"][:], + y=self.aligned_trial_start_times + self.regular_trial_length, + decimal=4, + ) + + def 
test_alignment_interfaces(self): + unaligned_trial_start_times = self.trial_interface.get_original_timestamps(column="start_time") + inferred_aligned_trial_start_timestamps = self.nidq_interface.get_event_times_from_ttl( + channel_name="nidq#XA0" # The channel receiving pulses from the DLC system + ) + + self.trial_interface.align_timestamps( + aligned_timestamps=inferred_aligned_trial_start_timestamps, + column="start_time", + interpolate_other_columns=True, + ) + + self.behavior_interface.align_by_interpolation( + unaligned_timestamps=unaligned_trial_start_times, + aligned_timestamps=inferred_aligned_trial_start_timestamps, + ) + + assert_array_equal( + x=self.trial_interface.get_timestamps(column="start_time"), y=inferred_aligned_trial_start_timestamps + ) + assert_array_almost_equal( + x=self.trial_interface.get_timestamps(column="stop_time"), + y=inferred_aligned_trial_start_timestamps + self.regular_trial_length, + decimal=2, + ) + assert_array_almost_equal(x=self.behavior_interface.get_timestamps(), y=self.aligned_behavior_event_timestamps) + + # Check original unaltered timestamps are the same + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="start_time"), + y=self.expected_unaligned_trial_start_times, + ) + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="stop_time"), + y=self.expected_unaligned_trial_start_times + self.regular_trial_length, + ) + assert_array_equal( + x=self.behavior_interface.get_original_timestamps(), + y=self.unaligned_behavior_event_timestamps, + ) + + converter = ConverterPipe(data_interfaces=[self.nidq_interface, self.trial_interface, self.behavior_interface]) + metadata = converter.get_metadata() + + nwbfile_path = self.tmpdir / "test_nidq_pulse_times_alignment_converter_pipe.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_direct_modification(self): 
+ class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict( + NIDQ=MockSpikeGLXNIDQInterface, Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface + ) + + source_data = dict( + NIDQ=dict(signal_duration=23.0, ttl_times=[self.aligned_trial_start_times], ttl_duration=0.1), + Trials=dict(file_path=str(self.csv_file_path)), + Behavior=dict(event_times=self.unaligned_behavior_event_timestamps), + ) + converter = TestAlignmentConverter(source_data=source_data) + metadata = converter.get_metadata() + + unaligned_trial_start_times = converter.data_interface_objects["Trials"].get_original_timestamps( + column="start_time" + ) + inferred_aligned_trial_start_timestamps = converter.data_interface_objects["NIDQ"].get_event_times_from_ttl( + channel_name="nidq#XA0" # The channel receiving pulses from the DLC system + ) + + converter.data_interface_objects["Trials"].align_timestamps( + aligned_timestamps=inferred_aligned_trial_start_timestamps, column="start_time" + ) + + # True stop times are not tracked, so estimate them from using the known regular trial length + converter.data_interface_objects["Trials"].align_timestamps( + aligned_timestamps=inferred_aligned_trial_start_timestamps + self.regular_trial_length, column="stop_time" + ) + + converter.data_interface_objects["Behavior"].align_by_interpolation( + unaligned_timestamps=unaligned_trial_start_times, + aligned_timestamps=inferred_aligned_trial_start_timestamps, + ) + + nwbfile_path = self.tmpdir / "test_nidq_pulse_times_alignment_nwbconverter_direct_modification.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_internal_modification(self): + class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict( + NIDQ=MockSpikeGLXNIDQInterface, Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface + ) + + def __init__(self, source_data: 
Dict[str, dict], verbose: bool = True):
+                super().__init__(source_data=source_data, verbose=verbose)
+
+                unaligned_trial_start_times = self.data_interface_objects["Trials"].get_original_timestamps(
+                    column="start_time"
+                )
+                inferred_aligned_trial_start_timestamps = self.data_interface_objects["NIDQ"].get_event_times_from_ttl(
+                    channel_name="nidq#XA0"  # The channel receiving pulses from the DLC system
+                )
+
+                self.data_interface_objects["Trials"].align_timestamps(
+                    aligned_timestamps=inferred_aligned_trial_start_timestamps, column="start_time"
+                )
+
+                # True stop times are not tracked, so estimate them from using the known regular trial length
+                self.data_interface_objects["Trials"].align_timestamps(
+                    # for this usage, the regular trial length would be hard-coded
+                    aligned_timestamps=inferred_aligned_trial_start_timestamps + 1.0,
+                    column="stop_time",
+                )
+
+                self.data_interface_objects["Behavior"].align_by_interpolation(
+                    unaligned_timestamps=unaligned_trial_start_times,
+                    aligned_timestamps=inferred_aligned_trial_start_timestamps,
+                )
+
+        source_data = dict(
+            NIDQ=dict(signal_duration=23.0, ttl_times=[self.aligned_trial_start_times], ttl_duration=0.1),
+            Trials=dict(file_path=str(self.csv_file_path)),
+            Behavior=dict(event_times=self.unaligned_behavior_event_timestamps),
+        )
+        converter = TestAlignmentConverter(source_data=source_data)
+        metadata = converter.get_metadata()
+
+        nwbfile_path = self.tmpdir / "test_nidq_alignment_pulse_times_nwbconverter_internal_modification.nwb"
+        converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata)
+
+        self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path)
+
+
+class TestExternalPulseTimesAlignment(TestNIDQInterfacePulseTimesAlignment):
+    """
+    This test case is less about ensuring the functionality (which is identical to above) and more about depicting
+    the intended usage in practice.
+
+    Some labs already have workflows put together for handling synchronization.
+ + In this case, they simply store the timestamps in separate files and load them in during the conversion. + """ + + def setUp(self): + self.trial_interface = CsvTimeIntervalsInterface(file_path=self.csv_file_path) + self.behavior_interface = MockBehaviorEventInterface(event_times=self.unaligned_behavior_event_timestamps) + + def assertNWBFileTimesAligned(self, nwbfile_path: Union[str, Path]): + with NWBHDF5IO(path=nwbfile_path) as io: + nwbfile = io.read() + + # High level groups were written to file + assert "BehaviorEvents" in nwbfile.acquisition + assert "trials" in nwbfile.intervals + + # Aligned data was written + assert_array_almost_equal( + x=nwbfile.acquisition["BehaviorEvents"]["event_time"][:], + y=self.aligned_behavior_event_timestamps, + decimal=4, + ) + assert_array_almost_equal( + x=nwbfile.intervals["trials"]["start_time"][:], y=self.aligned_trial_start_times, decimal=5 + ) + assert_array_almost_equal( + x=nwbfile.intervals["trials"]["stop_time"][:], + y=self.aligned_trial_start_times + self.regular_trial_length, + decimal=5, + ) + + def test_alignment_interfaces(self): + unaligned_trial_start_timestamps = self.trial_interface.get_original_timestamps(column="start_time") + externally_aligned_timestamps = self.aligned_trial_start_times + + self.trial_interface.align_timestamps(aligned_timestamps=externally_aligned_timestamps, column="start_time") + + # True stop times are not tracked, so estimate them from using the known regular trial length + self.trial_interface.align_timestamps( + aligned_timestamps=externally_aligned_timestamps + self.regular_trial_length, column="stop_time" + ) + + self.behavior_interface.align_by_interpolation( + unaligned_timestamps=unaligned_trial_start_timestamps, + aligned_timestamps=externally_aligned_timestamps, + ) + + assert_array_equal(x=self.trial_interface.get_timestamps(column="start_time"), y=externally_aligned_timestamps) + assert_array_equal( + x=self.trial_interface.get_timestamps(column="stop_time"), + 
y=externally_aligned_timestamps + self.regular_trial_length, + ) + assert_array_almost_equal( + x=self.behavior_interface.get_timestamps(), y=self.aligned_behavior_event_timestamps, decimal=4 + ) + + # Check original unaltered timestamps are the same + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="start_time"), + y=self.expected_unaligned_trial_start_times, + ) + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="stop_time"), + y=self.expected_unaligned_trial_start_times + self.regular_trial_length, + ) + assert_array_equal( + x=self.behavior_interface.get_original_timestamps(), + y=self.unaligned_behavior_event_timestamps, + ) + + converter = ConverterPipe(data_interfaces=[self.trial_interface, self.behavior_interface]) + metadata = converter.get_metadata() + metadata["NWBFile"]["session_start_time"] = datetime(1970, 1, 1) # No NIDQ to automatically include start time + + nwbfile_path = self.tmpdir / "test_external_pulse_times_alignment_converter_pipe.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_direct_modification(self): + class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict(Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface) + + source_data = dict( + Trials=dict(file_path=str(self.csv_file_path)), + Behavior=dict(event_times=self.unaligned_behavior_event_timestamps), + ) + converter = TestAlignmentConverter(source_data=source_data) + metadata = converter.get_metadata() + metadata["NWBFile"]["session_start_time"] = datetime(1970, 1, 1) # No NIDQ to automatically include start time + + unaligned_trial_start_timestamps = converter.data_interface_objects["Trials"].get_timestamps( + column="start_time" + ) + externally_aligned_timestamps = self.aligned_trial_start_times + + converter.data_interface_objects["Trials"].align_timestamps( + 
aligned_timestamps=externally_aligned_timestamps, column="start_time" + ) + + # True stop times are not tracked, so estimate them from using the known regular trial length + converter.data_interface_objects["Trials"].align_timestamps( + aligned_timestamps=externally_aligned_timestamps + self.regular_trial_length, column="stop_time" + ) + + converter.data_interface_objects["Behavior"].align_by_interpolation( + unaligned_timestamps=unaligned_trial_start_timestamps, + aligned_timestamps=externally_aligned_timestamps, + ) + + nwbfile_path = self.tmpdir / "test_external_pulse_times_alignment_nwbconverter_direct_modification.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_internal_modification(self): + def mimic_reading_externally_aligned_timestamps(): + """Needed to define small function here to allow proper namespace references inside class scope.""" + return self.aligned_trial_start_times + + class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict(Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface) + + def __init__(self, source_data: Dict[str, dict], verbose: bool = True): + super().__init__(source_data=source_data, verbose=verbose) + + unaligned_trial_start_timestamps = self.data_interface_objects["Trials"].get_timestamps( + column="start_time" + ) + externally_aligned_timestamps = mimic_reading_externally_aligned_timestamps() + + self.data_interface_objects["Trials"].align_timestamps( + aligned_timestamps=externally_aligned_timestamps, column="start_time" + ) + + # True stop times are not tracked, so estimate them from using the known regular trial length + self.data_interface_objects["Trials"].align_timestamps( + # for this usage, the regular trial length would be hard-coded + aligned_timestamps=externally_aligned_timestamps + 1.0, + column="stop_time", + ) + + 
self.data_interface_objects["Behavior"].align_by_interpolation( + unaligned_timestamps=unaligned_trial_start_timestamps, + aligned_timestamps=externally_aligned_timestamps, + ) + + source_data = dict( + Trials=dict(file_path=str(self.csv_file_path)), + Behavior=dict(event_times=self.unaligned_behavior_event_timestamps), + ) + converter = TestAlignmentConverter(source_data=source_data) + metadata = converter.get_metadata() + metadata["NWBFile"]["session_start_time"] = datetime(1970, 1, 1) # No NIDQ to automatically include start time + + nwbfile_path = self.tmpdir / "test_external_pulse_times_alignment_nwbconverter_internal_modification.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + +class TestNIDQInterfaceOnSignalAlignment(TestNIDQInterfacePulseTimesAlignment): + @classmethod + def setUpClass(cls): + cls.trial_system_delayed_start = 3.23  # Trial tracking system starts 3.23 seconds after SpikeGLX + trial_system_total_time = 10.0  # Was tracking trials for 10 seconds, according to trial tracking system + cls.regular_trial_length = ( + 1.0  # For simplicity, each trial lasts 1 second according to the trial tracking system + ) + + cls.expected_unaligned_trial_start_times = np.arange( + start=0.0, stop=trial_system_total_time, step=cls.regular_trial_length + ) + cls.aligned_trial_start_times = cls.expected_unaligned_trial_start_times + cls.trial_system_delayed_start + + # Timing of events according to trial system + cls.unaligned_behavior_event_timestamps = [5.6, 7.3, 8.7]  # Timing of events according to trial tracking system + + # Timing of events when interpolated by aligned trial times + cls.aligned_behavior_event_timestamps = [8.83, 10.53, 11.93] + + cls.tmpdir = Path(mkdtemp()) + cls.csv_file_path = cls.tmpdir / "testing_nidq_single_pulse_alignment_trial_table.csv" + dataframe = DataFrame( + data=dict( + start_time=cls.expected_unaligned_trial_start_times, + 
stop_time=cls.expected_unaligned_trial_start_times + cls.regular_trial_length, + ) + ) + dataframe.to_csv(path_or_buf=cls.csv_file_path, index=False) + + def setUp(self): + self.nidq_interface = MockSpikeGLXNIDQInterface( + signal_duration=23.0, ttl_times=[[self.trial_system_delayed_start]], ttl_duration=0.1 + ) + self.trial_interface = CsvTimeIntervalsInterface(file_path=self.csv_file_path) + self.behavior_interface = MockBehaviorEventInterface(event_times=self.unaligned_behavior_event_timestamps) + + def test_alignment_interfaces(self): + inferred_aligned_trial_start_time = self.nidq_interface.get_event_times_from_ttl( + channel_name="nidq#XA0" # The channel receiving pulses from the DLC system + )[0] + + self.trial_interface.align_starting_time(starting_time=inferred_aligned_trial_start_time) + self.behavior_interface.align_starting_time(starting_time=inferred_aligned_trial_start_time) + + assert_array_equal(x=self.trial_interface.get_timestamps(column="start_time"), y=self.aligned_trial_start_times) + assert_array_equal( + x=self.trial_interface.get_timestamps(column="stop_time"), + y=self.aligned_trial_start_times + self.regular_trial_length, + ) + assert_array_almost_equal(x=self.behavior_interface.get_timestamps(), y=self.aligned_behavior_event_timestamps) + + # Check original unaltered timestamps are the same + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="start_time"), + y=self.expected_unaligned_trial_start_times, + ) + assert_array_equal( + x=self.trial_interface.get_original_timestamps(column="stop_time"), + y=self.expected_unaligned_trial_start_times + self.regular_trial_length, + ) + assert_array_equal( + x=self.behavior_interface.get_original_timestamps(), + y=self.unaligned_behavior_event_timestamps, + ) + + converter = ConverterPipe(data_interfaces=[self.nidq_interface, self.trial_interface, self.behavior_interface]) + metadata = converter.get_metadata() + + nwbfile_path = self.tmpdir / 
"test_nidq_on_signal_alignment_converter_pipe.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_direct_modification(self): + class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict( + NIDQ=MockSpikeGLXNIDQInterface, Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface + ) + + source_data = dict( + NIDQ=dict(signal_duration=23.0, ttl_times=[self.aligned_trial_start_times], ttl_duration=0.1), + Trials=dict(file_path=str(self.csv_file_path)), + Behavior=dict(event_times=self.unaligned_behavior_event_timestamps), + ) + converter = TestAlignmentConverter(source_data=source_data) + metadata = converter.get_metadata() + + inferred_aligned_trial_start_time = converter.data_interface_objects["NIDQ"].get_event_times_from_ttl( + channel_name="nidq#XA0" # The channel receiving pulses from the DLC system + )[0] + + converter.data_interface_objects["Trials"].align_starting_time(starting_time=inferred_aligned_trial_start_time) + converter.data_interface_objects["Behavior"].align_starting_time( + starting_time=inferred_aligned_trial_start_time + ) + + nwbfile_path = self.tmpdir / "test_nidq_on_signal_alignment_nwbconverter_direct_modification.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) + + def test_alignment_nwbconverter_internal_modification(self): + class TestAlignmentConverter(NWBConverter): + data_interface_classes = dict( + NIDQ=MockSpikeGLXNIDQInterface, Trials=CsvTimeIntervalsInterface, Behavior=MockBehaviorEventInterface + ) + + def __init__(self, source_data: Dict[str, dict], verbose: bool = True): + super().__init__(source_data=source_data, verbose=verbose) + + inferred_aligned_trial_start_time = self.data_interface_objects["NIDQ"].get_event_times_from_ttl( + channel_name="nidq#XA0" # The channel receiving pulses 
from the DLC system + )[0] + + self.data_interface_objects["Trials"].align_starting_time( + starting_time=inferred_aligned_trial_start_time + ) + self.data_interface_objects["Behavior"].align_starting_time( + starting_time=inferred_aligned_trial_start_time + ) + + source_data = dict( + NIDQ=dict(signal_duration=23.0, ttl_times=[self.aligned_trial_start_times], ttl_duration=0.1), + Trials=dict(file_path=str(self.csv_file_path)), + Behavior=dict(event_times=self.unaligned_behavior_event_timestamps), + ) + converter = TestAlignmentConverter(source_data=source_data) + metadata = converter.get_metadata() + + nwbfile_path = self.tmpdir / "test_nidq_on_signal_alignment_nwbconverter_internal_modification.nwb" + converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata) + + self.assertNWBFileTimesAligned(nwbfile_path=nwbfile_path) diff --git a/tests/test_ophys/test_tools_roiextractors.py b/tests/test_ophys/test_tools_roiextractors.py index 1b3f0260e..12395e24c 100644 --- a/tests/test_ophys/test_tools_roiextractors.py +++ b/tests/test_ophys/test_tools_roiextractors.py @@ -58,7 +58,6 @@ def test_add_device(self): assert device_name in devices def test_add_device_with_further_metadata(self): - device_name = "new_device" description = "device_description" manufacturer = "manufactuer" @@ -120,7 +119,6 @@ def test_not_overwriting_devices(self): assert device_name1 in devices def test_add_device_defaults(self): - add_devices(self.nwbfile, metadata=self.metadata) devices = self.nwbfile.devices @@ -129,7 +127,6 @@ def test_add_device_defaults(self): assert "Microscope" in devices def test_add_empty_device_list_in_metadata(self): - device_list = [] self.metadata["Ophys"].update(Device=device_list) add_devices(self.nwbfile, metadata=self.metadata) @@ -139,7 +136,6 @@ def test_add_empty_device_list_in_metadata(self): assert len(devices) == 0 def test_device_object(self): - device_name = "device_object" device_object = Device(name=device_name) device_list = [device_object] 
@@ -152,7 +148,6 @@ def test_device_object(self): assert device_name in devices def test_device_object_and_metadata_mix(self): - device_object = Device(name="device_object") device_metadata = dict(name="device_metadata") device_list = [device_object, device_metadata] @@ -202,7 +197,6 @@ def setUp(self): self.metadata["Ophys"].update(ImagingPlane=[self.imaging_plane_metadata]) def test_add_imaging_plane(self): - add_imaging_plane(nwbfile=self.nwbfile, metadata=self.metadata) imaging_planes = self.nwbfile.imaging_planes @@ -213,7 +207,6 @@ def test_add_imaging_plane(self): assert imaging_plane.description == self.imaging_plane_description def test_not_overwriting_imaging_plane_if_same_name(self): - add_imaging_plane(nwbfile=self.nwbfile, metadata=self.metadata) self.imaging_plane_metadata["description"] = "modified description" @@ -224,7 +217,6 @@ def test_not_overwriting_imaging_plane_if_same_name(self): assert self.imaging_plane_name in imaging_planes def test_add_two_imaging_planes(self): - # Add the first imaging plane first_imaging_plane_name = "first_imaging_plane_name" first_imaging_plane_description = "first_imaging_plane_description" @@ -1218,7 +1210,6 @@ def test_invalid_iterator_type_raises_error(self): ) def test_non_iterative_write_assertion(self): - # Estimate num of frames required to exceed memory capabilities dtype = self.imaging_extractor.get_dtype() element_size_in_bytes = dtype.itemsize @@ -1306,7 +1297,6 @@ def test_iterator_options_propagation(self): self.assertEqual(data_chunk_iterator.chunk_shape, chunk_shape) def test_add_two_photon_series_roundtrip(self): - metadata = self.metadata add_two_photon_series(imaging=self.imaging_extractor, nwbfile=self.nwbfile, metadata=metadata) @@ -1349,7 +1339,6 @@ def setUp(self): ) def test_add_sumary_images(self): - segmentation_extractor = generate_dummy_segmentation_extractor(num_rows=10, num_columns=15) images_set_name = "images_set_name" @@ -1370,7 +1359,6 @@ def test_add_sumary_images(self): 
np.testing.assert_almost_equal(expected_images_dict[image_name], extracted_images_dict[image_name]) def test_extractor_with_one_summary_image_suppressed(self): - segmentation_extractor = generate_dummy_segmentation_extractor(num_rows=10, num_columns=15) segmentation_extractor._image_correlation = None @@ -1389,7 +1377,6 @@ def test_extractor_with_one_summary_image_suppressed(self): assert extracted_images_number == expected_images_number def test_extractor_with_no_summary_images(self): - segmentation_extractor = generate_dummy_segmentation_extractor( num_rows=10, num_columns=15, has_summary_images=False ) @@ -1405,7 +1392,6 @@ def test_extractor_with_no_summary_images(self): assert images_set_name not in ophys.data_interfaces def test_extractor_with_no_summary_images_and_no_ophys_module(self): - segmentation_extractor = generate_dummy_segmentation_extractor( num_rows=10, num_columns=15, has_summary_images=False )