From 9eab6f04c5d9ccbf7d98e0fcfb3c9a717b4c1c05 Mon Sep 17 00:00:00 2001
From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com>
Date: Fri, 10 Feb 2023 10:54:26 -0500
Subject: [PATCH 01/28] Update spikeinterfacerecordingdatachunkiterator.py

---
 ...pikeinterfacerecordingdatachunkiterator.py | 20 ++-----------------
 1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/src/neuroconv/tools/spikeinterface/spikeinterfacerecordingdatachunkiterator.py b/src/neuroconv/tools/spikeinterface/spikeinterfacerecordingdatachunkiterator.py
index 7964bf7f1..cb1676a5e 100644
--- a/src/neuroconv/tools/spikeinterface/spikeinterfacerecordingdatachunkiterator.py
+++ b/src/neuroconv/tools/spikeinterface/spikeinterfacerecordingdatachunkiterator.py
@@ -2,20 +2,16 @@
 from typing import Tuple, Iterable, Optional, Union
 from warnings import warn
 
-from spikeinterface.core.old_api_utils import OldToNewRecording
-from spikeextractors import RecordingExtractor
 from hdmf.data_utils import GenericDataChunkIterator
 from spikeinterface import BaseRecording
 
-SpikeInterfaceRecording = Union[BaseRecording, RecordingExtractor]
-
 
 class SpikeInterfaceRecordingDataChunkIterator(GenericDataChunkIterator):
     """DataChunkIterator specifically for use on RecordingExtractor objects."""
 
     def __init__(
         self,
-        recording: SpikeInterfaceRecording,
+        recording: BaseRecording,
         segment_index: int = 0,
         return_scaled: bool = False,
         buffer_gb: Optional[float] = None,
@@ -64,19 +60,7 @@ def __init__(
             Dictionary of keyword arguments to be passed directly to tqdm.
             See https://github.com/tqdm/tqdm#parameters for options.
         """
-        if isinstance(recording, RecordingExtractor):
-            self.recording = OldToNewRecording(oldapi_recording_extractor=recording)
-            # TODO: Remove spikeextractors backend
-            warn(
-                message=(
-                    "Interfaces using a spikeextractors backend will soon be deprecated! "
-                    "Please use the SpikeInterface backend instead."
-                ),
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-        else:
-            self.recording = recording
+        self.recording = recording
         self.segment_index = segment_index
         self.return_scaled = return_scaled
         self.channel_ids = recording.get_channel_ids()

From e228159d34888dc7e8cd1e818fbc71e3810ed6f1 Mon Sep 17 00:00:00 2001
From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com>
Date: Fri, 10 Feb 2023 10:55:21 -0500
Subject: [PATCH 02/28] Update spikeinterface.py

---
 src/neuroconv/tools/spikeinterface/spikeinterface.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py
index 865ce7f32..99bd9ecc3 100644
--- a/src/neuroconv/tools/spikeinterface/spikeinterface.py
+++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py
@@ -11,7 +11,6 @@
 import pynwb
 from spikeinterface import BaseRecording, BaseSorting, WaveformExtractor
 from spikeinterface.core.old_api_utils import OldToNewRecording, OldToNewSorting
-from spikeextractors import RecordingExtractor, SortingExtractor
 from numbers import Real
 from hdmf.data_utils import DataChunkIterator, AbstractDataChunkIterator
 from hdmf.backends.hdf5.h5_utils import H5DataIO
@@ -23,8 +22,8 @@
 from ...utils import dict_deep_update, OptionalFilePathType, calculate_regular_series_rate
 
-SpikeInterfaceRecording = Union[BaseRecording, RecordingExtractor]
-SpikeInterfaceSorting = Union[BaseSorting, SortingExtractor]
+SpikeInterfaceRecording = BaseRecording
+SpikeInterfaceSorting = BaseSorting
 
 
 def set_dynamic_table_property(

From e0ed730ddfd823adc96cb919a4e512c60e8e1e25 Mon Sep 17 00:00:00 2001
From: Cody Baker
Date: Fri, 10 Feb 2023 13:40:58 -0500
Subject: [PATCH 03/28] removed from main tools

---
 .../tools/spikeinterface/spikeinterface.py | 462 ++----
 1 file changed, 48 insertions(+), 414 deletions(-)

diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py
index 99bd9ecc3..b1c17f6b3 100644
--- a/src/neuroconv/tools/spikeinterface/spikeinterface.py
+++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py
@@ -4,13 +4,11 @@
 import numpy as np
 from packaging.version import Version
 from typing import Union, Optional, List
-from warnings import warn
 from collections import defaultdict
 
 from nwbinspector.utils import get_package_version
 import pynwb
 from spikeinterface import BaseRecording, BaseSorting, WaveformExtractor
-from spikeinterface.core.old_api_utils import OldToNewRecording, OldToNewSorting
 from numbers import Real
 from hdmf.data_utils import DataChunkIterator, AbstractDataChunkIterator
 from hdmf.backends.hdf5.h5_utils import H5DataIO
@@ -22,62 +20,16 @@
 from ...utils import dict_deep_update, OptionalFilePathType, calculate_regular_series_rate
 
-SpikeInterfaceRecording = BaseRecording
-SpikeInterfaceSorting = BaseSorting
-
-
-def set_dynamic_table_property(
-    dynamic_table,
-    row_ids,
-    property_name,
-    values,
-    index=False,
-    default_value=np.nan,
-    table=False,
-    description="no description",
-):
-    if not isinstance(row_ids, list) or not all(isinstance(x, int) for x in row_ids):
-        raise TypeError("'ids' must be a list of integers")
-    ids = list(dynamic_table.id[:])
-    if any([i not in ids for i in row_ids]):
-        raise ValueError("'ids' contains values outside the range of existing ids")
-    if not isinstance(property_name, str):
-        raise TypeError("'property_name' must be a string")
-    if len(row_ids) != len(values) and index is False:
-        raise ValueError("'ids' and 'values' should be lists of same size")
-    if index is False:
-        if property_name in dynamic_table:
-            for row_id, value in zip(row_ids, values):
-                dynamic_table[property_name].data[ids.index(row_id)] = value
-        else:
-            col_data = [default_value] * len(ids)  # init with default val
-            for row_id, value in zip(row_ids, values):
-                col_data[ids.index(row_id)] = value
-            dynamic_table.add_column(
-                name=property_name, description=description, data=col_data, index=index, table=table
-            )
-    else:
-        if property_name in dynamic_table:
-            # TODO
-            raise NotImplementedError
-        else:
-            dynamic_table.add_column(name=property_name, description=description, data=values, index=index, table=table)
-
-
-def get_nwb_metadata(recording: SpikeInterfaceRecording, metadata: dict = None):
+def get_nwb_metadata(recording: BaseRecording, metadata: dict = None):
     """
     Return default metadata for all recording fields.
 
     Parameters
     ----------
-    recording: SpikeInterfaceRecording
+    recording: spikeinterface.BaseRecording
     metadata: dict
         metadata info for constructing the nwb file (optional).
     """
-    if isinstance(recording, RecordingExtractor):
-        checked_recording = OldToNewRecording(oldapi_recording_extractor=recording)
-    else:
-        checked_recording = recording
     metadata = dict(
         NWBFile=dict(
             session_description="Auto-generated by NwbRecordingExtractor without description.",
@@ -86,8 +38,8 @@ def get_nwb_metadata(recording: SpikeInterfaceRecording, metadata: dict = None):
         Ecephys=dict(
             Device=[dict(name="Device", description="Ecephys probe. Automatically generated.")],
             ElectrodeGroup=[
-                dict(name=str(gn), description="no description", location="unknown", device="Device")
-                for gn in np.unique(checked_recording.get_channel_groups())
+                dict(name=str(group_name), description="no description", location="unknown", device="Device")
+                for group_name in np.unique(recording.get_channel_groups())
             ],
         ),
     )
@@ -133,7 +85,7 @@ def add_devices(nwbfile: pynwb.NWBFile, metadata: dict = None):
         nwbfile.create_device(**dict(defaults, **dev))
 
 
-def add_electrode_groups(recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBFile, metadata: dict = None):
+def add_electrode_groups(recording: BaseRecording, nwbfile: pynwb.NWBFile, metadata: dict = None):
     """
     Add electrode group information to nwbfile object.
 
@@ -142,7 +94,7 @@ def add_electrode_groups(recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBF
 
     Parameters
     ----------
-    recording: SpikeInterfaceRecording
+    recording: spikeinterface.BaseRecording
     nwbfile: NWBFile
         nwb file to which the recording information is to be added
     metadata: dict
@@ -162,10 +114,6 @@ def add_electrode_groups(recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBF
         but will only use default description and location.
     """
     assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
-    if isinstance(recording, RecordingExtractor):
-        checked_recording = OldToNewRecording(oldapi_recording_extractor=recording)
-    else:
-        checked_recording = recording
     if len(nwbfile.devices) == 0:
         warnings.warn("When adding ElectrodeGroup, no Devices were found on nwbfile. Creating a Device now...")
         add_devices(nwbfile=nwbfile, metadata=metadata)
@@ -174,10 +122,10 @@ def add_electrode_groups(recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBF
     if "Ecephys" not in metadata:
         metadata["Ecephys"] = dict()
 
-    if "group_name" in checked_recording.get_property_keys():
-        group_names = np.unique(checked_recording.get_property("group_name"))
+    if "group_name" in recording.get_property_keys():
+        group_names = np.unique(recording.get_property("group_name"))
     else:
-        group_names = np.unique(checked_recording.get_channel_groups()).astype("str", copy=False)
+        group_names = np.unique(recording.get_channel_groups()).astype("str", copy=False)
 
     defaults = [
         dict(
@@ -219,20 +167,18 @@ def add_electrode_groups(recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBF
         )
         electrode_group_kwargs = dict(defaults[0])
         electrode_group_kwargs.update(device=device)
-        for grp_name in np.unique(checked_recording.get_channel_groups()).tolist():
-            electrode_group_kwargs.update(name=str(grp_name))
+        for group_name in np.unique(recording.get_channel_groups()).tolist():
+            electrode_group_kwargs.update(name=str(group_name))
             nwbfile.create_electrode_group(**electrode_group_kwargs)
 
 
-def add_electrodes(
-    recording: SpikeInterfaceRecording, nwbfile: pynwb.NWBFile, metadata: dict = None, exclude: tuple = ()
-):
+def add_electrodes(recording: BaseRecording, nwbfile: pynwb.NWBFile, metadata: dict = None, exclude: tuple = ()):
     """
     Add channels from recording object as electrodes to nwbfile object.
 
     Parameters
     ----------
-    recording: SpikeInterfaceRecording
+    recording: spikeinterface.BaseRecording
     nwbfile: NWBFile
         nwb file to which the recording information is to be added
     metadata: dict
@@ -261,34 +207,6 @@ def add_electrodes(
         object to ignore when writing to the NWBFile.
     """
     assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
-    if isinstance(recording, RecordingExtractor):
-        msg = (
-            "Support for spikeextractors.RecordingExtractor objects is deprecated. "
-            "Use spikeinterface.BaseRecording objects"
-        )
-        warnings.warn(msg, DeprecationWarning, stacklevel=2)
-        checked_recording = OldToNewRecording(oldapi_recording_extractor=recording)
-        # TODO: Remove spikeextractors backend
-        warn(
-            message=(
-                "Interfaces using a spikeextractors backend will soon be deprecated! "
-                "Please use the SpikeInterface backend instead."
-            ),
-            category=DeprecationWarning,
-            stacklevel=2,
-        )
-    else:
-        checked_recording = recording
-
-    # this flag is used to keep old behavior of assigning "id" from int channel_ids
-    old_api = isinstance(checked_recording, OldToNewRecording)
-
-    # For older versions of pynwb, we need to manually add these columns
-    if get_package_version("pynwb") < Version("1.3.0"):
-        if nwbfile.electrodes is None or "rel_x" not in nwbfile.electrodes.colnames:
-            nwbfile.add_electrode_column("rel_x", "x position of electrode in electrode group")
-        if nwbfile.electrodes is None or "rel_y" not in nwbfile.electrodes.colnames:
-            nwbfile.add_electrode_column("rel_y", "y position of electrode in electrode group")
 
     # Test that metadata has the expected structure
     electrodes_metadata = list()
@@ -315,12 +233,12 @@ def add_electrodes(
 
     # 1. Build columns details from extractor properties: dict(name: dict(description='',data=data, index=False))
     data_to_add = defaultdict(dict)
-    recorder_properties = checked_recording.get_property_keys()
+    recorder_properties = recording.get_property_keys()
     excluded_properties = list(exclude) + ["contact_vector"]
     properties_to_extract = [property for property in recorder_properties if property not in excluded_properties]
 
     for property in properties_to_extract:
-        data = checked_recording.get_property(property)
+        data = recording.get_property(property)
         index = isinstance(data[0], (list, np.ndarray, tuple))
         # booleans are parsed as strings
         if isinstance(data[0], (bool, np.bool_)):
@@ -334,17 +252,13 @@ def add_electrodes(
         raise ValueError(f"{extra_descriptions} are not available in the recording extractor, set them first")
 
     # Channel name logic
-    channel_ids = checked_recording.get_channel_ids()
+    channel_ids = recording.get_channel_ids()
     if "channel_name" in data_to_add:
         # if 'channel_name' is set as a property, it is used to override default channel_ids (and "id")
        channel_name_array = data_to_add["channel_name"]["data"]
     else:
         channel_name_array = channel_ids.astype("str", copy=False)
     data_to_add["channel_name"].update(description="unique channel reference", data=channel_name_array, index=False)
-    if old_api:
-        # If the channel ids are integer keep the old behavior of asigning nwbfile.electrodes.id equal to channel_ids
-        if np.issubdtype(channel_ids.dtype, np.integer):
-            data_to_add["id"].update(data=channel_ids, index=False)
 
     # Location in spikeinterface is equivalent to rel_x, rel_y, rel_z in the nwb standard
     if "location" in data_to_add:
@@ -379,7 +293,7 @@ def add_electrodes(
     if len(groupless_names) > 0:
         electrode_group_list = [dict(name=group_name) for group_name in groupless_names]
         missing_group_metadata = dict(Ecephys=dict(ElectrodeGroup=electrode_group_list))
-        add_electrode_groups(recording=checked_recording, nwbfile=nwbfile, metadata=missing_group_metadata)
+        add_electrode_groups(recording=recording, nwbfile=nwbfile, metadata=missing_group_metadata)
 
     group_list = [nwbfile.electrode_groups[group_name] for group_name in group_name_array]
     data_to_add["group"].update(description="the ElectrodeGroup object", data=group_list, index=False)
@@ -437,7 +351,7 @@ def add_electrodes(
     ]
     properties_with_data = [property for property in properties_to_add_by_rows if "data" in data_to_add[property]]
 
-    rows_in_data = [index for index in range(checked_recording.get_num_channels())]
+    rows_in_data = [index for index in range(recording.get_num_channels())]
     rows_to_add = [
         index
         for index in rows_in_data
@@ -496,12 +410,13 @@ def add_electrodes(
             nwbfile.add_electrode_column(property, **cols_args)
 
 
-def check_if_recording_traces_fit_into_memory(recording: SpikeInterfaceRecording, segment_index: int = 0) -> None:
-    """Raises an error if the full traces of a recording extractor are larger than psutil.virtual_memory().available
+def check_if_recording_traces_fit_into_memory(recording: BaseRecording, segment_index: int = 0) -> None:
+    """
+    Raises an error if the full traces of a recording extractor are larger than psutil.virtual_memory().available.
 
     Parameters
     ----------
-    recording : SpikeInterfaceRecording
+    recording : spikeinterface.BaseRecording
         A recording extractor object from spikeinterface.
     segment_index : int, optional
         The segment index of the recording extractor object, by default 0
@@ -536,7 +451,7 @@ def _recording_traces_to_hdmf_iterator(
 
     Parameters
     ----------
-    recording : BaseRecording
+    recording : spikeinterface.BaseRecording
         A recording extractor from spikeinterface
     segment_index : int, optional
         The recording segment to add to the NWBFile.
@@ -562,7 +477,6 @@ def _recording_traces_to_hdmf_iterator(
     ValueError
         If the iterator_type is not 'v1', 'v2' or None.
     """
-
     supported_iterator_types = ["v1", "v2", None]
     if iterator_type not in supported_iterator_types:
         message = f"iterator_type {iterator_type} should be either 'v1', 'v2' (recommended) or None"
@@ -589,7 +503,7 @@ def _recording_traces_to_hdmf_iterator(
 
 
 def add_electrical_series(
-    recording: SpikeInterfaceRecording,
+    recording: BaseRecording,
     nwbfile: pynwb.NWBFile,
     metadata: dict = None,
     segment_index: int = 0,
@@ -649,27 +563,13 @@ def add_electrical_series(
         Missing keys in an element of metadata['Ecephys']['ElectrodeGroup'] will be auto-populated with defaults
         whenever possible.
     """
-    if isinstance(recording, RecordingExtractor):
-        checked_recording = OldToNewRecording(oldapi_recording_extractor=recording)
-        # TODO: Remove spikeextractors backend
-        warn(
-            message=(
-                "Interfaces using a spikeextractors backend will soon be deprecated! "
-                "Please use the SpikeInterface backend instead."
-            ),
-            category=DeprecationWarning,
-            stacklevel=2,
-        )
-    else:
-        checked_recording = recording
-
     assert write_as in [
         "raw",
         "processed",
         "lfp",
     ], f"'write_as' should be 'raw', 'processed' or 'lfp', but instead received value {write_as}"
 
-    segment_signature = "" if checked_recording.get_num_segments() == 1 else segment_index
+    segment_signature = "" if recording.get_num_segments() == 1 else segment_index
     modality_signature = write_as.upper() if write_as == "lfp" else write_as.capitalize()
     default_name = f"ElectricalSeries{modality_signature}{segment_signature}"
     default_description = dict(raw="Raw acquired data", lfp="Processed data - LFP", processed="Processed data")
@@ -693,11 +593,11 @@ def add_electrical_series(
         eseries_kwargs.update(metadata["Ecephys"][es_key])
 
     # Indexes by channel ids if they are integer or by indices otherwise.
-    channel_name_array = checked_recording.get_channel_ids()
+    channel_name_array = recording.get_channel_ids()
     if np.issubdtype(channel_name_array.dtype, np.integer):
         channel_indices = channel_name_array
     else:
-        channel_indices = checked_recording.ids_to_indices(channel_name_array)
+        channel_indices = recording.ids_to_indices(channel_name_array)
 
     add_electrodes(recording=recording, nwbfile=nwbfile, metadata=metadata)
 
@@ -710,8 +610,8 @@ def add_electrical_series(
     # Spikeinterface guarantees data in micro volts when return_scaled=True. This multiplies by gain and adds offsets
    # In nwb to get traces in Volts we take data*channel_conversion*conversion + offset
-    channel_conversion = checked_recording.get_channel_gains()
-    channel_offset = checked_recording.get_channel_offsets()
+    channel_conversion = recording.get_channel_gains()
+    channel_offset = recording.get_channel_offsets()
 
     unique_channel_conversion = np.unique(channel_conversion)
     unique_channel_conversion = unique_channel_conversion[0] if len(unique_channel_conversion) == 1 else None
@@ -733,7 +633,7 @@ def add_electrical_series(
 
     # Iterator
     ephys_data_iterator = _recording_traces_to_hdmf_iterator(
-        recording=checked_recording,
+        recording=recording,
         segment_index=segment_index,
         iterator_type=iterator_type,
         iterator_opts=iterator_opts,
@@ -743,13 +643,13 @@ def add_electrical_series(
     )
 
     # Timestamps vs rate
-    timestamps = checked_recording.get_times(segment_index=segment_index)
+    timestamps = recording.get_times(segment_index=segment_index)
     rate = calculate_regular_series_rate(series=timestamps)  # Returns None if it is not regular
     starting_time = starting_time if starting_time is not None else 0
     if rate:
         starting_time = starting_time + timestamps[0]
-        eseries_kwargs.update(starting_time=starting_time, rate=checked_recording.get_sampling_frequency())
+        eseries_kwargs.update(starting_time=starting_time, rate=recording.get_sampling_frequency())
     else:
         shifted_time_stamps = starting_time + timestamps
         wrapped_timestamps = H5DataIO(
@@ -767,57 +667,7 @@ def add_electrical_series(
         ecephys_mod.data_interfaces["LFP"].add_electrical_series(es)
 
 
-def add_epochs(recording: RecordingExtractor, nwbfile: pynwb.NWBFile):
-    """
-    Auxiliary static method for nwbextractor.
-
-    Adds epochs from recording object to nwbfile object.
-
-    Parameters
-    ----------
-    recording: RecordingExtractor
-        Epochs are supported only by spikeinterface/spikeextractors RecordingExtractor objects; does not support
-        spikeinterface/spikeinterface BaseRecording objects.
-    nwbfile: NWBFile
-        nwb file to which the recording information is to be added
-    """
-    assert isinstance(
-        recording, RecordingExtractor
-    ), "'recording' should be a spikeinterface/spikeextractors RecordingExtractor object!"
-    assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
-
-    # TODO: Remove spikeextractors backend
-    warn(
-        message=(
-            "Interfaces using a spikeextractors backend will soon be deprecated! "
-            "Please use the SpikeInterface backend instead."
-        ),
-        category=DeprecationWarning,
-        stacklevel=2,
-    )
-
-    for epoch_name in recording.get_epoch_names():
-        epoch = recording.get_epoch_info(epoch_name)
-        if nwbfile.epochs is None:
-            nwbfile.add_epoch(
-                start_time=recording.frame_to_time(epoch["start_frame"]),
-                stop_time=recording.frame_to_time(epoch["end_frame"] - 1),
-                tags=epoch_name,
-            )
-        else:
-            if [epoch_name] in nwbfile.epochs["tags"][:]:
-                ind = nwbfile.epochs["tags"][:].index([epoch_name])
-                nwbfile.epochs["start_time"].data[ind] = recording.frame_to_time(epoch["start_frame"])
-                nwbfile.epochs["stop_time"].data[ind] = recording.frame_to_time(epoch["end_frame"])
-            else:
-                nwbfile.add_epoch(
-                    start_time=recording.frame_to_time(epoch["start_frame"]),
-                    stop_time=recording.frame_to_time(epoch["end_frame"]),
-                    tags=epoch_name,
-                )
-
-
-def add_electrodes_info(recording: RecordingExtractor, nwbfile: pynwb.NWBFile, metadata: dict = None):
+def add_electrodes_info(recording: BaseRecording, nwbfile: pynwb.NWBFile, metadata: dict = None):
     """
     Add device, electrode_groups, and electrodes info to the nwbfile.
 
@@ -853,90 +703,8 @@ def add_electrodes_info(recording: RecordingExtractor, nwbfile: pynwb.NWBFile, m
     add_electrodes(recording=recording, nwbfile=nwbfile, metadata=metadata)
 
 
-def add_all_to_nwbfile(
-    recording: SpikeInterfaceRecording,
-    nwbfile=None,
-    starting_time: Optional[float] = None,
-    metadata: dict = None,
-    write_as: str = "raw",
-    es_key: str = None,
-    write_electrical_series: bool = True,
-    write_scaled: bool = False,
-    compression: Optional[str] = "gzip",
-    compression_opts: Optional[int] = None,
-    iterator_type: Optional[str] = None,
-    iterator_opts: Optional[dict] = None,
-):
-    """
-    Auxiliary static method for nwbextractor.
-
-    Adds all recording related information from recording object and metadata to the NWBFile object.
-
-    Parameters
-    ----------
-    recording : SpikeInterfaceRecording
-    nwbfile : NWBFile, optional
-        nwb file to which the recording information is to be added
-    starting_time : float, optional
-        Sets the starting time of the ElectricalSeries to a manually set value.
-    metadata : dict, optional
-        metadata info for constructing the NWB file.
-        Check the auxiliary function docstrings for more information
-        about metadata format.
-    write_as : {'raw', 'processed', 'lfp'}
-        How to save the traces data in the NWB file.
-        - 'raw': save it in acquisition
-        - 'processed': save it as FilteredEphys, in a processing module
-        - 'lfp': save it as LFP, in a processing module
-    es_key : str, optional
-        Key in metadata dictionary containing metadata info for the specific electrical series
-    write_electrical_series : bool, default: True
-        If True (default), electrical series are written in acquisition. If False, only device, electrode_groups,
-        and electrodes are written to NWB.
-    write_scaled : bool, default: True
-        If True, writes the scaled traces (return_scaled=True)
-    compression : {'gzip', 'lzf'}, optional
-        Type of compression to use.
-        Set to None to disable all compression.
-    compression_opts : int (optional, defaults to 4)
-        Only applies to compression="gzip". Controls the level of the GZIP.
-    iterator_type : {'v2', 'v1'}
-        The type of DataChunkIterator to use.
-        'v1' is the original DataChunkIterator of the hdmf data_utils.
-        'v2' is the locally developed RecordingExtractorDataChunkIterator, which offers full control over chunking.
-    iterator_opts : dict, optional
-        Dictionary of options for the RecordingExtractorDataChunkIterator (iterator_type='v2')
-        or DataChunkIterator (iterator_type='v1').
-        Valid options are
-            buffer_gb : float (optional, defaults to 1 GB, available for both 'v2' and 'v1')
-                Recommended to be as much free RAM as available). Automatically calculates suitable buffer shape.
-            chunk_mb : float (optional, defaults to 1 MB, only available for 'v2')
-                Should be below 1 MB. Automatically calculates suitable chunk shape.
-        If manual specification of buffer_shape and chunk_shape are desired, these may be specified as well.
-    """
-    if nwbfile is not None:
-        assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
-    add_electrodes_info(recording=recording, nwbfile=nwbfile, metadata=metadata)
-
-    if write_electrical_series:
-        add_electrical_series(
-            recording=recording,
-            nwbfile=nwbfile,
-            starting_time=starting_time,
-            metadata=metadata,
-            write_as=write_as,
-            es_key=es_key,
-            write_scaled=write_scaled,
-            compression=compression,
-            compression_opts=compression_opts,
-            iterator_type=iterator_type,
-            iterator_opts=iterator_opts,
-        )
-    if isinstance(recording, RecordingExtractor):
-        add_epochs(recording=recording, nwbfile=nwbfile)
-
-
 def write_recording(
-    recording: SpikeInterfaceRecording,
+    recording: BaseRecording,
     nwbfile_path: OptionalFilePathType = None,
     nwbfile: Optional[pynwb.NWBFile] = None,
     metadata: Optional[dict] = None,
@@ -957,7 +725,7 @@ def write_recording(
 
     Parameters
     ----------
-    recording : SpikeInterfaceRecording
+    recording : spikeinterface.BaseRecording
     nwbfile_path : FilePathType
         Path for where to write or load (if overwrite=False) the NWBFile.
         If specified, the context will always write to this location.
@@ -1061,7 +829,7 @@ def write_recording(
         add_electrodes_info(recording=recording, nwbfile=nwbfile_out, metadata=metadata)
 
         if write_electrical_series:
-            number_of_segments = recording.get_num_segments() if isinstance(recording, BaseRecording) else 1
+            number_of_segments = recording.get_num_segments()
             for segment_index in range(number_of_segments):
                 add_electrical_series(
                     recording=recording,
@@ -1077,10 +845,6 @@ def write_recording(
                     iterator_type=iterator_type,
                     iterator_opts=iterator_opts,
                 )
-
-            # For objects of the legacy spikeextractors we support adding epochs
-            if isinstance(recording, RecordingExtractor):
-                add_epochs(recording=recording, nwbfile=nwbfile_out)
     return nwbfile_out
 
 
@@ -1098,7 +862,7 @@ def get_nspikes(units_table: pynwb.misc.Units, unit_id: int):
 
 
 def add_units_table(
-    sorting: SpikeInterfaceSorting,
+    sorting: BaseSorting,
     nwbfile: pynwb.NWBFile,
     unit_ids: Optional[List[Union[str, int]]] = None,
     property_descriptions: Optional[dict] = None,
@@ -1117,7 +881,7 @@ def add_units_table(
 
     Parameters
    ----------
-    sorting : SpikeInterfaceSorting
+    sorting : spikeinterface.BaseSorting
     nwbfile : NWBFile
     unit_ids : list of int or list of str, optional
         Controls the unit_ids that will be written to the nwb file. If None, all
@@ -1153,28 +917,6 @@ def add_units_table(
     if not isinstance(nwbfile, pynwb.NWBFile):
         raise TypeError(f"nwbfile type should be an instance of pynwb.NWBFile but got {type(nwbfile)}")
 
-    if isinstance(sorting, SortingExtractor):
-        msg = (
-            "Support for spikeextractors.SortingExtractor objects is deprecated. "
-            "Use spikeinterface.BaseSorting objects"
-        )
-        warnings.warn(msg, DeprecationWarning, stacklevel=2)
-        checked_sorting = OldToNewSorting(oldapi_sorting_extractor=sorting)
-        # TODO: Remove spikeextractors backend
-        warn(
-            message=(
-                "Interfaces using a spikeextractors backend will soon be deprecated! "
-                "Please use the SpikeInterface backend instead."
-            ),
-            category=DeprecationWarning,
-            stacklevel=2,
-        )
-    else:
-        checked_sorting = sorting
-
-    # this flag is used to keep old behavior of assigning "id" from int unit_ids
-    old_api = isinstance(checked_sorting, OldToNewSorting)
-
     if write_in_processing_module:
         ecephys_mod = get_module(
             nwbfile=nwbfile,
@@ -1214,12 +956,12 @@ def add_units_table(
         property_descriptions = dict(default_descriptions, **property_descriptions)
 
     data_to_add = defaultdict(dict)
-    sorting_properties = checked_sorting.get_property_keys()
+    sorting_properties = sorting.get_property_keys()
     excluded_properties = list(skip_properties) + ["contact_vector"]
     properties_to_extract = [property for property in sorting_properties if property not in excluded_properties]
 
     if unit_ids is not None:
-        checked_sorting = checked_sorting.select_units(unit_ids=unit_ids)
+        checked_sorting = sorting.select_units(unit_ids=unit_ids)
         if unit_electrode_indices is not None:
             unit_electrode_indices = np.array(unit_electrode_indices)[checked_sorting.ids_to_indices(unit_ids)]
         unit_ids = checked_sorting.unit_ids
@@ -1242,10 +984,6 @@ def add_units_table(
     else:
         unit_name_array = unit_ids.astype("str", copy=False)
     data_to_add["unit_name"].update(description="Unique reference for each unit.", data=unit_name_array)
-    if old_api:
-        # If the channel ids are integer keep the old behavior of asigning table's id equal to unit_ids
-        if np.issubdtype(unit_ids.dtype, np.integer):
-            data_to_add["id"].update(data=unit_ids.astype("int"))
 
     units_table_previous_properties = set(units_table.colnames) - set({"spike_times"})
     extracted_properties = set(data_to_add)
@@ -1303,7 +1041,7 @@ def add_units_table(
             if unit_electrode_indices is not None:
                 unit_kwargs["electrodes"] = unit_electrode_indices[row]
             units_table.add_unit(spike_times=spike_times, **unit_kwargs, enforce_unique_id=True)
-        added_unit_table_ids = units_table.id[-len(rows_to_add) :]
+        # added_unit_table_ids = units_table.id[-len(rows_to_add) :]  # TODO - this line is unused?
 
     # Add unit_name as a column and fill previously existing rows with unit_name equal to str(ids)
     previous_table_size = len(units_table.id[:]) - len(unit_name_array)
@@ -1349,113 +1087,9 @@ def add_units_table(
             cols_args["data"] = extended_data
         units_table.add_column(property, **cols_args)
 
-    if write_waveforms:
-        assert write_table_first_time, "write_waveforms is not supported with re-write"
-        units_table = _add_waveforms_to_units_table(
-            sorting=sorting,
-            units_table=units_table,
-            row_ids=added_unit_table_ids,
-            skip_features=skip_features,
-        )
-
-
-def _add_waveforms_to_units_table(
-    sorting: SortingExtractor,
-    units_table,
-    row_ids,
-    skip_features: Optional[List[str]] = None,
-):
-    """
-    Auxiliary method for adding waveforms to an existing units_table.
-
-    Parameters
-    ----------
-    sorting : A spikeextractors SortingExtractor.
-    units_table : a previously created units table
-    skip_features : list of str
-        Each string in this list that matches a spike feature will not be written to the NWBFile.
-    """
-    unit_ids = sorting.get_unit_ids()
-
-    if isinstance(sorting, SortingExtractor):
-        # TODO: Remove spikeextractors backend
-        warn(
-            message=(
-                "Interfaces using a spikeextractors backend will soon be deprecated! "
-                "Please use the SpikeInterface backend instead."
-            ),
-            category=DeprecationWarning,
-            stacklevel=2,
-        )
-
-        all_features = set()
-        for unit_id in unit_ids:
-            all_features.update(sorting.get_unit_spike_feature_names(unit_id))
-        if skip_features is None:
-            skip_features = []
-        # Check that multidimensional features have the same shape across units
-        feature_shapes = dict()
-        for feature_name in all_features:
-            shapes = []
-            for unit_id in unit_ids:
-                if feature_name in sorting.get_unit_spike_feature_names(unit_id=unit_id):
-                    feat_value = sorting.get_unit_spike_features(unit_id=unit_id, feature_name=feature_name)
-                    if isinstance(feat_value[0], (int, np.integer, float, str, bool)):
-                        break
-                    elif isinstance(feat_value[0], (list, np.ndarray)):  # multidimensional features
-                        if np.array(feat_value).ndim > 1:
-                            shapes.append(np.array(feat_value).shape)
-                            feature_shapes[feature_name] = shapes
-                    elif isinstance(feat_value[0], dict):
-                        print(f"Skipping feature '{feature_name}' because dictionaries are not supported.")
-                        skip_features.append(feature_name)
-                        break
-                else:
-                    print(f"Skipping feature '{feature_name}' because not share across all units.")
-                    skip_features.append(feature_name)
-                    break
-        nspikes = {k: get_nspikes(units_table, k) for k in row_ids}
-        for feature_name in feature_shapes.keys():
-            # skip first dimension (num_spikes) when comparing feature shape
-            if not np.all([elem[1:] == feature_shapes[feature_name][0][1:] for elem in feature_shapes[feature_name]]):
-                print(f"Skipping feature '{feature_name}' because it has variable size across units.")
-                skip_features.append(feature_name)
-        for feature_name in set(all_features) - set(skip_features):
-            values = []
-            if not feature_name.endswith("_idxs"):
-                for unit_id in sorting.get_unit_ids():
-                    feat_vals = sorting.get_unit_spike_features(unit_id=unit_id, feature_name=feature_name)
-                    if len(feat_vals) < nspikes[unit_id]:
-                        skip_features.append(feature_name)
-                        print(f"Skipping feature '{feature_name}' because it is not defined for all spikes.")
-                        break
-                    else:
-                        all_feat_vals = feat_vals
-                    values.append(all_feat_vals)
-                flatten_vals = [item for sublist in values for item in sublist]
-                nspks_list = [sp for sp in nspikes.values()]
-                spikes_index = np.cumsum(nspks_list).astype("int64")
-                if feature_name in units_table:  # If property already exists, skip it
-                    warnings.warn(f"Feature {feature_name} already present in units table, skipping it")
-                    continue
-                set_dynamic_table_property(
-                    dynamic_table=units_table,
-                    row_ids=[int(k) for k in row_ids],
-                    property_name=feature_name,
-                    values=flatten_vals,
-                    index=spikes_index,
-                )
-    else:
-        """
-        Currently (2022-04-22), spikeinterface does not support waveform features.
-        """
-        pass
-
-    return units_table
-
 
 def write_sorting(
-    sorting: SpikeInterfaceSorting,
+    sorting: BaseSorting,
     nwbfile_path: OptionalFilePathType = None,
     nwbfile: Optional[pynwb.NWBFile] = None,
     metadata: Optional[dict] = None,
@@ -1474,7 +1108,7 @@ def write_sorting(
 
     Parameters
     ----------
-    sorting : SpikeInterfaceSorting
+    sorting : spikeinterface.BaseSorting
     nwbfile_path : FilePathType, optional
         Path for where to write or load (if overwrite=False) the NWBFile.
         If specified, the context will always write to this location.
@@ -1575,7 +1209,7 @@ def add_waveforms(
     overwrite : bool, default: False
         Whether to overwrite the NWBFile if one exists at the nwbfile_path.
         The default is False (append mode).
-    recording : BaseRecording, optional
+    recording : spikeinterface.BaseRecording, optional
         If the waveform_extractor is 'recordingless', this argument needs to be passed to save electrode info.
         Otherwise, electrodes info is not added to the nwb file.
     unit_ids : list, optional
@@ -1670,7 +1304,7 @@ def write_waveforms(
 
     Parameters
    ----------
-    sorting : SortingExtractor
+    waveform_extractor : spikeinterface.WaveformExtractor
     nwbfile_path : FilePathType
         Path for where to write or load (if overwrite=False) the NWBFile.
         If specified, the context will always write to this location.
@@ -1687,7 +1321,7 @@ def write_waveforms(
     overwrite : bool, optional
         Whether or not to overwrite the NWBFile if one exists at the nwbfile_path.
         The default is False (append mode).
-    recording : BaseRecording, optional
+    recording : spikeinterface.BaseRecording, optional
         If the waveform_extractor is 'recordingless', this argument needs to be passed to save electrode info.
         Otherwise, electrodes info is not added to the nwb file.
     verbose : bool, optional
@@ -1763,9 +1397,9 @@ def get_electrode_group_indices(recording, nwbfile):
     return electrode_group_indices
 
 
-def waveform_extractor_has_recording(waveform_extractor) -> bool:
+def waveform_extractor_has_recording(waveform_extractor) -> bool:  # TODO - this can probably be replaced now
     """
-    Temporary helper function to substitute unreleased built-in waveform_extractor.has_recording()
+    Temporary helper function to substitute unreleased built-in waveform_extractor.has_recording().
 
     Parameters
     ----------

From e1982dd94c1421e3f178815db4a42d817cd5bab0 Mon Sep 17 00:00:00 2001
From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com>
Date: Fri, 10 Feb 2023 13:58:09 -0500
Subject: [PATCH 04/28] Update requirements.txt

---
 src/neuroconv/datainterfaces/ecephys/requirements.txt | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/neuroconv/datainterfaces/ecephys/requirements.txt b/src/neuroconv/datainterfaces/ecephys/requirements.txt
index e525be695..130975702 100644
--- a/src/neuroconv/datainterfaces/ecephys/requirements.txt
+++ b/src/neuroconv/datainterfaces/ecephys/requirements.txt
@@ -1,3 +1,2 @@
-spikeinterface>=0.95.1
-spikeextractors>=0.9.10
+spikeinterface>=0.97.0
 packaging<22.0

From 1a285e04bcc855b46defcb2d4217f5a2b60fb31f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 23 Feb 2023 11:19:37 +0000
Subject: [PATCH 05/28] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/neuroconv/datainterfaces/ecephys/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/neuroconv/datainterfaces/ecephys/requirements.txt b/src/neuroconv/datainterfaces/ecephys/requirements.txt
index 4fbcebd9b..130975702 100644
--- a/src/neuroconv/datainterfaces/ecephys/requirements.txt
+++ b/src/neuroconv/datainterfaces/ecephys/requirements.txt
@@ -1,2 +1,2 @@
 spikeinterface>=0.97.0
-packaging<22.0
\ No newline at end of file
+packaging<22.0

From 1a28053cc88bd8b0b830624315b01bae01daf662 Mon Sep 17 00:00:00 2001
From: CodyCBakerPhD
Date: Fri, 24 Feb 2023 04:53:47 -0500
Subject: [PATCH 06/28] cleaning

---
 .../ecephys/axona/axonadatainterface.py       |  4 +-
 .../ecephys/biocam/biocamdatainterface.py     |  2 +-
 .../ecephys/spikeglx/spikeglx_utils.py        | 66 +++----------
 .../ecephys/spikeglx/spikeglxdatainterface.py | 55 +++++-------
 4 files changed, 28 insertions(+), 99 deletions(-)
diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
index 81fddfad2..7f557d145 100644
--- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
@@ -10,8 +10,8 @@
 
 
 class AxonaRecordingInterface(BaseRecordingExtractorInterface):
-    """Primary data interface class for converting a Axona data using a
-    :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`."""
+    """
+    Primary data interface class for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`."""
 
     def __init__(self, file_path: FilePathType, verbose: bool = True):
         """

diff --git a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py
index cd4166344..f65a222d6 100644
--- a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py
@@ -17,7 +17,7 @@ def __init__(self, file_path: FilePathType, verbose: bool = True):
 
         Parameters
         ----------
-        folder_path : string or Path
+        file_path : string or Path
             Path to the .bwr file.
         verbose : bool, default: True
             Allows verbose.

diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglx_utils.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglx_utils.py
index df910587b..c2d34cee6 100644
--- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglx_utils.py
+++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglx_utils.py
@@ -1,70 +1,24 @@
+"""Utilities used by the SpikeGLX interfaces."""
 from datetime import datetime
 from pathlib import Path
 
 from ....utils import FilePathType
 
 
-def _assert_single_shank_for_spike_extractors(recording):
-    """Raises an exception for a se.SpikeGLXRecordingExtractor object initialized in a file
-    with complex geometry as this is not (and will not be )supported in the old spikeextractors API.
-
-    Parameters
-    ----------
-    recording : se.SpikeGLXRecordingExtractor
-        a newly instantiated version of the spikeextractors object
-
-    Raises
-    ------
-    NotImplementedError
-        Raises a not implemented error.
-    """
-    meta = recording._meta
-    # imDatPrb_type 0 and 21 correspond to single shank channels
-    # see https://billkarsh.github.io/SpikeGLX/help/imroTables/
-    imDatPrb_type = meta["imDatPrb_type"]
-    if imDatPrb_type not in ["0", "21"]:
-        raise NotImplementedError(
-            "More than a single shank is not supported in spikeextractors, use the new spikeinterface."
-        )
-
-
-def _fetch_metadata_dic_for_spikextractors_spikelgx_object(recording) -> dict:
-    """
-    fetches the meta file from a se.SpikeGLXRecordingExtractor object.
-    Parameters
-    ----------
-    recording : se.SpikeGLXRecordingExtractor
-        a newly instantiated version of the spikeextractors object
-
-
-    Returns
-    -------
-    dict
-        a dictionary with the metadadata concerning the recording
+def get_session_start_time(recording_metadata: dict) -> datetime:
     """
-    from spikeextractors import SubRecordingExtractor
-
-    if isinstance(recording, SubRecordingExtractor):
-        recording_metadata = recording._parent_recording._meta
-    else:
-        recording_metadata = recording._meta
-
-    return recording_metadata
+    Fetches the session start time from the recording_metadata dictionary.
 
-
-def get_session_start_time(recording_metadata: dict) -> datetime:
-    """Fetches the session start time from the recording_metadata dic
     Parameters
     ----------
     recording_metadata : dict
-        the metadata dic as obtained from the Spikelgx recording.
+        The metadata dictionary as obtained from the SpikeGLX recording.
 
     Returns
     -------
     datetime or None
         the session start time in datetime format.
     """
-
     session_start_time = recording_metadata.get("fileCreateTime", None)
     if session_start_time.startswith("0000-00-00"):
         # date was removed. This sometimes happens with human data to protect the
@@ -76,15 +30,16 @@ def get_session_start_time(recording_metadata: dict) -> datetime:
 
 
 def fetch_stream_id_for_spikelgx_file(file_path: FilePathType) -> str:
-    """Returns the stream_id for a spikelgx file
+    """
+    Returns the stream_id for a SpikeGLX file.
 
     Example of file name structure:
     Consider the filenames: `Noise4Sam_g0_t0.nidq.bin` or `Noise4Sam_g0_t0.imec0.lf.bin`
     The filenames consist of 3 or 4 parts separated by `.`
-    1. "Noise4Sam_g0_t0" will be the `name` variable. This chosen by the user at recording time.
-    2. "_gt0_" will give the `seg_index` (here 0)
-    3. "nidq" or "imec0" will give the `device` variable
-    4. "lf" or "ap" will be the `signal_kind` variable (for nidq the signal kind is an empty string)
+    1. "Noise4Sam_g0_t0" will be the `name` variable. This is chosen by the user at recording time.
+    2. "_gt0_" will give the `seg_index` (here 0)
+    3. "nidq" or "imec0" will give the `device` variable
+    4. "lf" or "ap" will be the `signal_kind` variable (for nidq the signal kind is an empty string)
 
     stream_id is the concatenation of `device.signal_kind`
 
     Parameters
     ----------
     file_path : FilePathType
         The file_path of spikelgx file.
 
     Returns
     -------
     str
         the stream_id
     """
-
     suffixes = Path(file_path).suffixes
     device = next(suffix for suffix in suffixes if "imec" in suffix or "nidq" in suffix)
     signal_kind = ""

diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py
index ee1aef3e3..33d20f25e 100644
--- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py
@@ -1,7 +1,6 @@
-"""Authors: Cody Baker, Heberto Mayorquin and Ben Dichter."""
+"""The primary data interfaces for SpikeGLX."""
 from pathlib import Path
 import json
-from warnings import warn
 from typing import Optional
 
 from pynwb import NWBFile
@@ -9,17 +8,11 @@
 
 from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
 from ....utils import get_schema_from_method_signature, get_schema_from_hdmf_class, FilePathType, dict_deep_update
-from .spikeglx_utils import (
-    get_session_start_time,
-    _fetch_metadata_dic_for_spikextractors_spikelgx_object,
-    _assert_single_shank_for_spike_extractors,
-    fetch_stream_id_for_spikelgx_file,
-)
+from .spikeglx_utils import get_session_start_time, fetch_stream_id_for_spikelgx_file
 
 
 def add_recording_extractor_properties(recording_extractor) -> None:
     """Automatically add shankgroup_name and shank_electrode_number for spikeglx."""
-
     probe = recording_extractor.get_probe()
     channel_ids = recording_extractor.get_channel_ids()
 
@@ -70,34 +63,15 @@ def __init__(
         self.stub_test = stub_test
         self.stream_id = fetch_stream_id_for_spikelgx_file(file_path)
 
-        if spikeextractors_backend:  # pragma: no cover
-            # TODO: Remove spikeextractors backend
-            warn(
-                message=(
-                    "Interfaces using a spikeextractors backend will soon be deprecated! "
-                    "Please use the SpikeInterface backend instead."
-                ),
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            from spikeextractors import SpikeGLXRecordingExtractor
-            from spikeinterface.core.old_api_utils import OldToNewRecording
-
-            self.Extractor = SpikeGLXRecordingExtractor
-            super().__init__(file_path=str(file_path), verbose=verbose)
-            _assert_single_shank_for_spike_extractors(self.recording_extractor)
-            self.meta = _fetch_metadata_dic_for_spikextractors_spikelgx_object(self.recording_extractor)
-            self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor)
-        else:
-            file_path = Path(file_path)
-            folder_path = file_path.parent
-            super().__init__(
-                folder_path=folder_path,
-                stream_id=self.stream_id,
-                verbose=verbose,
-            )
-            self.source_data["file_path"] = str(file_path)
-            self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"]
+        file_path = Path(file_path)
+        folder_path = file_path.parent
+        super().__init__(
+            folder_path=folder_path,
+            stream_id=self.stream_id,
+            verbose=verbose,
+        )
+        self.source_data["file_path"] = str(file_path)
+        self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"]
 
         # Mount the probe
         # TODO - this can be removed in the next release of SpikeInterface (probe mounts automatically)
@@ -149,15 +123,16 @@ def get_metadata(self) -> dict:
         return metadata
 
     def get_device_metadata(self) -> dict:
-        """Returns a device with description including the metadat as described here
-        # https://billkarsh.github.io/SpikeGLX/Sgl_help/Metadata_30.html
+        """
+        Returns a device with description including the metadata.
+
+        Details described in https://billkarsh.github.io/SpikeGLX/Sgl_help/Metadata_30.html
 
         Returns
         -------
         dict
             a dict containing the metadata necessary for creating the device
         """
-        meta = self.meta
         metadata_dict = dict()
         if "imDatPrb_type" in self.meta:

From c80a06cd47d6b2b947666bd37d2e2385f07e7d92 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 24 Feb 2023 09:55:28 +0000
Subject: [PATCH 07/28] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../datainterfaces/ecephys/axona/axonadatainterface.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
index 7f557d145..39dd9a65b 100644
--- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
@@ -11,7 +11,8 @@
 
 class AxonaRecordingInterface(BaseRecordingExtractorInterface):
     """
-    Primary data interface class for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`."""
+    Primary data interface class for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`.
+    """
 
     def __init__(self, file_path: FilePathType, verbose: bool = True):
         """

From 13367fe1965f6899b9ce0742e3d6d4e8489ca76c Mon Sep 17 00:00:00 2001
From: CodyCBakerPhD
Date: Fri, 24 Feb 2023 05:03:43 -0500
Subject: [PATCH 08/28] continue cleaning

---
 CHANGELOG.md                                  |   3 +
 .../ecephys/ced/ceddatainterface.py           |   3 +-
 .../ecephys/intan/intandatainterface.py       |   7 +-
 .../neuroscope/neuroscopedatainterface.py     | 123 +-----------------
 4 files changed, 7 insertions(+), 129 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index df67ed588..d3c7fcd57 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
 ### Back-compatibility break
 * `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324)
 * The `spikeextractor_backend` option was removed for several `RecordingExtractorInterface` classes. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324)
+* The `NeuroScopeMultiRecordingExtractor` has been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
 
 ### Features
 * The `OpenEphysRecordingInterface` is now a wrapper for `OpenEphysBinaryRecordingInterface`. [PR #294](https://github.com/catalystneuro/neuroconv/pull/294)
@@ -26,6 +27,8 @@
 [PR #322](https://github.com/catalystneuro/neuroconv/pull/322)
 * Moved instructions to build the documentation from README.md to ReadTheDocs.
 [PR #323](https://github.com/catalystneuro/neuroconv/pull/323)
 
+
+
 # v0.2.4
 
 ### Deprecation

diff --git a/src/neuroconv/datainterfaces/ecephys/ced/ceddatainterface.py b/src/neuroconv/datainterfaces/ecephys/ced/ceddatainterface.py
index 89a508620..97ff6d874 100644
--- a/src/neuroconv/datainterfaces/ecephys/ced/ceddatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/ced/ceddatainterface.py
@@ -31,8 +31,7 @@ def get_source_schema(cls):
     def get_all_channels_info(cls, file_path: FilePathType):
         """Retrieve and inspect necessary channel information prior to initialization."""
         _test_sonpy_installation()
-        getattr(cls, "RX")  # Required to trigger dynamic access in case this method is called first
-        return cls.RX.get_all_channels_info(file_path=file_path)
+        return cls.get_extractor().get_all_channels_info(file_path=file_path)
 
     def __init__(self, file_path: FilePathType, verbose: bool = True):
         """

diff --git a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py
index 0474a9d8c..adce38b56 100644
--- a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py
@@ -60,12 +60,7 @@ class IntanRecordingInterface(BaseRecordingExtractorInterface):
     """Primary data interface class for converting Intan data using the
     :py:class:`~spikeinterface.extractors.IntanRecordingExtractor`."""
 
-    def __init__(
-        self,
-        file_path: FilePathType,
-        stream_id: str = "0",
-        verbose: bool = True,
-    ):
+    def __init__(self, file_path: FilePathType, stream_id: str = "0", verbose: bool = True):
         """
         Load and prepare raw data and corresponding metadata from the Intan format (.rhd or .rhs files).
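The CED hunk's switch to `cls.get_extractor()` and the CHANGELOG entry quoted above describe the interface-access pattern this series converges on. A minimal sketch of that pattern from user code, under stated assumptions (the classmethod name comes from the CHANGELOG entry, the constructor signature from the Intan diff above, and the `.rhd` path is a placeholder):

    # Sketch only, not a verbatim excerpt from the repository.
    from neuroconv.datainterfaces import IntanRecordingInterface

    extractor_class = IntanRecordingInterface.get_extractor()  # classmethod; replaces the old self.Extractor attribute
    interface = IntanRecordingInterface(file_path="session.rhd", stream_id="0")  # "session.rhd" is a placeholder path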
diff --git a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py index 155793b8c..0f1d07942 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py @@ -135,65 +135,6 @@ def get_metadata(self): return metadata -class NeuroScopeMultiRecordingTimeInterface(NeuroScopeRecordingInterface): - """Primary data interface class for converting a NeuroscopeMultiRecordingTimeExtractor.""" - - RXModule = "spikeextractors" - RXName = "NeuroscopeMultiRecordingTimeExtractor" - - def __init__( - self, - folder_path: FolderPathType, - gain: Optional[float] = None, - xml_file_path: OptionalFilePathType = None, - ): - """ - Load and prepare raw acquisition data and corresponding metadata from the Neuroscope format (.dat files). - - For all the .dat files in the folder_path, this concatenates them in time assuming no gaps in between. - If there are gaps, timestamps inside the RecordingExtractor should be overridden. - - Parameters - ---------- - folder_path : FolderPathType - Path to folder of multiple .dat files. - gain : Optional[float], optional - Conversion factors from int16 to Volts are not contained in xml_file_path; set them explicitly here. - Most common value is 0.195 for an intan recording system. - The default is None. - xml_file_path : OptionalFilePathType, optional - Path to .xml file containing device and electrode configuration. - If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file. - The default is None. - """ - # TODO: Remove this sub interface - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend with multiple segments instead." - ), - category=DeprecationWarning, - stacklevel=2, - ) - - get_package(package_name="lxml") - from spikeinterface.core.old_api_utils import OldToNewRecording - - if xml_file_path is None: - xml_file_path = get_xml_file_path(data_file_path=folder_path) - super(NeuroScopeRecordingInterface, self).__init__( - folder_path=folder_path, - gain=gain, - xml_file_path=xml_file_path, - ) - self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor) - - self.recording_extractor = subset_shank_channels( - recording_extractor=self.recording_extractor, xml_file_path=xml_file_path - ) - add_recording_extractor_properties(recording_extractor=self.recording_extractor, xml_file_path=xml_file_path) - - class NeuroScopeLFPInterface(BaseLFPExtractorInterface): """Primary data interface class for converting Neuroscope LFP data.""" @@ -204,7 +145,6 @@ def __init__( file_path: FilePathType, gain: Optional[float] = None, xml_file_path: OptionalFilePathType = None, - spikeextractors_backend: bool = False, ): """ Load and prepare lfp data and corresponding metadata from the Neuroscope format (.eeg or .lfp files). @@ -221,35 +161,14 @@ def __init__( Path to .xml file containing device and electrode configuration. If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file. The default is None. - spikeextractors_backend : bool - False by default. When True the interface uses the old extractor from the spikextractors library instead - of a new spikeinterface object. 
""" get_package(package_name="lxml") if xml_file_path is None: xml_file_path = get_xml_file_path(data_file_path=file_path) - if spikeextractors_backend: - # TODO: Remove spikeextractors backend - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend instead." - ), - category=DeprecationWarning, - stacklevel=2, - ) - - from spikeextractors import NeuroscopeRecordingExtractor - from spikeinterface.core.old_api_utils import OldToNewRecording - - self.Extractor = NeuroscopeRecordingExtractor - super().__init__(file_path=file_path, xml_file_path=xml_file_path) - self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor) - else: - super().__init__(file_path=file_path) - self.source_data["xml_file_path"] = xml_file_path + super().__init__(file_path=file_path) + self.source_data["xml_file_path"] = xml_file_path add_recording_extractor_properties( recording_extractor=self.recording_extractor, xml_file_path=xml_file_path, gain=gain @@ -277,12 +196,6 @@ def __init__( exclude_shanks: Optional[list] = None, xml_file_path: OptionalFilePathType = None, verbose: bool = True, - spikeextractors_backend: bool = False, - # TODO: we can enable this once - # a) waveforms on unit columns support conversion factor in NWB - # b) write_sorting utils support writing said waveforms properly to a units table - # load_waveforms: bool = False, - # gain: Optional[float] = None, ): """ Load and prepare spike sorted data and corresponding metadata from the Neuroscope format (.res/.clu files). @@ -301,35 +214,8 @@ def __init__( Path to .xml file containing device and electrode configuration. If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file. The default is None. - load_waveforms : bool, optional - If True, extracts waveform data from .spk.%i files in the path corresponding to - the .res.%i and .clue.%i files and sets these as unit spike features. - The default is False. - Not currently in use pending updates to NWB waveforms. - gain : float, optional - If loading waveforms, this value converts the data type of the waveforms to units of microvolts. - Conversion factors from int16 to Volts are not contained in xml_file_path; set them explicitly here. - Most common value is 0.195 for an intan recording system. - The default is None. - Not currently in use pending updates to NWB waveforms. - spikeextractors_backend : bool - False by default. When True the interface uses the old extractor from the spikextractors library instead - of a new spikeinterface object. """ get_package(package_name="lxml") - from spikeextractors import NeuroscopeMultiSortingExtractor - - if spikeextractors_backend: - # TODO: Remove spikeextractors backend - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend instead." 
- ), - category=DeprecationWarning, - stacklevel=2, - ) - self.Extractor = NeuroscopeMultiSortingExtractor super().__init__( folder_path=folder_path, @@ -337,11 +223,6 @@ def __init__( exclude_shanks=exclude_shanks, xml_file_path=xml_file_path, verbose=verbose, - # TODO: we can enable this once - # a) waveforms on unit columns support conversion factor in NWB - # b) write_sorting utils support writing said waveforms properly to a units table - # load_waveforms=load_waveforms, - # gain=gain, ) def get_metadata(self): From 114b40977597c7761eaf98d745656c8ee5031bfa Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 05:16:10 -0500 Subject: [PATCH 09/28] clean more --- CHANGELOG.md | 3 + src/neuroconv/datainterfaces/__init__.py | 3 - .../ecephys/phy/phydatainterface.py | 15 ----- .../spikegadgets/spikegadgetsdatainterface.py | 8 +-- .../ecephys/spikeinterface/__init__.py | 0 .../spikeinterface/sipickledatainterfaces.py | 55 ------------------- .../ecephys/tdt/tdtdatainterface.py | 7 +-- 7 files changed, 5 insertions(+), 86 deletions(-) delete mode 100644 src/neuroconv/datainterfaces/ecephys/spikeinterface/__init__.py delete mode 100644 src/neuroconv/datainterfaces/ecephys/spikeinterface/sipickledatainterfaces.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d3c7fcd57..cf4912b61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,11 @@ ### Back-compatibility break * `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324) * The `spikeextractor_backend` option was removed for several `RecordingExtractorInterface` classes. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324) +* The `spikeextractor_backend` option was removed for all remaining classes. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) +* The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `NeuroScopeMultiRecordingExtractor` has been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) + ### Features * The `OpenEphysRecordingInterface` is now a wrapper for `OpenEphysBinaryRecordingInterface`. [PR #294](https://github.com/catalystneuro/neuroconv/pull/294) * Swapped the backend for `CellExplorerSortingInterface` from `spikeextractors` to `spikeinterface`.
[PR #267](https://github.com/catalystneuro/neuroconv/pull/267) diff --git a/src/neuroconv/datainterfaces/__init__.py b/src/neuroconv/datainterfaces/__init__.py index b44b6c3ef..6284402ae 100644 --- a/src/neuroconv/datainterfaces/__init__.py +++ b/src/neuroconv/datainterfaces/__init__.py @@ -69,15 +69,12 @@ NeuralynxRecordingInterface, NeuralynxSortingInterface, NeuroScopeRecordingInterface, - NeuroScopeMultiRecordingTimeInterface, NeuroScopeSortingInterface, NeuroScopeLFPInterface, SpikeGLXRecordingInterface, SpikeGLXLFPInterface, SpikeGLXNIDQInterface, SpikeGadgetsRecordingInterface, - SIPickleRecordingInterface, - SIPickleSortingInterface, IntanRecordingInterface, CEDRecordingInterface, CellExplorerSortingInterface, diff --git a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py index 37f701f16..3aeb60761 100644 --- a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py @@ -15,7 +15,6 @@ def __init__( folder_path: FolderPathType, exclude_cluster_groups: Optional[list] = None, verbose: bool = True, - spikeextractors_backend: bool = False, ): """ Initialize a PhySortingInterface. @@ -27,19 +26,5 @@ def __init__( exclude_cluster_groups : str or list of str, optional Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). verbose : bool, default: True - spikeextractors_backend : bool, default: False """ - if spikeextractors_backend: - # TODO: Remove spikeextractors backend - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend instead." - ), - category=DeprecationWarning, - stacklevel=2, - ) - from spikeextractors import PhySortingExtractor - - self.Extractor = PhySortingExtractor super().__init__(folder_path=folder_path, exclude_cluster_groups=exclude_cluster_groups, verbose=verbose) diff --git a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py index 70234b02b..1b5bfe708 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py @@ -16,12 +16,7 @@ def get_source_schema(cls): source_schema["properties"]["file_path"].update(description="Path to SpikeGadgets (.rec) file.") return source_schema - def __init__( - self, - file_path: FilePathType, - gains: Optional[ArrayType] = None, - verbose: bool = True, - ): + def __init__(self, file_path: FilePathType, gains: Optional[ArrayType] = None, verbose: bool = True): """ Recording Interface for the SpikeGadgets Format. @@ -34,7 +29,6 @@ def __init__( acquisition system. Thus it must be specified either as a single value (if all channels have the same gain) or an array of values for each channel. 
""" - super().__init__(file_path=file_path, stream_id="trodes", verbose=verbose) self.source_data = dict(file_path=file_path, verbose=verbose) diff --git a/src/neuroconv/datainterfaces/ecephys/spikeinterface/__init__.py b/src/neuroconv/datainterfaces/ecephys/spikeinterface/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/neuroconv/datainterfaces/ecephys/spikeinterface/sipickledatainterfaces.py b/src/neuroconv/datainterfaces/ecephys/spikeinterface/sipickledatainterfaces.py deleted file mode 100644 index 8085cb6f7..000000000 --- a/src/neuroconv/datainterfaces/ecephys/spikeinterface/sipickledatainterfaces.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Authors: Alessio Buccino.""" -from warnings import warn - -from ..baserecordingextractorinterface import BaseRecordingExtractorInterface -from ..basesortingextractorinterface import BaseSortingExtractorInterface -from ....utils import FilePathType - - -class SIPickleRecordingInterface(BaseRecordingExtractorInterface): - """Primary interface for reading and converting SpikeInterface Recording objects through .pkl files.""" - - ExtractorModuleName = "spikeextractors" - ExtractorName = "load_extractor_from_pickle" - - def __init__(self, file_path: FilePathType, verbose: bool = True): - """ - Initialize reading of SpikeInterface Pickle files. - - Parameters - ---------- - file_path : FilePathType - Path to .pkl file. - verbose : bool, optional, default=True - """ - # TODO: Remove entire interfaces - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend instead." - ), - category=DeprecationWarning, - stacklevel=2, - ) - - from spikeextractors import load_extractor_from_pickle - - self.recording_extractor = load_extractor_from_pickle(pkl_file=file_path) - self.subset_channels = None - self.source_data = dict(file_path=file_path) - self.verbose = verbose - self.es_key = None - - -class SIPickleSortingInterface(BaseSortingExtractorInterface): - """Primary interface for reading and converting SpikeInterface Sorting objects through .pkl files.""" - - ExtractorModuleName = "spikeextractors" - ExtractorName = "load_extractor_from_pickle" - - def __init__(self, file_path: FilePathType, verbose: bool = True): - from spikeextractors import load_extractor_from_pickle - - self.sorting_extractor = load_extractor_from_pickle(pkl_file=file_path) - self.source_data = dict(file_path=file_path) - self.verbose = verbose diff --git a/src/neuroconv/datainterfaces/ecephys/tdt/tdtdatainterface.py b/src/neuroconv/datainterfaces/ecephys/tdt/tdtdatainterface.py index 1a6346e03..37dc660a9 100644 --- a/src/neuroconv/datainterfaces/ecephys/tdt/tdtdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/tdt/tdtdatainterface.py @@ -24,12 +24,7 @@ def __init__(self, folder_path: FolderPathType, stream_id: str = "0", verbose: b ----- Stream "0" corresponds to LFP for gin data. Other streams seem non-electrical. 
""" - - super().__init__( - folder_path=folder_path, - stream_id=stream_id, - verbose=verbose, - ) + super().__init__(folder_path=folder_path, stream_id=stream_id, verbose=verbose) # Fix channel name format channel_names = self.recording_extractor.get_property("channel_name") From 09c6e750446f6436052ad611cd6a49ce032c1fa3 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 05:18:30 -0500 Subject: [PATCH 10/28] clean init --- src/neuroconv/datainterfaces/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/neuroconv/datainterfaces/__init__.py b/src/neuroconv/datainterfaces/__init__.py index 6284402ae..b10afba41 100644 --- a/src/neuroconv/datainterfaces/__init__.py +++ b/src/neuroconv/datainterfaces/__init__.py @@ -2,7 +2,6 @@ from .ecephys.neuroscope.neuroscopedatainterface import ( NeuroScopeRecordingInterface, NeuroScopeLFPInterface, - NeuroScopeMultiRecordingTimeInterface, NeuroScopeSortingInterface, ) from .ecephys.spikeglx.spikeglxdatainterface import SpikeGLXRecordingInterface, SpikeGLXLFPInterface From eed0960d119997d33197142984c964d6821e5979 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 05:21:25 -0500 Subject: [PATCH 11/28] cleain init --- src/neuroconv/datainterfaces/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/neuroconv/datainterfaces/__init__.py b/src/neuroconv/datainterfaces/__init__.py index b10afba41..acb1b772e 100644 --- a/src/neuroconv/datainterfaces/__init__.py +++ b/src/neuroconv/datainterfaces/__init__.py @@ -7,10 +7,6 @@ from .ecephys.spikeglx.spikeglxdatainterface import SpikeGLXRecordingInterface, SpikeGLXLFPInterface from .ecephys.spikeglx.spikeglxnidqinterface import SpikeGLXNIDQInterface from .ecephys.spikegadgets.spikegadgetsdatainterface import SpikeGadgetsRecordingInterface -from .ecephys.spikeinterface.sipickledatainterfaces import ( - SIPickleRecordingInterface, - SIPickleSortingInterface, -) from .ecephys.intan.intandatainterface import IntanRecordingInterface from .ecephys.ced.ceddatainterface import CEDRecordingInterface from .ecephys.cellexplorer.cellexplorerdatainterface import CellExplorerSortingInterface From bdf2b4e41c3c90b3a029737010cfd7f84caa3b5f Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 05:25:10 -0500 Subject: [PATCH 12/28] clean init --- src/neuroconv/tools/spikeinterface/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/neuroconv/tools/spikeinterface/__init__.py b/src/neuroconv/tools/spikeinterface/__init__.py index e4d549422..ff48fc9fb 100644 --- a/src/neuroconv/tools/spikeinterface/__init__.py +++ b/src/neuroconv/tools/spikeinterface/__init__.py @@ -5,7 +5,6 @@ add_electrodes, check_if_recording_traces_fit_into_memory, add_electrical_series, - add_epochs, write_recording, write_sorting, write_waveforms, From ab9c6443f55b90b87dc29a14074f00e7da83526f Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 05:31:17 -0500 Subject: [PATCH 13/28] fix --- .../tools/spikeinterface/spikeinterface.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py index 736e1accc..c849dc2b1 100644 --- a/src/neuroconv/tools/spikeinterface/spikeinterface.py +++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py @@ -961,14 +961,14 @@ def add_units_table( properties_to_extract = [property for property in sorting_properties if property not in excluded_properties] if unit_ids is not None: - checked_sorting = 
sorting.select_units(unit_ids=unit_ids) + sorting = sorting.select_units(unit_ids=unit_ids) if unit_electrode_indices is not None: - unit_electrode_indices = np.array(unit_electrode_indices)[checked_sorting.ids_to_indices(unit_ids)] - unit_ids = checked_sorting.unit_ids + unit_electrode_indices = np.array(unit_electrode_indices)[sorting.ids_to_indices(unit_ids)] + unit_ids = sorting.unit_ids # Extract properties for property in properties_to_extract: - data = checked_sorting.get_property(property) + data = sorting.get_property(property) if isinstance(data[0], (bool, np.bool_)): data = data.astype(str) index = isinstance(data[0], (list, np.ndarray, tuple)) @@ -1007,7 +1007,7 @@ def add_units_table( has_electrodes_column = "electrodes" in units_table.colnames properties_with_data = {property for property in properties_to_add_by_rows if "data" in data_to_add[property]} - rows_in_data = [index for index in range(checked_sorting.get_num_units())] + rows_in_data = [index for index in range(sorting.get_num_units())] if not has_electrodes_column: rows_to_add = [index for index in rows_in_data if unit_name_array[index] not in unit_names_used_previously] else: @@ -1028,8 +1028,8 @@ def add_units_table( spike_times = [] # Extract and concatenate the spike times from multiple segments - for segment_index in range(checked_sorting.get_num_segments()): - segment_spike_times = checked_sorting.get_unit_spike_train( + for segment_index in range(sorting.get_num_segments()): + segment_spike_times = sorting.get_unit_spike_train( unit_id=unit_ids[row], segment_index=segment_index, return_times=True ) spike_times.append(segment_spike_times) From 36d0cf0cdfac59906dcbc8aed2c9eca7fba31777 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 07:52:43 -0500 Subject: [PATCH 14/28] fix tests to use SI --- CHANGELOG.md | 2 +- tests/test_ecephys/test_ecephys_interfaces.py | 38 +- .../test_tools_spikeextractors.py | 599 ------------------ 3 files changed, 5 insertions(+), 634 deletions(-) delete mode 100644 tests/test_ecephys/test_tools_spikeextractors.py diff --git a/CHANGELOG.md b/CHANGELOG.md index cf4912b61..13308a82d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ * The `spikeextractor_backend` option was removed for all remaining classes. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `NeuroScopeMultiRecordingExtractor` has been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) - +* The `SIPickle` interfaces have been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) ### Features * The `OpenEphysRecordingInterface` is now a wrapper for `OpenEphysBinaryRecordingInterface`. 
[PR #294](https://github.com/catalystneuro/neuroconv/pull/294) diff --git a/tests/test_ecephys/test_ecephys_interfaces.py b/tests/test_ecephys/test_ecephys_interfaces.py index 93b987c30..940a885a6 100644 --- a/tests/test_ecephys/test_ecephys_interfaces.py +++ b/tests/test_ecephys/test_ecephys_interfaces.py @@ -8,13 +8,12 @@ import numpy as np import pytest -import spikeextractors as se -from spikeextractors.testing import check_recordings_equal, check_sortings_equal from hdmf.testing import TestCase from pynwb import NWBHDF5IO +from spikeinterface.extractors import NumpySorting from neuroconv import NWBConverter -from neuroconv.datainterfaces import SIPickleRecordingInterface, SIPickleSortingInterface, CEDRecordingInterface +from neuroconv.datainterfaces import CEDRecordingInterface from neuroconv.datainterfaces.ecephys.basesortingextractorinterface import BaseSortingExtractorInterface python_version = Version(get_python_version()) @@ -49,46 +48,17 @@ def test_ced_import_assertions_3_11(self): CEDRecordingInterface.get_all_channels_info(file_path="does_not_matter.smrx") -def test_pkl_interface(): - toy_data = se.example_datasets.toy_example() - test_dir = Path(mkdtemp()) - output_folder = test_dir / "test_pkl" - nwbfile_path = str(test_dir / "test_pkl_files.nwb") - - se.save_si_object(object_name="test_recording", si_object=toy_data[0], output_folder=output_folder) - se.save_si_object(object_name="test_sorting", si_object=toy_data[1], output_folder=output_folder) - - class SpikeInterfaceTestNWBConverter(NWBConverter): - data_interface_classes = dict(Recording=SIPickleRecordingInterface, Sorting=SIPickleSortingInterface) - - source_data = dict( - Recording=dict(file_path=str(test_dir / "test_pkl" / "test_recording.pkl")), - Sorting=dict(file_path=str(test_dir / "test_pkl" / "test_sorting.pkl")), - ) - converter = SpikeInterfaceTestNWBConverter(source_data=source_data) - metadata = converter.get_metadata() - metadata["NWBFile"]["session_start_time"] = datetime.now().astimezone() - converter.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) - - nwb_recording = se.NwbRecordingExtractor(file_path=nwbfile_path) - nwb_sorting = se.NwbSortingExtractor(file_path=nwbfile_path) - check_recordings_equal(RX1=toy_data[0], RX2=nwb_recording) - check_recordings_equal(RX1=toy_data[0], RX2=nwb_recording, return_scaled=False) - check_sortings_equal(SX1=toy_data[1], SX2=nwb_sorting) - - class TestSortingInterface(unittest.TestCase): def setUp(self) -> None: self.sorting_start_frames = [100, 200, 300] self.num_frames = 1000 - sorting = se.NumpySortingExtractor() - sorting.set_sampling_frequency(3000) + sorting = NumpySorting(sampling_frequency=3000.0) sorting.add_unit(unit_id=1, times=np.arange(self.sorting_start_frames[0], self.num_frames)) sorting.add_unit(unit_id=2, times=np.arange(self.sorting_start_frames[1], self.num_frames)) sorting.add_unit(unit_id=3, times=np.arange(self.sorting_start_frames[2], self.num_frames)) class TestSortingInterface(BaseSortingExtractorInterface): - Extractor = se.NumpySortingExtractor + ExtractorName = "NumpySorting" def __init__(self, verbose: bool = True): self.sorting_extractor = sorting diff --git a/tests/test_ecephys/test_tools_spikeextractors.py b/tests/test_ecephys/test_tools_spikeextractors.py deleted file mode 100644 index 3abbff27d..000000000 --- a/tests/test_ecephys/test_tools_spikeextractors.py +++ /dev/null @@ -1,599 +0,0 @@ -import shutil -import tempfile -import unittest -from pathlib import Path -from datetime import datetime - 
-import numpy as np - -from pynwb import NWBHDF5IO, NWBFile -import spikeextractors as se - -from spikeextractors.testing import ( - check_sortings_equal, - check_recordings_equal, - check_dumping, - check_recording_return_types, - get_default_nwbfile_metadata, -) - -from neuroconv.tools import spikeinterface # testing aliased import -from neuroconv.tools.spikeinterface import ( - get_nwb_metadata, - write_recording, - write_sorting, -) - -from neuroconv.tools.spikeinterface.spikeinterfacerecordingdatachunkiterator import ( - SpikeInterfaceRecordingDataChunkIterator, -) -from neuroconv.utils import FilePathType - -testing_session_time = datetime.now().astimezone() - - -def _create_example(seed): - channel_ids = [0, 1, 2, 3] - num_channels = 4 - num_frames = 1000 - num_ttls = 30 - sampling_frequency = 30000 - X = np.random.RandomState(seed=seed).normal(0, 1, (num_channels, num_frames)) - geom = np.random.RandomState(seed=seed).normal(0, 1, (num_channels, 2)) - X = (X * 100).astype(int) - ttls = np.sort(np.random.permutation(num_frames)[:num_ttls]) - - RX = se.NumpyRecordingExtractor(timeseries=X, sampling_frequency=sampling_frequency, geom=geom) - RX.set_ttls(ttls) - RX.set_channel_locations([0, 0], channel_ids=0) - RX.add_epoch("epoch1", 0, 10) - RX.add_epoch("epoch2", 10, 20) - for i, channel_id in enumerate(RX.get_channel_ids()): - RX.set_channel_property(channel_id=channel_id, property_name="shared_channel_prop", value=i) - RX2 = se.NumpyRecordingExtractor(timeseries=X, sampling_frequency=sampling_frequency, geom=geom) - RX2.copy_epochs(RX) - times = np.arange(RX.get_num_frames()) / RX.get_sampling_frequency() + 5 - RX2.set_times(times) - - RX3 = se.NumpyRecordingExtractor(timeseries=X, sampling_frequency=sampling_frequency, geom=geom) - - SX = se.NumpySortingExtractor() - SX.set_sampling_frequency(sampling_frequency) - spike_times = [200, 300, 400] - train1 = np.sort(np.rint(np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times[0])).astype(int)) - SX.add_unit(unit_id=1, times=train1) - SX.add_unit(unit_id=2, times=np.sort(np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times[1]))) - SX.add_unit(unit_id=3, times=np.sort(np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times[2]))) - SX.set_unit_property(unit_id=1, property_name="int_prop", value=80) - SX.set_unit_property(unit_id=1, property_name="float_prop", value=80.0) - SX.set_unit_property(unit_id=1, property_name="str_prop", value="test_val") - SX.add_epoch("epoch1", 0, 10) - SX.add_epoch("epoch2", 10, 20) - - SX2 = se.NumpySortingExtractor() - SX2.set_sampling_frequency(sampling_frequency) - spike_times2 = [100, 150, 450] - train2 = np.rint(np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times2[0])).astype(int) - SX2.add_unit(unit_id=3, times=train2) - SX2.add_unit(unit_id=4, times=np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times2[1])) - SX2.add_unit(unit_id=5, times=np.random.RandomState(seed=seed).uniform(0, num_frames, spike_times2[2])) - SX2.set_unit_property(unit_id=4, property_name="stability", value=80) - SX2.set_unit_spike_features(unit_id=3, feature_name="widths", value=np.asarray([3] * spike_times2[0])) - SX2.copy_epochs(SX) - SX2.copy_times(RX2) - for i, unit_id in enumerate(SX2.get_unit_ids()): - SX2.set_unit_property(unit_id=unit_id, property_name="shared_unit_prop", value=i) - SX2.set_unit_spike_features( - unit_id=unit_id, feature_name="shared_unit_feature", value=np.asarray([i] * spike_times2[i]) - ) - SX3 = se.NumpySortingExtractor() - 
train3 = np.asarray([1, 20, 21, 35, 38, 45, 46, 47]) - SX3.add_unit(unit_id=0, times=train3) - features3 = np.asarray([0, 5, 10, 15, 20, 25, 30, 35]) - features4 = np.asarray([0, 10, 20, 30]) - feature4_idx = np.asarray([0, 2, 4, 6]) - SX3.set_unit_spike_features(unit_id=0, feature_name="dummy", value=features3) - SX3.set_unit_spike_features(unit_id=0, feature_name="dummy2", value=features4, indexes=feature4_idx) - - example_info = dict( - channel_ids=channel_ids, - num_channels=num_channels, - num_frames=num_frames, - sampling_frequency=sampling_frequency, - unit_ids=[1, 2, 3], - train1=train1, - train2=train2, - train3=train3, - features3=features3, - unit_prop=80, - channel_prop=(0, 0), - ttls=ttls, - epochs_info=((0, 10), (10, 20)), - geom=geom, - times=times, - ) - - return RX, RX2, RX3, SX, SX2, SX3, example_info - - -class TestExtractors(unittest.TestCase): - def setUp(self): - self.RX, self.RX2, self.RX3, self.SX, self.SX2, self.SX3, self.example_info = _create_example(seed=0) - self.test_dir = tempfile.mkdtemp() - self.placeholder_metadata = dict(NWBFile=dict(session_start_time=testing_session_time)) - - def tearDown(self): - del self.RX, self.RX2, self.RX3, self.SX, self.SX2, self.SX3 - shutil.rmtree(self.test_dir) - - def check_si_roundtrip(self, path: FilePathType): - RX_nwb = se.NwbRecordingExtractor(path) - check_recording_return_types(RX_nwb) - check_recordings_equal(self.RX, RX_nwb) - check_dumping(RX_nwb) - - def test_write_recording(self): - path = self.test_dir + "/test.nwb" - - spikeinterface.write_recording(self.RX, path, metadata=self.placeholder_metadata) # testing aliased import - RX_nwb = se.NwbRecordingExtractor(path) - check_recording_return_types(RX_nwb) - check_recordings_equal(self.RX, RX_nwb) - check_dumping(RX_nwb) - del RX_nwb - - write_recording(recording=self.RX, nwbfile_path=path, overwrite=True, metadata=self.placeholder_metadata) - RX_nwb = se.NwbRecordingExtractor(path) - check_recording_return_types(RX_nwb) - check_recordings_equal(self.RX, RX_nwb) - check_dumping(RX_nwb) - - # Test write_electrical_series=False - write_recording( - recording=self.RX, - nwbfile_path=path, - overwrite=True, - write_electrical_series=False, - metadata=self.placeholder_metadata, - ) - with NWBHDF5IO(path, "r") as io: - nwbfile = io.read() - assert len(nwbfile.acquisition) == 0 - assert len(nwbfile.devices) == 1 - assert len(nwbfile.electrode_groups) == 1 - assert len(nwbfile.electrodes) == self.RX.get_num_channels() - # Writing multiple recordings using metadata - metadata = get_default_nwbfile_metadata() - # Re-mapping from spikextractors metadata to new standard (we probably should get rid of this test) - metadata["Ecephys"]["ElectricalSeriesRaw"] = metadata["Ecephys"]["ElectricalSeries_raw"] - metadata["Ecephys"]["ElectricalSeriesLFP"] = metadata["Ecephys"]["ElectricalSeries_lfp"] - metadata["Ecephys"]["ElectricalSeriesProcessed"] = metadata["Ecephys"]["ElectricalSeries_processed"] - metadata["NWBFile"].update(self.placeholder_metadata["NWBFile"]) - path_multi = self.test_dir + "/test_multiple.nwb" - write_recording( - recording=self.RX, - nwbfile_path=path_multi, - metadata=metadata, - write_as="raw", - es_key="ElectricalSeriesRaw", - ) - write_recording( - recording=self.RX2, - nwbfile_path=path_multi, - metadata=metadata, - write_as="processed", - es_key="ElectricalSeriesProcessed", - ) - write_recording( - recording=self.RX3, - nwbfile_path=path_multi, - metadata=metadata, - write_as="lfp", - es_key="ElectricalSeriesLFP", - ) - - RX_nwb = 
se.NwbRecordingExtractor(file_path=path_multi, electrical_series_name="raw_traces") - check_recording_return_types(RX_nwb) - check_recordings_equal(self.RX, RX_nwb) - check_dumping(RX_nwb) - del RX_nwb - - def write_recording_compression(self): - path = self.test_dir + "/test.nwb" - write_recording( - recording=self.RX, nwbfile_path=path, overwrite=True, metadata=self.placeholder_metadata - ) # Testing default compression, should be "gzip" - - compression = "gzip" - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - compression_out = nwbfile.acquisition["ElectricalSeriesRaw"].data.compression - self.assertEqual( - compression_out, - compression, - "Intended compression type does not match what was written! " - f"(Out: {compression_out}, should be: {compression})", - ) - self.check_si_roundtrip(path=path) - - write_recording( - recording=self.RX, - nwbfile_path=path, - overwrite=True, - compression=compression, - metadata=self.placeholder_metadata, - ) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - compression_out = nwbfile.acquisition["ElectricalSeriesRaw"].data.compression - self.assertEqual( - compression_out, - compression, - "Intended compression type does not match what was written! " - f"(Out: {compression_out}, should be: {compression})", - ) - self.check_si_roundtrip(path=path) - - compression = "lzf" - write_recording( - recording=self.RX, - nwbfile_path=path, - overwrite=True, - compression=compression, - metadata=self.placeholder_metadata, - ) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - compression_out = nwbfile.acquisition["ElectricalSeriesRaw"].data.compression - self.assertEqual( - compression_out, - compression, - "Intended compression type does not match what was written! " - f"(Out: {compression_out}, should be: {compression})", - ) - self.check_si_roundtrip(path=path) - - compression = None - write_recording( - recording=self.RX, - nwbfile_path=path, - overwrite=True, - compression=compression, - metadata=self.placeholder_metadata, - ) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - compression_out = nwbfile.acquisition["ElectricalSeriesRaw"].data.compression - self.assertEqual( - compression_out, - compression, - "Intended compression type does not match what was written! " - f"(Out: {compression_out}, should be: {compression})", - ) - self.check_si_roundtrip(path=path) - - def test_write_recording_chunking(self): - path = self.test_dir + "/test.nwb" - - write_recording(recording=self.RX, nwbfile_path=path, overwrite=True, metadata=self.placeholder_metadata) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - chunks_out = nwbfile.acquisition["ElectricalSeriesRaw"].data.chunks - test_iterator = SpikeInterfaceRecordingDataChunkIterator(recording=self.RX) - self.assertEqual( - chunks_out, - test_iterator.chunk_shape, - "Intended chunk shape does not match what was written! 
" - f"(Out: {chunks_out}, should be: {test_iterator.chunk_shape})", - ) - self.check_si_roundtrip(path=path) - - def test_write_sorting(self): - path = self.test_dir + "/test.nwb" - sf = self.RX.get_sampling_frequency() - - # Append sorting to existing file - write_recording(recording=self.RX, nwbfile_path=path, overwrite=True, metadata=self.placeholder_metadata) - spikeinterface.write_sorting(sorting=self.SX, nwbfile_path=path, overwrite=False) # testing aliased import - SX_nwb = se.NwbSortingExtractor(path) - check_sortings_equal(self.SX, SX_nwb) - check_dumping(SX_nwb) - - # Test for handling unit property descriptions argument - property_descriptions = dict(stability="This is a description of stability.") - write_sorting( - sorting=self.SX, - nwbfile_path=path, - property_descriptions=property_descriptions, - overwrite=True, - metadata=self.placeholder_metadata, - ) - SX_nwb = se.NwbSortingExtractor(path, sampling_frequency=sf) - check_sortings_equal(self.SX, SX_nwb) - check_dumping(SX_nwb) - - # Test for handling skip_properties argument - write_sorting( - sorting=self.SX, - nwbfile_path=path, - skip_properties=["stability"], - overwrite=True, - metadata=self.placeholder_metadata, - ) - SX_nwb = se.NwbSortingExtractor(path, sampling_frequency=sf) - assert "stability" not in SX_nwb.get_shared_unit_property_names() - check_sortings_equal(self.SX, SX_nwb) - check_dumping(SX_nwb) - - # Test for handling skip_features argument - # SX2 has timestamps, so loading it back from Nwb will not recover the same spike frames. - write_sorting( - sorting=self.SX2, - nwbfile_path=path, - skip_features=["widths"], - overwrite=True, - metadata=self.placeholder_metadata, - ) - SX_nwb = se.NwbSortingExtractor(path, sampling_frequency=sf) - assert "widths" not in SX_nwb.get_shared_unit_spike_feature_names() - check_sortings_equal(self.SX2, SX_nwb) - check_dumping(SX_nwb) - - write_sorting(sorting=self.SX, nwbfile_path=path, overwrite=True, metadata=self.placeholder_metadata) - write_sorting(sorting=self.SX, nwbfile_path=path, overwrite=False, write_as="processing") - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - units_1_id = nwbfile.units.id[:] - units_1_spike_times = nwbfile.units.spike_times[:] - units_2_id = nwbfile.processing["ecephys"]["units"].id[:] - units_2_spike_times = nwbfile.processing["ecephys"]["units"].spike_times[:] - - np.testing.assert_array_equal(nwbfile.units["float_prop"][:], [80.0, np.nan, np.nan]) - np.testing.assert_array_equal(nwbfile.units["int_prop"][:], [80.0, np.nan, np.nan]) - np.testing.assert_array_equal(nwbfile.units["str_prop"][:], ["test_val", "", ""]) - np.testing.assert_array_equal( - x=units_1_id, - y=units_2_id, - err_msg=f"Processing unit ids do not match! (Out: {units_2_id}, should be: {units_1_id})", - ) - np.testing.assert_array_equal( - x=units_1_spike_times, - y=units_2_spike_times, - err_msg=( - f"Processing unit ids do not match! (Out: {units_2_spike_times}, should be: {units_1_spike_times})" - ), - ) - - units_name = "test_name" - write_sorting( - sorting=self.SX, - nwbfile_path=path, - overwrite=True, - write_as="processing", - units_name=units_name, - metadata=self.placeholder_metadata, - ) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - name_out = nwbfile.processing["ecephys"][units_name].name - self.assertEqual( - name_out, - units_name, - f"Units table name not written correctly! 
(value is: {name_out}, should be: {units_name})", - ) - - units_description = "test_description" - write_sorting(sorting=self.SX, nwbfile_path=path, overwrite=False, units_description=units_description) - SX_nwb = se.NwbSortingExtractor(path, sampling_frequency=sf) - check_sortings_equal(self.SX, SX_nwb) - check_dumping(SX_nwb) - with NWBHDF5IO(path=path, mode="r") as io: - nwbfile = io.read() - description_out = nwbfile.units.description - self.assertEqual( - description_out, - units_description, - "Units table description not written correctly! " - f"(value is: {description_out}, should be: {units_description})", - ) - - def check_metadata_write(self, metadata: dict, nwbfile_path: Path, recording: se.RecordingExtractor): - standard_metadata = get_nwb_metadata(recording=recording) - device_defaults = dict( - name="Device", description="Ecephys probe. Automatically generated." - ) # from the individual add_devices function - electrode_group_defaults = dict( # from the individual add_electrode_groups function - name="Electrode Group", description="no description", location="unknown", device="Device" - ) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - - device_source = metadata["Ecephys"].get("Device", standard_metadata["Ecephys"]["Device"]) - self.assertEqual(len(device_source), len(nwbfile.devices)) - for device in device_source: - device_name = device.get("name", device_defaults["name"]) - self.assertIn(device_name, nwbfile.devices) - self.assertEqual( - device.get("description", device_defaults["description"]), nwbfile.devices[device_name].description - ) - self.assertEqual(device.get("manufacturer"), nwbfile.devices[device["name"]].manufacturer) - electrode_group_source = metadata["Ecephys"].get( - "ElectrodeGroup", standard_metadata["Ecephys"]["ElectrodeGroup"] - ) - self.assertEqual(len(electrode_group_source), len(nwbfile.electrode_groups)) - for group in electrode_group_source: - group_name = group.get("name", electrode_group_defaults["name"]) - self.assertIn(group_name, nwbfile.electrode_groups) - self.assertEqual( - group.get("description", electrode_group_defaults["description"]), - nwbfile.electrode_groups[group_name].description, - ) - self.assertEqual( - group.get("location", electrode_group_defaults["location"]), - nwbfile.electrode_groups[group_name].location, - ) - device_name = group.get("device", electrode_group_defaults["device"]) - self.assertIn(device_name, nwbfile.devices) - self.assertEqual(nwbfile.electrode_groups[group_name].device, nwbfile.devices[device_name]) - n_channels = len(recording.get_channel_ids()) - electrode_source = metadata["Ecephys"].get("Electrodes", []) - self.assertEqual(n_channels, len(nwbfile.electrodes)) - for column in electrode_source: - column_name = column["name"] - self.assertIn(column_name, nwbfile.electrodes) - self.assertEqual(column["description"], getattr(nwbfile.electrodes, column_name).description) - if column_name in ["x", "y", "z", "rel_x", "rel_y", "rel_z"]: - for j in n_channels: - self.assertEqual(column["data"][j], getattr(nwbfile.electrodes[j], column_name).values[0]) - else: - for j in n_channels: - self.assertTrue( - column["data"][j] == getattr(nwbfile.electrodes[j], column_name).values[0] - or ( - np.isnan(column["data"][j]) - and np.isnan(getattr(nwbfile.electrodes[j], column_name).values[0]) - ) - ) - - def test_nwb_metadata(self): - path = self.test_dir + "/test_metadata.nwb" - - write_recording(recording=self.RX, nwbfile_path=path, overwrite=True, 
metadata=self.placeholder_metadata) - self.check_metadata_write(metadata=get_nwb_metadata(recording=self.RX), nwbfile_path=path, recording=self.RX) - - # Manually adjusted device name - must properly adjust electrode_group reference - metadata2 = get_nwb_metadata(recording=self.RX) - metadata2["Ecephys"]["Device"] = [dict(name="TestDevice", description="A test device.", manufacturer="unknown")] - metadata2["Ecephys"]["ElectrodeGroup"][0]["device"] = "TestDevice" - metadata2["NWBFile"].update(self.placeholder_metadata["NWBFile"]) - write_recording(recording=self.RX, metadata=metadata2, nwbfile_path=path, overwrite=True) - self.check_metadata_write(metadata=metadata2, nwbfile_path=path, recording=self.RX) - - # Two devices in metadata - metadata3 = get_nwb_metadata(recording=self.RX) - metadata3["Ecephys"]["Device"].append( - dict(name="Device2", description="A second device.", manufacturer="unknown") - ) - metadata3["NWBFile"].update(self.placeholder_metadata["NWBFile"]) - write_recording(recording=self.RX, metadata=metadata3, nwbfile_path=path, overwrite=True) - self.check_metadata_write(metadata=metadata3, nwbfile_path=path, recording=self.RX) - - # Forcing default auto-population from add_electrode_groups, and not get_nwb_metdata - metadata4 = get_nwb_metadata(recording=self.RX) - metadata4["Ecephys"]["Device"] = [dict(name="TestDevice", description="A test device.", manufacturer="unknown")] - metadata4["Ecephys"].pop("ElectrodeGroup") - metadata4["NWBFile"].update(self.placeholder_metadata["NWBFile"]) - write_recording(recording=self.RX, metadata=metadata4, nwbfile_path=path, overwrite=True) - self.check_metadata_write(metadata=metadata4, nwbfile_path=path, recording=self.RX) - - -class TestWriteElectrodes(unittest.TestCase): - def setUp(self): - self.RX, self.RX2, _, _, _, _, _ = _create_example(seed=0) - self.test_dir = tempfile.mkdtemp() - self.path1 = self.test_dir + "/test_electrodes1.nwb" - self.path2 = self.test_dir + "/test_electrodes2.nwb" - self.path3 = self.test_dir + "/test_electrodes3.nwb" - self.nwbfile1 = NWBFile("sess desc1", "file id1", testing_session_time) - self.nwbfile2 = NWBFile("sess desc2", "file id2", testing_session_time) - self.nwbfile3 = NWBFile("sess desc3", "file id3", testing_session_time) - self.metadata_list = [dict(Ecephys={i: dict(name=i, description="desc")}) for i in ["es1", "es2"]] - - # change channel_ids - id_offset = np.max(self.RX.get_channel_ids()) - self.RX2 = se.subrecordingextractor.SubRecordingExtractor( - self.RX2, renamed_channel_ids=np.array(self.RX2.get_channel_ids()) + id_offset + 1 - ) - self.RX2.set_channel_groups([2 * i for i in self.RX.get_channel_groups()]) - # add common properties: - for no, (chan_id1, chan_id2) in enumerate(zip(self.RX.get_channel_ids(), self.RX2.get_channel_ids())): - self.RX2.set_channel_property(chan_id2, "prop1", "10Hz") - self.RX.set_channel_property(chan_id1, "prop1", "10Hz") - self.RX2.set_channel_property(chan_id2, "brain_area", "M1") - self.RX.set_channel_property(chan_id1, "brain_area", "PMd") - self.RX2.set_channel_property(chan_id2, "group_name", "M1") - self.RX.set_channel_property(chan_id1, "group_name", "PMd") - if no % 2 == 0: - self.RX2.set_channel_property(chan_id2, "prop2", float(chan_id2)) - self.RX.set_channel_property(chan_id1, "prop2", float(chan_id1)) - self.RX2.set_channel_property(chan_id2, "prop3", str(chan_id2)) - self.RX.set_channel_property(chan_id1, "prop3", str(chan_id1)) - - def test_append_same_properties(self): - self.nwbfile1 = write_recording( - recording=self.RX, 
nwbfile=self.nwbfile1, metadata=self.metadata_list[0], es_key="es1" - ) - self.nwbfile1 = write_recording( - recording=self.RX2, nwbfile=self.nwbfile1, metadata=self.metadata_list[1], es_key="es2" - ) - with NWBHDF5IO(str(self.path1), "w") as io: - io.write(self.nwbfile1) - with NWBHDF5IO(str(self.path1), "r") as io: - nwb = io.read() - assert all(nwb.electrodes.id.data[()] == np.array(self.RX.get_channel_ids() + self.RX2.get_channel_ids())) - assert all([i in nwb.electrodes.colnames for i in ["prop1", "prop2", "prop3"]]) - for i, chan_id in enumerate(nwb.electrodes.id.data): - assert nwb.electrodes["prop1"][i] == "10Hz" - if chan_id in self.RX.get_channel_ids(): - assert nwb.electrodes["location"][i] == "PMd" - assert nwb.electrodes["group_name"][i] == "PMd" - assert nwb.electrodes["group"][i].name == "PMd" - else: - assert nwb.electrodes["location"][i] == "M1" - assert nwb.electrodes["group_name"][i] == "M1" - assert nwb.electrodes["group"][i].name == "M1" - if i % 2 == 0: - assert nwb.electrodes["prop2"][i] == chan_id - assert nwb.electrodes["prop3"][i] == str(chan_id) - else: - assert np.isnan(nwb.electrodes["prop2"][i]) - assert nwb.electrodes["prop3"][i] == "" - - def test_different_channel_properties(self): - for chan_id in self.RX2.get_channel_ids(): - self.RX2.clear_channel_property(chan_id, "prop2") - self.RX2.set_channel_property(chan_id, "prop_new", chan_id) - self.nwbfile1 = write_recording( - recording=self.RX, nwbfile=self.nwbfile1, metadata=self.metadata_list[0], es_key="es1" - ) - self.nwbfile1 = write_recording( - recording=self.RX2, nwbfile=self.nwbfile1, metadata=self.metadata_list[1], es_key="es2" - ) - with NWBHDF5IO(str(self.path1), "w") as io: - io.write(self.nwbfile1) - with NWBHDF5IO(str(self.path1), "r") as io: - nwb = io.read() - for i, chan_id in enumerate(nwb.electrodes.id.data): - if i < len(nwb.electrodes.id.data) / 2: - assert np.isnan(nwb.electrodes["prop_new"][i]) - if i % 2 == 0: - assert nwb.electrodes["prop2"][i] == chan_id - else: - assert np.isnan(nwb.electrodes["prop2"][i]) - else: - assert np.isnan(nwb.electrodes["prop2"][i]) - assert nwb.electrodes["prop_new"][i] == chan_id - - def test_group_set_custom_description(self): - for i, grp_name in enumerate(["PMd", "M1"]): - self.metadata_list[i]["Ecephys"].update( - ElectrodeGroup=[dict(name=grp_name, description=grp_name + " description")] - ) - self.nwbfile1 = write_recording( - recording=self.RX, nwbfile=self.nwbfile1, metadata=self.metadata_list[0], es_key="es1" - ) - self.nwbfile1 = write_recording( - recording=self.RX2, nwbfile=self.nwbfile1, metadata=self.metadata_list[1], es_key="es2" - ) - with NWBHDF5IO(str(self.path1), "w") as io: - io.write(self.nwbfile1) - with NWBHDF5IO(str(self.path1), "r") as io: - nwb = io.read() - for i, chan_id in enumerate(nwb.electrodes.id.data): - if i < len(nwb.electrodes.id.data) / 2: - assert nwb.electrodes["group_name"][i] == "PMd" - assert nwb.electrodes["group"][i].description == "PMd description" - else: - assert nwb.electrodes["group_name"][i] == "M1" - assert nwb.electrodes["group"][i].description == "M1 description" From 91aaf7d0f5b4e14fe6730fe833a0eb03d11c47b0 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 24 Feb 2023 14:35:07 +0100 Subject: [PATCH 15/28] Update NumpySorting instantiation --- tests/test_ecephys/test_ecephys_interfaces.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/test_ecephys/test_ecephys_interfaces.py b/tests/test_ecephys/test_ecephys_interfaces.py index 940a885a6..d5346af88 100644 
--- a/tests/test_ecephys/test_ecephys_interfaces.py +++ b/tests/test_ecephys/test_ecephys_interfaces.py @@ -52,10 +52,14 @@ class TestSortingInterface(unittest.TestCase): def setUp(self) -> None: self.sorting_start_frames = [100, 200, 300] self.num_frames = 1000 - sorting = NumpySorting(sampling_frequency=3000.0) - sorting.add_unit(unit_id=1, times=np.arange(self.sorting_start_frames[0], self.num_frames)) - sorting.add_unit(unit_id=2, times=np.arange(self.sorting_start_frames[1], self.num_frames)) - sorting.add_unit(unit_id=3, times=np.arange(self.sorting_start_frames[2], self.num_frames)) + times = np.array([], dtype="int") + labels = np.array([], dtype="int") + for i, start_frame in enumerate(self.sorting_start_frames): + times_i = np.arange(start_frame, self.num_frames, dtype="int") + labels_i = (i + 1) * np.ones_like(times_i, dtype="int") + times = np.concatenate((times, times_i)) + labels = np.concatenate((labels, labels_i)) + sorting = NumpySorting.from_times_labels(times, labels, sampling_frequency=3000.0) class TestSortingInterface(BaseSortingExtractorInterface): ExtractorName = "NumpySorting" From ff6f2256df13e592b3cd2d0ae320916b5976d5a7 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 18:56:14 -0500 Subject: [PATCH 16/28] fix stubbing --- .../ecephys/basesortingextractorinterface.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py index 930513bec..9b1c1225f 100644 --- a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py @@ -81,9 +81,6 @@ def align_timestamps(self, synchronized_timestamps: np.ndarray): self.sorting_extractor.set_times(times=synchronized_timestamps) def subset_sorting(self): - from spikeextractors import SortingExtractor, SubSortingExtractor - from spikeinterface import BaseSorting - max_min_spike_time = max( [ min(x) @@ -93,17 +90,7 @@ def subset_sorting(self): ] ) end_frame = 1.1 * max_min_spike_time - if isinstance(self.sorting_extractor, SortingExtractor): - stub_sorting_extractor = SubSortingExtractor( - self.sorting_extractor, - unit_ids=self.sorting_extractor.get_unit_ids(), - start_frame=0, - end_frame=end_frame, - ) - elif isinstance(self.sorting_extractor, BaseSorting): - stub_sorting_extractor = self.sorting_extractor.frame_slice(start_frame=0, end_frame=end_frame) - else: - raise TypeError(f"{self.sorting_extractor} should be either se.SortingExtractor or si.BaseSorting") + stub_sorting_extractor = self.sorting_extractor.frame_slice(start_frame=0, end_frame=end_frame) return stub_sorting_extractor def run_conversion( From 5603d034a695dc64abd4c3550957782fb79cad84 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Fri, 24 Feb 2023 19:17:58 -0500 Subject: [PATCH 17/28] fix conflict --- .../ecephys/spikeglx/spikeglxdatainterface.py | 37 +------------------ 1 file changed, 1 insertion(+), 36 deletions(-) diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index 23275156a..d39b78b4a 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -1,14 +1,11 @@ """The primary data interfaces for SpikeGLX.""" from pathlib import Path import json -<<<<<<< HEAD from typing 
import Optional from pynwb import NWBFile from pynwb.ecephys import ElectricalSeries -======= -from warnings import warn ->>>>>>> main + from ..baserecordingextractorinterface import BaseRecordingExtractorInterface from ....utils import get_schema_from_method_signature, get_schema_from_hdmf_class, FilePathType, dict_deep_update @@ -65,7 +62,6 @@ def __init__( self.stream_id = fetch_stream_id_for_spikelgx_file(file_path) -<<<<<<< HEAD file_path = Path(file_path) folder_path = file_path.parent super().__init__( @@ -75,37 +71,6 @@ def __init__( ) self.source_data["file_path"] = str(file_path) self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"] -======= - if spikeextractors_backend: # pragma: no cover - # TODO: Remove spikeextractors backend - warn( - message=( - "Interfaces using a spikeextractors backend will soon be deprecated! " - "Please use the SpikeInterface backend instead." - ), - category=DeprecationWarning, - stacklevel=2, - ) - from spikeextractors import SpikeGLXRecordingExtractor - from spikeinterface.core.old_api_utils import OldToNewRecording - - self.Extractor = SpikeGLXRecordingExtractor - super().__init__(file_path=str(file_path), verbose=verbose, es_key=es_key) - _assert_single_shank_for_spike_extractors(self.recording_extractor) - self.meta = _fetch_metadata_dic_for_spikextractors_spikelgx_object(self.recording_extractor) - self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor) - else: - file_path = Path(file_path) - folder_path = file_path.parent - super().__init__( - folder_path=folder_path, - stream_id=self.stream_id, - verbose=verbose, - es_key=es_key, - ) - self.source_data["file_path"] = str(file_path) - self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"] ->>>>>>> main # Mount the probe # TODO - this can be removed in the next release of SpikeInterface (probe mounts automatically) From be212ad50f891cad43745f84b1b17c3b2d3c358e Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sat, 25 Feb 2023 12:18:36 -0500 Subject: [PATCH 18/28] fix --- src/neuroconv/tools/spikeinterface/spikeinterface.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py index e164a46a3..d920d39dc 100644 --- a/src/neuroconv/tools/spikeinterface/spikeinterface.py +++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py @@ -14,7 +14,6 @@ from hdmf.data_utils import DataChunkIterator, AbstractDataChunkIterator from nwbinspector.utils import get_package_version from packaging.version import Version -from spikeextractors import RecordingExtractor, SortingExtractor from spikeinterface import BaseRecording, BaseSorting, WaveformExtractor from numbers import Real from hdmf.data_utils import DataChunkIterator, AbstractDataChunkIterator @@ -599,9 +598,11 @@ def add_electrical_series( assert es_key in metadata["Ecephys"], f"metadata['Ecephys'] dictionary does not contain key '{es_key}'" eseries_kwargs.update(metadata["Ecephys"][es_key]) - # If the recording extractor has more than 1 segment, append numbers to the names so that the names are unique. 0-pad these names based on the number of segments. If there are 10 segments use 2 digits, if there are 100 segments use 3 digits, etc. 
- if checked_recording.get_num_segments() > 1: - width = int(np.ceil(np.log10(checked_recording.get_num_segments()))) + # If the recording extractor has more than 1 segment, append numbers to the names so that the names are unique. + # 0-pad these names based on the number of segments. + # If there are 10 segments use 2 digits, if there are 100 segments use 3 digits, etc. + if recording.get_num_segments() > 1: + width = int(np.ceil(np.log10((recording.get_num_segments())))) eseries_kwargs["name"] += f"{segment_index:0{width}}" # Indexes by channel ids if they are integer or by indices otherwise. From b855f09ef1c42c91998003433c3c7146277ad3d1 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sat, 25 Feb 2023 13:58:00 -0500 Subject: [PATCH 19/28] fix --- .../ecephys/baserecordingextractorinterface.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py index 052703b23..6c87b28c2 100644 --- a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py @@ -111,9 +111,6 @@ def subset_recording(self, stub_test: bool = False): ---------- stub_test : bool, default: False """ - from spikeextractors import RecordingExtractor, SubRecordingExtractor - from spikeinterface import BaseRecording - kwargs = dict() if stub_test: num_frames = 100 @@ -121,12 +118,7 @@ def subset_recording(self, stub_test: bool = False): kwargs.update(end_frame=end_frame) if self.subset_channels is not None: kwargs.update(channel_ids=self.subset_channels) - if isinstance(self.recording_extractor, RecordingExtractor): - recording_extractor = SubRecordingExtractor(self.recording_extractor, **kwargs) - elif isinstance(self.recording_extractor, BaseRecording): - recording_extractor = self.recording_extractor.frame_slice(start_frame=0, end_frame=end_frame) - else: - raise TypeError(f"{self.recording_extractor} should be either se.RecordingExtractor or si.BaseRecording") + recording_extractor = self.recording_extractor.frame_slice(start_frame=0, end_frame=end_frame) return recording_extractor def run_conversion( From d0717d180458057dca2f7dca2eeaf89c5ef8da97 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sat, 25 Feb 2023 17:15:33 -0500 Subject: [PATCH 20/28] fix --- .../test_gin_ecephys/test_sorting.py | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/tests/test_on_data/test_gin_ecephys/test_sorting.py b/tests/test_on_data/test_gin_ecephys/test_sorting.py index b4134a355..1552f14cb 100644 --- a/tests/test_on_data/test_gin_ecephys/test_sorting.py +++ b/tests/test_on_data/test_gin_ecephys/test_sorting.py @@ -4,8 +4,6 @@ import numpy as np from parameterized import parameterized, param -from spikeextractors import NwbSortingExtractor, SortingExtractor -from spikeextractors.testing import check_sortings_equal from spikeinterface.core.testing import check_sortings_equal as check_sorting_equal_si from spikeinterface.extractors import NwbSortingExtractor as NwbSortingExtractorSI @@ -140,19 +138,15 @@ class TestConverter(NWBConverter): sf = 30000 sorting.set_sampling_frequency(sf) - if isinstance(sorting, SortingExtractor): - nwb_sorting = NwbSortingExtractor(file_path=nwbfile_path, sampling_frequency=sf) - check_sortings_equal(SX1=sorting, SX2=nwb_sorting) - else: - # NWBSortingExtractor on spikeinterface does not yet support loading data written 
from multiple segment. - if sorting.get_num_segments() == 1: - nwb_sorting = NwbSortingExtractorSI(file_path=nwbfile_path, sampling_frequency=sf) - # In the NWBSortingExtractor, since unit_names could be not unique, - # table "ids" are loaded as unit_ids. Here we rename the original sorting accordingly - sorting_renamed = sorting.select_units( - unit_ids=sorting.unit_ids, renamed_unit_ids=np.arange(len(sorting.unit_ids)) - ) - check_sorting_equal_si(SX1=sorting_renamed, SX2=nwb_sorting) + # NWBSortingExtractor on spikeinterface does not yet support loading data written from multiple segments. + if sorting.get_num_segments() == 1: + nwb_sorting = NwbSortingExtractorSI(file_path=nwbfile_path, sampling_frequency=sf) + # In the NWBSortingExtractor, since unit_names could be not unique, + # table "ids" are loaded as unit_ids. Here we rename the original sorting accordingly + sorting_renamed = sorting.select_units( + unit_ids=sorting.unit_ids, renamed_unit_ids=np.arange(len(sorting.unit_ids)) + ) + check_sorting_equal_si(SX1=sorting_renamed, SX2=nwb_sorting) if __name__ == "__main__": From 37227af84167ab8b2f05d25b84731a697f29fd5d Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sat, 25 Feb 2023 17:27:04 -0500 Subject: [PATCH 21/28] subtle issue in previous conflict resolution --- .../datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index d39b78b4a..fb826d0ef 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -68,6 +68,7 @@ def __init__( folder_path=folder_path, stream_id=self.stream_id, verbose=verbose, + es_key=self.es_key, ) self.source_data["file_path"] = str(file_path) self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"] From 385843b824f8858d4f4b087d3308b2563e7a5b1d Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sat, 25 Feb 2023 18:05:10 -0500 Subject: [PATCH 22/28] subtle issue in previous conflict resolution --- .../datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index fb826d0ef..b9417c030 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -68,7 +68,7 @@ def __init__( folder_path=folder_path, stream_id=self.stream_id, verbose=verbose, - es_key=self.es_key, + es_key=es_key, ) self.source_data["file_path"] = str(file_path) self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, self.stream_id)]["meta"] From f7ffeca33e727c6b42b6e89180af247b5a7a43c8 Mon Sep 17 00:00:00 2001 From: CodyCBakerPhD Date: Sun, 26 Feb 2023 11:18:40 -0500 Subject: [PATCH 23/28] final cleaning?
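The spikeextractors_backend parameter sweep is gone from the sorting tests, so each interface is now exercised once through its SpikeInterface extractor. For reference, a minimal stand-alone sketch of the remaining NeuroScope case (the paths below are hypothetical placeholders for the GIN test data):

    from neuroconv.datainterfaces import NeuroScopeSortingInterface

    interface = NeuroScopeSortingInterface(
        folder_path="neuroscope/dataset_1",
        xml_file_path="neuroscope/dataset_1/YutaMouse42-151117.xml",
    )
    metadata = interface.get_metadata()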
--- .../test_gin_ecephys/test_sorting.py | 39 +++++++------------ 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/tests/test_on_data/test_gin_ecephys/test_sorting.py b/tests/test_on_data/test_gin_ecephys/test_sorting.py index 1552f14cb..2ffa4701c 100644 --- a/tests/test_on_data/test_gin_ecephys/test_sorting.py +++ b/tests/test_on_data/test_gin_ecephys/test_sorting.py @@ -90,32 +90,23 @@ class TestEcephysSortingNwbConversions(unittest.TestCase): interface_kwargs=dict(file_path=str(DATA_PATH / "plexon" / "File_plexon_2.plx")), case_name="plexon_sorting", ), + param( + data_interface=NeuroScopeSortingInterface, + interface_kwargs=dict( + folder_path=str(DATA_PATH / "neuroscope" / "dataset_1"), + xml_file_path=str(DATA_PATH / "neuroscope" / "dataset_1" / "YutaMouse42-151117.xml"), + ), + case_name="neuroscope_sorting", + ), + param( + data_interface=PhySortingInterface, + interface_kwargs=dict( + folder_path=str(DATA_PATH / "phy" / "phy_example_0"), + ), + case_name="phy_sorting", + ), ] - for spikeextractors_backend in [False, True]: - parameterized_sorting_list.append( - param( - data_interface=NeuroScopeSortingInterface, - interface_kwargs=dict( - folder_path=str(DATA_PATH / "neuroscope" / "dataset_1"), - xml_file_path=str(DATA_PATH / "neuroscope" / "dataset_1" / "YutaMouse42-151117.xml"), - spikeextractors_backend=spikeextractors_backend, - ), - case_name=f"spikeextractors_backend_{spikeextractors_backend}", - ) - ) - - parameterized_sorting_list.append( - param( - data_interface=PhySortingInterface, - interface_kwargs=dict( - folder_path=str(DATA_PATH / "phy" / "phy_example_0"), - spikeextractors_backend=spikeextractors_backend, - ), - case_name=f"spikeextractors_backend_{spikeextractors_backend}", - ) - ) - @parameterized.expand(input=parameterized_sorting_list, name_func=custom_name_func) def test_convert_sorting_extractor_to_nwb(self, data_interface, interface_kwargs, case_name=""): nwbfile_path = str(self.savedir / f"{data_interface.__name__}_{case_name}.nwb") From fb30aa5d480db3d72d117bc1105dbdb3802ee2fc Mon Sep 17 00:00:00 2001 From: Ben Dichter Date: Sun, 26 Feb 2023 13:53:26 -0500 Subject: [PATCH 24/28] Update CHANGELOG.md --- CHANGELOG.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c49f8f69..bc55870f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,7 @@ ### Back-compatibility break * `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324) -* The `spikeextractor_backend` option was removed for several `RecordingExtractorInterface` classes. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324) -* The `spikeextractor_backend` option was removed for all remaining classes. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) +* The `spikeextractor_backend` option was removed for all`RecordingExtractorInterface` classes. ([PR #324](https://github.com/catalystneuro/neuroconv/pull/324), [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)] * The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `NeuroScopeMultiRecordingExtractor` has been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `SIPickle` interfaces have been removed. 
From 37227af84167ab8b2f05d25b84731a697f29fd5d Mon Sep 17 00:00:00 2001
From: Ben Dichter
Date: Sun, 26 Feb 2023 13:53:34 -0500
Subject: [PATCH 25/28] Update CHANGELOG.md

---
 CHANGELOG.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc55870f2..cf548f44d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,6 @@
 * `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324)
 * The `spikeextractor_backend` option was removed for all`RecordingExtractorInterface` classes. ([PR #324](https://github.com/catalystneuro/neuroconv/pull/324), [PR #309](https://github.com/catalystneuro/neuroconv/pull/309))
 * The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
-* The `NeuroScopeMultiRecordingExtractor` has been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
 * The `SIPickle` interfaces have been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
 * The previous conversion option `es_key` has been moved to the `__init__` of all `BaseRecordingExtractorInterface` classes. It is no longer possible to use this argument in the `run_conversion` method. [PR #318](https://github.com/catalystneuro/neuroconv/pull/318)

From d3e598717e78241be5683e33065826880483d4d3 Mon Sep 17 00:00:00 2001
From: Ben Dichter
Date: Sun, 26 Feb 2023 13:53:42 -0500
Subject: [PATCH 26/28] Update src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py

---
 .../datainterfaces/ecephys/axona/axonadatainterface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
index 265f6c728..25a22cef1 100644
--- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py
@@ -11,7 +11,7 @@

 class AxonaRecordingInterface(BaseRecordingExtractorInterface):
     """
-    Primary data interface class for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`.
+    DataInterface for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`.
""" def __init__(self, file_path: FilePathType, verbose: bool = True, es_key: str = "ElectricalSeries"): From 1953f3f06c35db73640cd9f19b7947ac5fa046df Mon Sep 17 00:00:00 2001 From: Ben Dichter Date: Sun, 26 Feb 2023 13:53:47 -0500 Subject: [PATCH 27/28] Update src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py --- .../datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index b9417c030..991042972 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -1,4 +1,4 @@ -"""The primary data interfaces for SpikeGLX.""" +"""DataInterfaces for SpikeGLX.""" from pathlib import Path import json from typing import Optional From fb06914dc95f43c7404e257934c85c97d1ad17ca Mon Sep 17 00:00:00 2001 From: Ben Dichter Date: Sun, 26 Feb 2023 14:36:33 -0500 Subject: [PATCH 28/28] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf548f44d..5ceabeb4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ### Back-compatibility break * `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324) -* The `spikeextractor_backend` option was removed for all`RecordingExtractorInterface` classes. ([PR #324](https://github.com/catalystneuro/neuroconv/pull/324), [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)] +* The `spikeextractor_backend` option was removed for all `RecordingExtractorInterface` classes. ([PR #324](https://github.com/catalystneuro/neuroconv/pull/324), [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)] * The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The `SIPickle` interfaces have been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309) * The previous conversion option `es_key` has been moved to the `__init__` of all `BaseRecordingExtractorInterface` classes. It is no longer possible to use this argument in the `run_conversion` method. [PR #318](https://github.com/catalystneuro/neuroconv/pull/318)