
Commit

Merge branch 'main' into spike2
bendichter committed Feb 26, 2023
2 parents a977376 + dc517e2 commit 9aaa91f
Showing 20 changed files with 139 additions and 1,390 deletions.
4 changes: 3 additions & 1 deletion CHANGELOG.md
@@ -2,7 +2,9 @@

### Back-compatibility break
* `ExtractorInterface` classes now access their extractor with the classmethod `cls.get_extractor()` instead of the attribute `self.Extractor`. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324)
* The `spikeextractor_backend` option was removed for several `RecordingExtractorInterface` classes. [PR #324](https://github.com/catalystneuro/neuroconv/pull/324)
* The `spikeextractors_backend` option was removed for all `RecordingExtractorInterface` classes. ([PR #324](https://github.com/catalystneuro/neuroconv/pull/324), [PR #309](https://github.com/catalystneuro/neuroconv/pull/309))
* The `NeuroScopeMultiRecordingExtractor` has been removed. If your conversion required this, please submit an issue requesting instructions for how to implement it. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
* The `SIPickle` interfaces have been removed. [PR #309](https://github.com/catalystneuro/neuroconv/pull/309)
* The previous conversion option `es_key` has been moved to the `__init__` of all `BaseRecordingExtractorInterface` classes. It is no longer possible to use this argument in the `run_conversion` method. [PR #318](https://github.com/catalystneuro/neuroconv/pull/318)

### Features
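A minimal sketch of the new extractor-access pattern described in the changelog above, using an interface imported elsewhere in this commit (the usage is illustrative, not part of the diff):

from neuroconv.datainterfaces import SpikeGLXRecordingInterface

# PR #324 pattern: the extractor class comes from a classmethod, so it can be
# inspected without instantiating the interface.
extractor_class = SpikeGLXRecordingInterface.get_extractor()

# The removed pattern read the extractor from an instance attribute instead,
# e.g. `self.Extractor`, inside interface methods.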
8 changes: 0 additions & 8 deletions src/neuroconv/datainterfaces/__init__.py
@@ -2,16 +2,11 @@
from .ecephys.neuroscope.neuroscopedatainterface import (
NeuroScopeRecordingInterface,
NeuroScopeLFPInterface,
NeuroScopeMultiRecordingTimeInterface,
NeuroScopeSortingInterface,
)
from .ecephys.spikeglx.spikeglxdatainterface import SpikeGLXRecordingInterface, SpikeGLXLFPInterface
from .ecephys.spikeglx.spikeglxnidqinterface import SpikeGLXNIDQInterface
from .ecephys.spikegadgets.spikegadgetsdatainterface import SpikeGadgetsRecordingInterface
from .ecephys.spikeinterface.sipickledatainterfaces import (
SIPickleRecordingInterface,
SIPickleSortingInterface,
)
from .ecephys.intan.intandatainterface import IntanRecordingInterface
from .ecephys.spike2.spike2datainterface import Spike2RecordingInterface
from .ecephys.spike2.spike2datainterface import CEDRecordingInterface
@@ -70,16 +65,13 @@
NeuralynxRecordingInterface,
NeuralynxSortingInterface,
NeuroScopeRecordingInterface,
NeuroScopeMultiRecordingTimeInterface,
NeuroScopeSortingInterface,
NeuroScopeLFPInterface,
Spike2RecordingInterface,
SpikeGLXRecordingInterface,
SpikeGLXLFPInterface,
SpikeGLXNIDQInterface,
SpikeGadgetsRecordingInterface,
SIPickleRecordingInterface,
SIPickleSortingInterface,
IntanRecordingInterface,
CEDRecordingInterface,
CellExplorerSortingInterface,
@@ -10,8 +10,9 @@


class AxonaRecordingInterface(BaseRecordingExtractorInterface):
"""Primary data interface class for converting a Axona data using a
:py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`."""
"""
DataInterface for converting raw Axona data using a :py:class:`~spikeinterface.extractors.AxonaRecordingExtractor`.
"""

def __init__(self, file_path: FilePathType, verbose: bool = True, es_key: str = "ElectricalSeries"):
"""
@@ -111,22 +111,14 @@ def subset_recording(self, stub_test: bool = False):
----------
stub_test : bool, default: False
"""
from spikeextractors import RecordingExtractor, SubRecordingExtractor
from spikeinterface import BaseRecording

kwargs = dict()
if stub_test:
num_frames = 100
end_frame = min([num_frames, self.recording_extractor.get_num_frames()])
kwargs.update(end_frame=end_frame)
if self.subset_channels is not None:
kwargs.update(channel_ids=self.subset_channels)
if isinstance(self.recording_extractor, RecordingExtractor):
recording_extractor = SubRecordingExtractor(self.recording_extractor, **kwargs)
elif isinstance(self.recording_extractor, BaseRecording):
recording_extractor = self.recording_extractor.frame_slice(start_frame=0, end_frame=end_frame)
else:
raise TypeError(f"{self.recording_extractor} should be either se.RecordingExtractor or si.BaseRecording")
recording_extractor = self.recording_extractor.frame_slice(start_frame=0, end_frame=end_frame)
return recording_extractor

def run_conversion(
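With the spikeextractors branch gone, `subset_recording` goes straight through spikeinterface's `frame_slice`. A small sketch of that subsetting step on a toy recording (the `NumpyRecording` setup is illustrative, not from this diff):

import numpy as np
from spikeinterface.core import NumpyRecording

# Toy recording: 1,000 frames x 4 channels at 30 kHz
traces = np.random.randn(1000, 4).astype("float32")
recording = NumpyRecording(traces_list=[traces], sampling_frequency=30_000.0)

# Stub-test subsetting as in subset_recording above: keep at most the first 100 frames
end_frame = min(100, recording.get_num_frames())
stub = recording.frame_slice(start_frame=0, end_frame=end_frame)
assert stub.get_num_frames() == end_frame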
@@ -81,9 +81,6 @@ def align_timestamps(self, synchronized_timestamps: np.ndarray):
self.sorting_extractor.set_times(times=synchronized_timestamps)

def subset_sorting(self):
from spikeextractors import SortingExtractor, SubSortingExtractor
from spikeinterface import BaseSorting

max_min_spike_time = max(
[
min(x)
@@ -93,17 +90,7 @@ def subset_sorting(self):
]
)
end_frame = 1.1 * max_min_spike_time
if isinstance(self.sorting_extractor, SortingExtractor):
stub_sorting_extractor = SubSortingExtractor(
self.sorting_extractor,
unit_ids=self.sorting_extractor.get_unit_ids(),
start_frame=0,
end_frame=end_frame,
)
elif isinstance(self.sorting_extractor, BaseSorting):
stub_sorting_extractor = self.sorting_extractor.frame_slice(start_frame=0, end_frame=end_frame)
else:
raise TypeError(f"{self.sorting_extractor} should be either se.SortingExtractor or si.BaseSorting")
stub_sorting_extractor = self.sorting_extractor.frame_slice(start_frame=0, end_frame=end_frame)
return stub_sorting_extractor

def run_conversion(
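Similarly, `subset_sorting` now relies only on spikeinterface's `BaseSorting.frame_slice`. A sketch, assuming the 0.97-era `NumpySorting.from_times_labels` helper from the spikeinterface version pinned in this repo's requirements:

import numpy as np
from spikeinterface.core import NumpySorting

# Toy sorting: two units, spike frames given in samples at 30 kHz
times = np.array([10, 50, 120, 400, 900])
labels = np.array([0, 1, 0, 1, 0])
sorting = NumpySorting.from_times_labels(times, labels, sampling_frequency=30_000.0)

# Stub subsetting as in subset_sorting above: cut at 1.1x the latest first-spike frame
max_min_spike_time = max(min(sorting.get_unit_spike_train(unit_id=u)) for u in sorting.get_unit_ids())
end_frame = int(1.1 * max_min_spike_time)
stub_sorting = sorting.frame_slice(start_frame=0, end_frame=end_frame)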
@@ -127,65 +127,6 @@ def get_metadata(self):
return metadata


class NeuroScopeMultiRecordingTimeInterface(NeuroScopeRecordingInterface):
"""Primary data interface class for converting a NeuroscopeMultiRecordingTimeExtractor."""

RXModule = "spikeextractors"
RXName = "NeuroscopeMultiRecordingTimeExtractor"

def __init__(
self,
folder_path: FolderPathType,
gain: Optional[float] = None,
xml_file_path: OptionalFilePathType = None,
):
"""
Load and prepare raw acquisition data and corresponding metadata from the Neuroscope format (.dat files).
For all the .dat files in the folder_path, this concatenates them in time assuming no gaps in between.
If there are gaps, timestamps inside the RecordingExtractor should be overridden.
Parameters
----------
folder_path : FolderPathType
Path to folder of multiple .dat files.
gain : Optional[float], optional
Conversion factors from int16 to Volts are not contained in xml_file_path; set them explicitly here.
Most common value is 0.195 for an intan recording system.
The default is None.
xml_file_path : OptionalFilePathType, optional
Path to .xml file containing device and electrode configuration.
If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file.
The default is None.
"""
# TODO: Remove this sub interface
warn(
message=(
"Interfaces using a spikeextractors backend will soon be deprecated! "
"Please use the SpikeInterface backend with multiple segments instead."
),
category=DeprecationWarning,
stacklevel=2,
)

get_package(package_name="lxml")
from spikeinterface.core.old_api_utils import OldToNewRecording

if xml_file_path is None:
xml_file_path = get_xml_file_path(data_file_path=folder_path)
super(NeuroScopeRecordingInterface, self).__init__(
folder_path=folder_path,
gain=gain,
xml_file_path=xml_file_path,
)
self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor)

self.recording_extractor = subset_shank_channels(
recording_extractor=self.recording_extractor, xml_file_path=xml_file_path
)
add_recording_extractor_properties(recording_extractor=self.recording_extractor, xml_file_path=xml_file_path)


class NeuroScopeLFPInterface(BaseLFPExtractorInterface):
"""Primary data interface class for converting Neuroscope LFP data."""

@@ -196,7 +137,6 @@ def __init__(
file_path: FilePathType,
gain: Optional[float] = None,
xml_file_path: OptionalFilePathType = None,
spikeextractors_backend: bool = False,
):
"""
Load and prepare lfp data and corresponding metadata from the Neuroscope format (.eeg or .lfp files).
@@ -213,35 +153,14 @@
Path to .xml file containing device and electrode configuration.
If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file.
The default is None.
spikeextractors_backend : bool
False by default. When True the interface uses the old extractor from the spikextractors library instead
of a new spikeinterface object.
"""
get_package(package_name="lxml")

if xml_file_path is None:
xml_file_path = get_xml_file_path(data_file_path=file_path)

if spikeextractors_backend:
# TODO: Remove spikeextractors backend
warn(
message=(
"Interfaces using a spikeextractors backend will soon be deprecated! "
"Please use the SpikeInterface backend instead."
),
category=DeprecationWarning,
stacklevel=2,
)

from spikeextractors import NeuroscopeRecordingExtractor
from spikeinterface.core.old_api_utils import OldToNewRecording

self.Extractor = NeuroscopeRecordingExtractor
super().__init__(file_path=file_path, xml_file_path=xml_file_path)
self.recording_extractor = OldToNewRecording(oldapi_recording_extractor=self.recording_extractor)
else:
super().__init__(file_path=file_path)
self.source_data["xml_file_path"] = xml_file_path
super().__init__(file_path=file_path)
self.source_data["xml_file_path"] = xml_file_path

add_recording_extractor_properties(
recording_extractor=self.recording_extractor, xml_file_path=xml_file_path, gain=gain
@@ -269,12 +188,6 @@ def __init__(
exclude_shanks: Optional[list] = None,
xml_file_path: OptionalFilePathType = None,
verbose: bool = True,
spikeextractors_backend: bool = False,
# TODO: we can enable this once
# a) waveforms on unit columns support conversion factor in NWB
# b) write_sorting utils support writing said waveforms properly to a units table
# load_waveforms: bool = False,
# gain: Optional[float] = None,
):
"""
Load and prepare spike sorted data and corresponding metadata from the Neuroscope format (.res/.clu files).
@@ -293,47 +206,15 @@
Path to .xml file containing device and electrode configuration.
If unspecified, it will be automatically set as the only .xml file in the same folder as the .dat file.
The default is None.
load_waveforms : bool, optional
If True, extracts waveform data from .spk.%i files in the path corresponding to
the .res.%i and .clue.%i files and sets these as unit spike features.
The default is False.
Not currently in use pending updates to NWB waveforms.
gain : float, optional
If loading waveforms, this value converts the data type of the waveforms to units of microvolts.
Conversion factors from int16 to Volts are not contained in xml_file_path; set them explicitly here.
Most common value is 0.195 for an intan recording system.
The default is None.
Not currently in use pending updates to NWB waveforms.
spikeextractors_backend : bool
False by default. When True the interface uses the old extractor from the spikextractors library instead
of a new spikeinterface object.
"""
get_package(package_name="lxml")
from spikeextractors import NeuroscopeMultiSortingExtractor

if spikeextractors_backend:
# TODO: Remove spikeextractors backend
warn(
message=(
"Interfaces using a spikeextractors backend will soon be deprecated! "
"Please use the SpikeInterface backend instead."
),
category=DeprecationWarning,
stacklevel=2,
)
self.Extractor = NeuroscopeMultiSortingExtractor

super().__init__(
folder_path=folder_path,
keep_mua_units=keep_mua_units,
exclude_shanks=exclude_shanks,
xml_file_path=xml_file_path,
verbose=verbose,
# TODO: we can enable this once
# a) waveforms on unit columns support conversion factor in NWB
# b) write_sorting utils support writing said waveforms properly to a units table
# load_waveforms=load_waveforms,
# gain=gain,
)

def get_metadata(self):
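With `spikeextractors_backend` removed, the NeuroScope interfaces are built from the remaining parameters shown in the docstrings above. A sketch with placeholder paths:

from neuroconv.datainterfaces import (
    NeuroScopeLFPInterface,
    NeuroScopeSortingInterface,
)

# Placeholder paths standing in for a real NeuroScope session
lfp_interface = NeuroScopeLFPInterface(file_path="session/session.eeg", gain=0.195)
sorting_interface = NeuroScopeSortingInterface(folder_path="session/", keep_mua_units=False)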
15 changes: 0 additions & 15 deletions src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py
@@ -15,7 +15,6 @@ def __init__(
folder_path: FolderPathType,
exclude_cluster_groups: Optional[list] = None,
verbose: bool = True,
spikeextractors_backend: bool = False,
):
"""
Initialize a PhySortingInterface.
@@ -27,19 +26,5 @@
exclude_cluster_groups : str or list of str, optional
Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]).
verbose : bool, default: True
spikeextractors_backend : bool, default: False
"""
if spikeextractors_backend:
# TODO: Remove spikeextractors backend
warn(
message=(
"Interfaces using a spikeextractors backend will soon be deprecated! "
"Please use the SpikeInterface backend instead."
),
category=DeprecationWarning,
stacklevel=2,
)
from spikeextractors import PhySortingExtractor

self.Extractor = PhySortingExtractor
super().__init__(folder_path=folder_path, exclude_cluster_groups=exclude_cluster_groups, verbose=verbose)
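The Phy interface drops the same flag; the remaining signature is the one documented above. A sketch, assuming `PhySortingInterface` is exported from `neuroconv.datainterfaces` like the other interfaces in this commit:

from neuroconv.datainterfaces import PhySortingInterface

# Placeholder path for a Phy output directory
phy_interface = PhySortingInterface(
    folder_path="phy_output/",
    exclude_cluster_groups=["noise", "mua"],
)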
1 change: 0 additions & 1 deletion src/neuroconv/datainterfaces/ecephys/requirements.txt
@@ -1,3 +1,2 @@
spikeinterface>=0.97.0
spikeextractors>=0.9.10
packaging<22.0
@@ -33,8 +33,7 @@ def get_source_schema(cls):
def get_all_channels_info(cls, file_path: FilePathType):
"""Retrieve and inspect necessary channel information prior to initialization."""
_test_sonpy_installation()
getattr(cls, "RX") # Required to trigger dynamic access in case this method is called first
return cls.RX.get_all_channels_info(file_path=file_path)
return cls.get_extractor().get_all_channels_info(file_path=file_path)

def __init__(self, file_path: FilePathType, verbose: bool = True, es_key: str = "ElectricalSeries"):
"""
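The `get_all_channels_info` change above applies the same `cls.get_extractor()` classmethod noted in the changelog. A sketch (the `.smrx` file name is illustrative):

from neuroconv.datainterfaces import Spike2RecordingInterface

# Inspect channel info before deciding how to initialize the interface
channels_info = Spike2RecordingInterface.get_all_channels_info(file_path="recording.smrx")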
@@ -36,7 +36,6 @@ def __init__(
or an array of values for each channel.
es_key : str, default: "ElectricalSeries"
"""

super().__init__(file_path=file_path, stream_id="trodes", verbose=verbose, es_key=es_key)

self.source_data = dict(file_path=file_path, verbose=verbose)