From 727d4f27e4d115508075417ff0fbd3bb415e67d2 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Wed, 7 Feb 2024 18:07:36 -0500 Subject: [PATCH] add more docstrings --- neo/io/basefromrawio.py | 20 ++-- neo/io/baseio.py | 13 ++- neo/rawio/alphaomegarawio.py | 25 +++-- neo/rawio/axographrawio.py | 62 +++++------ neo/rawio/axonarawio.py | 39 ++++--- neo/rawio/axonrawio.py | 23 ++++ neo/rawio/baserawio.py | 179 ++++++++++++++++++++---------- neo/rawio/bci2000rawio.py | 5 + neo/rawio/biocamrawio.py | 17 ++- neo/rawio/blackrockrawio.py | 75 ++++++------- neo/rawio/brainvisionrawio.py | 13 ++- neo/rawio/cedrawio.py | 15 ++- neo/rawio/edfrawio.py | 29 +++-- neo/rawio/elanrawio.py | 22 +++- neo/rawio/intanrawio.py | 33 ++++-- neo/rawio/maxwellrawio.py | 11 +- neo/rawio/mearecrawio.py | 45 ++++---- neo/rawio/medrawio.py | 21 +++- neo/rawio/micromedrawio.py | 5 + neo/rawio/neuroscoperawio.py | 4 +- neo/rawio/nixrawio.py | 10 ++ neo/rawio/openephysrawio.py | 24 ++-- neo/rawio/rawbinarysignalrawio.py | 20 ++++ neo/rawio/rawmcsrawio.py | 9 ++ neo/rawio/spike2rawio.py | 14 ++- 25 files changed, 497 insertions(+), 236 deletions(-) diff --git a/neo/io/basefromrawio.py b/neo/io/basefromrawio.py index b54322928..750bdb083 100644 --- a/neo/io/basefromrawio.py +++ b/neo/io/basefromrawio.py @@ -87,7 +87,7 @@ def read_block( block_index: int, default: 0 In the case of multiple blocks, the block_index specifies which block to read lazy: bool, default: False - Whether to read the block lazily (True) or load into memory (false) + Whether to read the block lazily (True) or load into memory (False) create_group_across_segment: bool | dict | None, default: None If True : * Create a neo.Group to group AnalogSignal segments @@ -98,9 +98,8 @@ def read_block( * for example: create_group_across_segment = { 'AnalogSignal': True, 'SpikeTrain': False, ...} signal_group_mode: 'split-all' | 'group-by-same-units' | None, default: None This control behavior for grouping channels in AnalogSignal. - * 'split-all': each channel will give an AnalogSignal - * 'group-by-same-units' all channel sharing the same quantity units ar grouped in - a 2D AnalogSignal + * 'split-all': each channel will be give an AnalogSignal + * 'group-by-same-units' all channel sharing the same quantity units are grouped in a 2D AnalogSignal By default None since the default is dependant on the IO load_waveforms: bool, default: False Determines whether SpikeTrains.waveforms is created @@ -221,20 +220,19 @@ def read_segment( Whether to lazily load the segment (True) or to load the segment into memory (False) signal_group_mode: 'split-all' | 'group-by-same-units' | None, default: None This control behavior for grouping channels in AnalogSignal. 
- * 'split-all': each channel will give an AnalogSignal - * 'group-by-same-units' all channel sharing the same quantity units ar grouped in - a 2D AnalogSignal + * 'split-all': each channel will be give an AnalogSignal + * 'group-by-same-units' all channel sharing the same quantity units are grouped in a 2D AnalogSignal load_waveforms: bool, default: False Determines whether SpikeTrains.waveforms is created - time_slice: tuple[float | None] | None, default: None + time_slice: tuple[quantity.Quantities | None] | None, default: None Whether to take a time slice of the data - * None: indicates from beginning of segment to the end of the segment + * None: indicates from beginning of the segment to the end of the segment * tuple: (t_start, t_stop) with t_start and t_stop being quantities in seconds * tuple: (None, t_stop) indicates the beginning of the segment to t_stop * tuple: (t_start, None) indicates from t_start to the end of the segment strict_slicing: bool, default: True Control if an error is raised or not when t_start or t_stop - is outside the real time range of the segment. + is outside of the real time range of the segment. Returns ------- @@ -243,7 +241,7 @@ def read_segment( """ if lazy: - assert time_slice is None, "For lazy=True you must specify time_slice when LazyObject.load(time_slice=...)" + assert time_slice is None, "For lazy=True you must specify a time_slice when LazyObject.load(time_slice=...)" assert ( not load_waveforms diff --git a/neo/io/baseio.py b/neo/io/baseio.py index abc368f84..47cfe29cd 100644 --- a/neo/io/baseio.py +++ b/neo/io/baseio.py @@ -135,7 +135,7 @@ def read(self, lazy: bool = False, **kargs): Returns ------ - block_list: list[neo.Block] + block_list: list[neo.core.Block] Returns all the data from the file as Blocks """ if lazy and not self.support_lazy: @@ -154,6 +154,17 @@ def read(self, lazy: bool = False, **kargs): raise NotImplementedError def write(self, bl, **kargs): + """ + Writes a given block if IO supports writing + + Parameters + ---------- + bl: neo.core.Block + The neo Block to be written + kargs: dict + IO specific additional arguments + + """ if Block in self.writeable_objects: if isinstance(bl, Sequence): assert hasattr(self, "write_all_blocks"), ( diff --git a/neo/rawio/alphaomegarawio.py b/neo/rawio/alphaomegarawio.py index 06cf1e35d..603f97b36 100644 --- a/neo/rawio/alphaomegarawio.py +++ b/neo/rawio/alphaomegarawio.py @@ -47,20 +47,21 @@ class AlphaOmegaRawIO(BaseRawIO): """ AlphaOmega MPX file format 4 reader. Handles several segments. - - A segment is a continuous record (when record starts/stops). - + A segment is a continuous recording (when recording starts/stops). Only files in current `dirname` are loaded, subfolders are not explored. - :param dirname: folder from where to load the data - :type dirname: str or Path-like - :param lsx_files: list of lsx files in `dirname` referencing mpx files to - load (optional). If None (default), read all mpx files in `dirname` - :type lsx_files: list of strings or None - :param prune_channels: if True removes the empty channels, defaults to True - :type prune_channels: bool - - .. 
warning:: + Parameters + ---------- + dirname: str | Path + The folder from which the data will be loaded + lsx_files: list[str] | None, default: None + List of lsx files in `dirname` referencing mpx files to load (optional) + If None all mpx files will be read + prune_channels: bool, default: True + If True removes the empty channels + + Notes + ----- Because channels must be gathered into coherent streams, channels names **must** be the default channel names in AlphaRS or Alpha LAB SNR software. diff --git a/neo/rawio/axographrawio.py b/neo/rawio/axographrawio.py index 45f4cd1b6..012d2ebe3 100644 --- a/neo/rawio/axographrawio.py +++ b/neo/rawio/axographrawio.py @@ -174,52 +174,50 @@ class AxographRawIO(BaseRawIO): """ RawIO class for reading AxoGraph files (.axgd, .axgx) - Args: - filename (string): - File name of the AxoGraph file to read. - force_single_segment (bool): - Episodic files are normally read as multi-Segment Neo objects. This - parameter can force AxographRawIO to put all signals into a single - Segment. Default: False. - - Example: + Parameters + ---------- + filename: str + File name of the AxoGraph file to read. + force_single_segment: bool, default: False + Episodic files are normally read as multi-Segment Neo objects. This + parameter can force AxographRawIO to put all signals into a single + Segment. + + Examples + -------- >>> import neo - >>> r = neo.rawio.AxographRawIO(filename=filename) - >>> r.parse_header() - >>> print(r) + >>> reader = neo.rawio.AxographRawIO(filename=filename) + >>> reader.parse_header() + >>> print(reader) >>> # get signals - >>> raw_chunk = r.get_analogsignal_chunk( - ... block_index=0, seg_index=0, - ... i_start=0, i_stop=1024, - ... channel_names=channel_names) - >>> float_chunk = r.rescale_signal_raw_to_float( - ... raw_chunk, - ... dtype='float64', - ... channel_names=channel_names) + >>> raw_chunk = reader.get_analogsignal_chunk(block_index=0, + ... seg_index=0, + ... i_start=0, + ... i_stop=1024, + ... channel_names=channel_names) + + >>> float_chunk = r.rescale_signal_raw_to_float(raw_chunk, + ... dtype='float64', + ... channel_names=channel_names) >>> print(float_chunk) >>> # get event markers - >>> ev_raw_times, _, ev_labels = r.get_event_timestamps( - ... event_channel_index=0) - >>> ev_times = r.rescale_event_timestamp( - ... ev_raw_times, dtype='float64') + >>> ev_raw_times, _, ev_labels = reader.get_event_timestamps(event_channel_index=0) + >>> ev_times = reader.rescale_event_timestamp(ev_raw_times, dtype='float64') >>> print([ev for ev in zip(ev_times, ev_labels)]) >>> # get interval bars - >>> ep_raw_times, ep_raw_durations, ep_labels = r.get_event_timestamps( - ... event_channel_index=1) - >>> ep_times = r.rescale_event_timestamp( - ... ep_raw_times, dtype='float64') - >>> ep_durations = r.rescale_epoch_duration( - ... 
ep_raw_durations, dtype='float64')
+ >>> ep_raw_times, ep_raw_durations, ep_labels = reader.get_event_timestamps(event_channel_index=1)
+ >>> ep_times = reader.rescale_event_timestamp(ep_raw_times, dtype='float64')
+ >>> ep_durations = reader.rescale_epoch_duration(ep_raw_durations, dtype='float64')
 >>> print([ep for ep in zip(ep_times, ep_durations, ep_labels)])

 >>> # get notes
- >>> print(r.info['notes'])
+ >>> print(reader.info['notes'])

 >>> # get other miscellaneous info
- >>> print(r.info)
+ >>> print(reader.info)
 """

 name = "AxographRawIO"
diff --git a/neo/rawio/axonarawio.py b/neo/rawio/axonarawio.py
index 79dc73b25..727d873c6 100644
--- a/neo/rawio/axonarawio.py
+++ b/neo/rawio/axonarawio.py
@@ -32,25 +32,36 @@ class AxonaRawIO(BaseRawIO):
 """
- Class for reading raw, continuous data from the Axona dacqUSB system:
+ Class for reading raw, continuous data from the Axona dacqUSB system
+
+ Parameters
+ ----------
+ filename: str
+ The name of the *.bin file containing the data
+
+ Notes
+ -----
+
 http://space-memory-navigation.org/DacqUSBFileFormats.pdf

 The raw data is saved in .bin binary files with an accompanying
 .set file about the recording setup (see the above manual for details).

- Usage::
-
- import neo.rawio
- r = neo.rawio.AxonaRawIO(filename=os.path.join(dir_name, base_filename))
- r.parse_header()
- print(r)
- raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0,
- i_start=0, i_stop=1024,
- channel_names=channel_names)
- float_chunk = reader.rescale_signal_raw_to_float(
- raw_chunk, dtype='float64',
- channel_indexes=[0, 3, 6]
- )
+ Examples
+ --------
+
+ >>> import neo.rawio
+ >>> r = neo.rawio.AxonaRawIO(filename=os.path.join(dir_name, base_filename))
+ >>> r.parse_header()
+ >>> print(r)
+ >>> raw_chunk = r.get_analogsignal_chunk(block_index=0,
                                           seg_index=0,
                                           i_start=0,
                                           i_stop=1024,
                                           channel_names=channel_names)
+ >>> float_chunk = r.rescale_signal_raw_to_float(raw_chunk,
                                                  dtype='float64',
                                                  channel_indexes=[0, 3, 6])

 """

diff --git a/neo/rawio/axonrawio.py b/neo/rawio/axonrawio.py
index df4861ef6..0800a8106 100644
--- a/neo/rawio/axonrawio.py
+++ b/neo/rawio/axonrawio.py
@@ -53,6 +53,29 @@ class AxonRawIO(BaseRawIO):
+ """
+ Class for reading data from pCLAMP and AxoScope files (.abf version 1 and 2)
+
+ Parameters
+ ----------
+ filename: str, default: ''
+ The *.abf file to be read
+
+ Notes
+ -----
+ This code is a port of abfload and abf2load written in Matlab (BSD-2-Clause licence) by
+ Copyright (c) 2009, Forrest Collman, fcollman@princeton.edu
+ Copyright (c) 2004, Harald Hentschke
+
+ Examples
+ --------
+
+ >>> import neo.rawio
+ >>> reader = neo.rawio.AxonRawIO(filename='mydata.abf')
+ >>> reader.parse_header()
+ >>> print(reader)
+
+ """
 extensions = ["abf"]
 rawmode = "one-file"
diff --git a/neo/rawio/baserawio.py b/neo/rawio/baserawio.py
index b85d8e941..4a0800407 100644
--- a/neo/rawio/baserawio.py
+++ b/neo/rawio/baserawio.py
@@ -612,13 +612,14 @@ def _get_stream_index_from_arg(self, stream_index_arg: int | None):
 ----------
 stream_index_arg: int | None, default: None
 The stream_index to verify
- If None checks if only one stream exists and then returns 0 if this it is single stream
+ If None checks if only one stream exists and then returns 0 if it is single stream

 Returns
 -------
 stream_index: int
 The stream_index to be used for function requiring a stream_index
- """
+
+ """
 if stream_index_arg is None:
 assert self.header["signal_streams"].size == 1, "stream_index must be given for multiple stream files"
 stream_index = 
0 @@ -631,7 +632,7 @@ def _get_stream_index_from_arg(self, stream_index_arg: int | None): def get_signal_size(self, block_index: int, seg_index: int, stream_index: int | None = None): """ - Retrieve the length of a single section of the channels in a stream. + Retrieves the length of a single section of the channels in a stream. Parameters ---------- @@ -646,7 +647,7 @@ def get_signal_size(self, block_index: int, seg_index: int, stream_index: int | Returns ------- signal_size: int - The number of samples for a given signal within the given block, segment, and stream + The number of samples for a given signal within the desired block, segment, and stream """ stream_index = self._get_stream_index_from_arg(stream_index) @@ -654,22 +655,22 @@ def get_signal_size(self, block_index: int, seg_index: int, stream_index: int | def get_signal_t_start(self, block_index: int, seg_index: int, stream_index: int | None = None): """ - Retrieve the t_start of a single section of the channels in a stream. + Retrieves the t_start of a single section of the channels in a stream. Parameters ---------- block_index: int - The desired block in which to get a signal size + The desired block in which to get a t_start seg_index: int - The desired segment of the block in which to get the signal size + The desired segment of the block in which to get the t_start stream_index: int | None, default: None - The optional stream index in which to determine signal size + The optional stream index in which to determine t_start This is required for data with multiple streams Returns ------- signal_t_start: float - The start time for a given signal within the given block, segment, and stream + The start time for a given signal within the desired block, segment, and stream """ stream_index = self._get_stream_index_from_arg(stream_index) @@ -677,7 +678,7 @@ def get_signal_t_start(self, block_index: int, seg_index: int, stream_index: int def get_signal_sampling_rate(self, stream_index: int | None = None): """ - Retrieve sampling rate for a stream and all channels in that stream. + Retrieves the sampling rate for a stream and all channels withinin that stream. Parameters ---------- @@ -711,26 +712,30 @@ def get_analogsignal_chunk( prefer_slice: bool = False, ): """ - Return a chunk of raw signal as a Numpy array. + Returns a chunk of raw signal as a Numpy array. 
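Taken together, the three getters above are what a caller typically uses to turn a time window into sample indexes before requesting a chunk. A minimal sketch, assuming a reader whose parse_header() has already been called and a single stream (the variable names are illustrative, not part of the API):

 >>> size = rawio_reader.get_signal_size(block_index=0, seg_index=0, stream_index=0)
 >>> t_start = rawio_reader.get_signal_t_start(block_index=0, seg_index=0, stream_index=0)
 >>> sr = rawio_reader.get_signal_sampling_rate(stream_index=0)
 >>> # sample indexes covering the first second of the segment, clipped to its length
 >>> i_start = 0
 >>> i_stop = min(int(1.0 * sr), size)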
Parameters ---------- block_index: int, default: 0 - The block with the desired segment to assess + The block with the desired analog signal seg_index: int, default: 0 - The segment containing the desired section to assess + The segment containing the desired analog signal i_start: int | None, default: None - The index of the first sample to retrieve within the section + The index of the first sample (not time) of the desired analog signal i_stop: int | None, default: None - The index of one past the last sample to retrieve within the section + The index of one past the last sample (not time) of the desired analog signal stream_index: int | None, default: None - The index of the stream containing the channels to assess + The index of the stream containing the channels to assess for the analog signal + This is required for data with multiple streams channel_indexes: list[int] | np.array[int]| slice | None, default: None The list of indexes of channels to retrieve + One of channel_indexes, channel_names, or channel_ids must be given channel_names: list[str] | None, default: None The list of channel names to retrieve + One of channel_indexes, channel_names, or channel_ids must be given channel_ids: list[str] | None, default: None - list of channel_ids to retrieve + The list of channel_ids to retrieve + One of channel_indexes, channel_names, or channel_ids must be given Returns ------- @@ -747,16 +752,17 @@ def get_analogsignal_chunk( Examples -------- # tetrode with 1 sec recording at sampling_rate = 1000. Hz + >>> rawio_reader.parse_header() >>> raw_sigs = rawio_reader.get_analogsignal_chunk(block_index=2, seg_index=0, stream_index=0) >>> raw_sigs.shape - >>> (1000,4) # 1000 samples by 4 channels + (1000,4) # 1000 samples by 4 channels >>> raw_sigs.dtype - >>> 'int16' # returns the dtype from the recording itself + 'int16' # returns the dtype from the recording itself # If we only want one electrode >>> raw_sigs_one_electrode = rawio_reader.get_analogsignal_chunk(block_index=2, seg_index=0, stream_index=0, channel_indexes=[0]) >>> raw_sigs_one_electrode.shape - >>> (1000,1) + (1000,1) """ @@ -804,10 +810,8 @@ def rescale_signal_raw_to_float( channel_ids: list[str] | None = None, ): """ - Rescale a chunk of raw signals which are provided as a Numpy array. These are normally - returned by a call to get_analogsignal_chunk. The channels are specified either by - channel_names, if provided, otherwise by channel_ids, if provided, otherwise by - channel_indexes, if provided, otherwise all channels are selected. + Rescales a chunk of raw signals which are provided as a Numpy array. These are normally + returned by a call to get_analogsignal_chunk. Parameters ---------- @@ -829,17 +833,26 @@ def rescale_signal_raw_to_float( float_signal: np.array (n_samples, n_channels) The rescaled signal + Notes + ----- + The channels are specified either by channel_names, if provided, otherwise by channel_ids, + if provided, otherwise by channel_indexes, if provided, otherwise all channels are selected. 
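As background for the note that follows, the rescaling amounts to a per-channel linear transform using the gain and offset stored in the parsed header. A rough, illustrative equivalent, assuming raw_sigs and channel_indexes come from a previous get_analogsignal_chunk call:

 >>> sig_channels = rawio_reader.header['signal_channels']
 >>> gain = sig_channels['gain'][channel_indexes]
 >>> offset = sig_channels['offset'][channel_indexes]
 >>> # same result as rescale_signal_raw_to_float with dtype='float64'
 >>> float_sigs_manual = raw_sigs.astype('float64') * gain + offset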
+ + These are rawio dependent because rescaling of the NumPy array requires the offset and gain + stored within the header of the rawio + + Examples -------- # Once we have a `raw_sigs` using rawio.get_analogsignal_chunk() we can convert to voltages with a desired dtype # If we used `stream_index=0` with `get_analogsignal_chunk` we use `stream_index=0` here >>> float_sigs = rawio_reader.rescale_signal_raw_to_float(raw_signal=raw_sigs, dtype='float32', stream_index=0) >>> float_sigs.dtype - >>> 'float32' + 'float32' >>> float_sigs.shape - >>> (1000,4) + (1000,4) >>> float_sigs.shape == raw_sigs.shape - >>> True + True """ @@ -901,17 +914,17 @@ def get_spike_timestamps( Parameters ---------- block_index: int, default: 0 - The block containing the section to get the timestamps + The block containing the section to get the spike timestamps seg_index: int, default: 0 - The segment containing the section to get the timestamps + The segment containing the section to get the spike timestamps spike_channel_index: int, default: 0 - The channels in which to count the spikes on + The channel in which to collect spike timestamps t_start: float | None, default: None The time in seconds for the start of the section to get spike timestamps - None indicates to start at the beginning of the section + None indicates to start at the beginning of the segment t_stop: float | None, default: None The time in seconds for the end of the section to get spike timestamps - None indicates to end at the end of the section + None indicates to end at the end of the segment Returns ------- @@ -923,13 +936,24 @@ def get_spike_timestamps( The timestamp datatype is as close to the format itself. Sometimes float/int32/int64. Sometimes it is the index on the signal but not always. The conversion to second or index_on_signal is done outside this method. + + + Examples + -------- + # to look at block 1, segment 0, and channel 3 on a tetrode from 10 + # seconds to 30 seconds we would do: + >>> timestamps = rawio_reader.get_spike_timestamps(block_index=1, + seg_index=0, + spike_channel_index=3, + t_start=10, + t_stop=30) """ timestamp = self._get_spike_timestamps(block_index, seg_index, spike_channel_index, t_start, t_stop) return timestamp def rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype = "float64"): """ - Rescale spike timestamps to seconds. + Rescale spike timestamps from samples to seconds. 
Parameters
 ----------
 spike_timestamps: np.ndarray
 The array containing the spike_timestamps to convert
 dtype: np.dtype, default: "float64"
 The dtype in which to convert the spike time in seconds. Must be accepted by the numpy.dtype constructor
@@ -942,6 +966,13 @@ def rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype
 -------
 scaled_spike_timestamps: np.array
 The spiketimes in seconds
+
+ Examples
+ --------
+ # After running `get_spike_timestamps` and returning timestamps we can do the following:
+ >>> scaled_spike_timestamps = rawio_reader.rescale_spike_timestamp(spike_timestamps=timestamps,
                                                                     dtype='float64')
+
 """
 return self._rescale_spike_timestamp(spike_timestamps, dtype)
@@ -955,27 +986,27 @@ def get_spike_raw_waveforms(
 t_stop: float | None = None,
 ):
 """
- Gets the waveforms for a particular set of channels
+ Gets the waveforms for one channel within one segment of one block

 Parameters
 ----------
 block_index: int, default: 0
- The block containing the section to get the waveforms
+ The block containing the desired set of waveform data
 seg_index: int, default: 0
- The segment containing the section to get the wavforms
+ The segment containing the desired set of waveform data
 spike_channel_index: int, default: 0
- The channels in which to count the spikes on
+ The channel index on which to get waveform data
 t_start: float | None, default: None
 The time in seconds for the start of the section to get waveforms
- None indicates to start at the beginning of the section
+ None indicates to start at the beginning of the segment
 t_stop: float | None, default: None
 The time in seconds for the end of the section to waveforms
- None indicates to end at the end of the section
+ None indicates to end at the end of the segment

 Returns
 -------
- wf: np.ndarray
- #SAM what is the shape of this
+ wf: np.ndarray (nb_spike, nb_channel, nb_sample)
+ A NumPy array of spikes, channels and samples
 """
 wf = self._get_spike_raw_waveforms(block_index, seg_index, spike_channel_index, t_start, t_stop)
 return wf
@@ -984,21 +1015,21 @@ def rescale_waveforms_to_float(
 self, raw_waveforms: np.ndarray, dtype: np.dtype = "float32", spike_channel_index: int = 0
 ):
 """
- Rescale waveforms to seconds.
+ Rescale waveforms based on the rawio's waveform gain and waveform offset

 Parameters
 ----------
 raw_waveforms: np.ndarray
 The array containing the spike_timestamps to convert
 dtype: np.dtype, default: "float64"
- The dtype in which to convert the spike time in seconds. Must be accepted by the numpy.dtype constructor
+ The dtype in which to convert the waveforms. 
Must be accepted by the numpy.dtype constructor spike_channel_index: int, default: 0 - The channels on which to index for scaling the waveforms + The channel index of the desired channel to rescale Returns ------- - float_waveforms: np.array - The scaled waveforms + float_waveforms: np.ndarray (nb_spikes, nb_channels, nb_samples) + The scaled waveforms to the dtype specified by dtype """ wf_gain = self.header["spike_channels"]["wf_gain"][spike_channel_index] wf_offset = self.header["spike_channels"]["wf_offset"][spike_channel_index] @@ -1015,7 +1046,7 @@ def rescale_waveforms_to_float( # event and epoch zone def event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0): """ - Returns the count of evens for a particular block segment and channel index + Returns the count of events for a particular block, segment, and channel_index Parameters ---------- @@ -1054,19 +1085,19 @@ def get_event_timestamps( The index of the channel in which to count events t_start: float | None, default: None The time in seconds for the start of the section to get waveforms - None indicates to start at the beginning of the section + None indicates to start at the beginning of the segment t_stop: float | None, default: None The time in seconds for the end of the section to waveforms - None indicates to end at the end of the section + None indicates to end at the end of the segment Returns ------- timestamp: np.array - The timestamps of events - labels: np.array - The labels of the events + The timestamps of events (in samples) durations: np.array The durations of each event + labels: np.array + The labels of the events Notes ----- @@ -1074,6 +1105,17 @@ def get_event_timestamps( Sometimes it is the index on the signal but not always. The conversion to second or index_on_signal is done outside this method. + Examples + -------- + # A given rawio reader that generates events data. For this example we will + # look at Block 0, Segment 1, on Channel 1, with a start time at the beginning + # of the segment and an end time of 5 minutes (300 s) + >>> event_timestamps, durations, labels = rawio_reader.get_event_timestamps(block_index=0, + seg_index=1, + event_channel_index=1, + t_start=None, + t_stop=300) + """ timestamp, durations, labels = self._get_event_timestamps( block_index, seg_index, event_channel_index, t_start, t_stop @@ -1089,16 +1131,26 @@ def rescale_event_timestamp( Parameters ---------- event_timestamps: np.ndarray - The array containing the spike_timestamps to convert + The array containing the event timestamps to convert dtype: np.dtype, default: "float64" - The dtype in which to convert the spike time in seconds. Must be accepted by the numpy.dtype constructor + The dtype in which to convert the event time in seconds. Must be accepted by the numpy.dtype constructor event_channel_index: int, default: 0 - The channels on which to index for scaling the events + The channel index for scaling the events Returns ------- scaled_event_timestamps: np.array - The scaled event_timestamps + The scaled event timestamps in seconds + + Examples + -------- + # Using the event_timestamps from the `get_event_timestamps` function we can then scale from samples into + # seconds using this `rescale_event_timestamp`. 
We use the same event_channel_index as used during the + # `get_event_timestamps` + >>> event_timestamps_seconds = rawio_reader.rescale_event_timestamp(event_timestamps=event_timestamps, + dtype='float64', + event_channel_index=1) + """ return self._rescale_event_timestamp(event_timestamps, dtype, event_channel_index) @@ -1106,21 +1158,28 @@ def rescale_epoch_duration( self, raw_duration: np.ndarray, dtype: np.dtype = "float64", event_channel_index: int = 0 ): """ - Rescale epoch raw duration to seconds. + Rescales the epoch duration from samples to seconds Parameters ---------- raw_duration: np.ndarray - The array containing the spike_timestamps to convert + The array containing the epoch times in samples dtype: np.dtype, default: "float64" The dtype in which to convert the spike time in seconds. Must be accepted by the numpy.dtype constructor event_channel_index: int, default: 0 - The channels on which to index for scaling epochs + The channel on which to index for scaling epochs Returns ------- scaled_epoch_durations: np.array - The scaled epoch durations + The scaled epoch durations in seconds + + Examples + -------- + # In this example we use the durations obtained from running `get_event_timestamps` + >>> duration_seconds = rawio_reader.rescale_epoch_duration(raw_durations=durations, + dtype='float64', + event_channel_index=0) """ return self._rescale_epoch_duration(raw_duration, dtype, event_channel_index) diff --git a/neo/rawio/bci2000rawio.py b/neo/rawio/bci2000rawio.py index 7bc21fb4b..593a71216 100644 --- a/neo/rawio/bci2000rawio.py +++ b/neo/rawio/bci2000rawio.py @@ -24,6 +24,11 @@ class BCI2000RawIO(BaseRawIO): """ Class for reading data from a BCI2000 .dat file, either version 1.0 or 1.1 + + Parameters + ---------- + filename: str, default: '' + The *.dat recorded by BCI2000 """ extensions = ["dat"] diff --git a/neo/rawio/biocamrawio.py b/neo/rawio/biocamrawio.py index 3af76e41f..a9008f444 100644 --- a/neo/rawio/biocamrawio.py +++ b/neo/rawio/biocamrawio.py @@ -23,15 +23,24 @@ class BiocamRawIO(BaseRawIO): """ Class for reading data from a Biocam h5 file. - Usage: + Parameters + ---------- + filename: str, default: '' + The *.h5 file to be read + + Examples + -------- >>> import neo.rawio >>> r = neo.rawio.BiocamRawIO(filename='biocam.h5') >>> r.parse_header() >>> print(r) - >>> raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0, - i_start=0, i_stop=1024, + >>> raw_chunk = r.get_analogsignal_chunk(block_index=0, + seg_index=0, + i_start=0, + i_stop=1024, channel_names=channel_names) - >>> float_chunk = r.rescale_signal_raw_to_float(raw_chunk, dtype='float64', + >>> float_chunk = r.rescale_signal_raw_to_float(raw_chunk, + dtype='float64', channel_indexes=[0, 3, 6]) """ diff --git a/neo/rawio/blackrockrawio.py b/neo/rawio/blackrockrawio.py index 49e28b9fb..de85c5ef7 100644 --- a/neo/rawio/blackrockrawio.py +++ b/neo/rawio/blackrockrawio.py @@ -76,57 +76,48 @@ class BlackrockRawIO(BaseRawIO): """ - Class for reading data in from a file set recorded by the Blackrock - (Cerebus) recording system. - - Upon initialization, the class is linked to the available set of Blackrock - files. - - Note: This routine will handle files according to specification 2.1, 2.2, + Class for reading data in from a file set recorded by the Blackrock (Cerebus) recording system. + Upon initialization, the class is linked to the available set of Blackrock files. 
+
+ Parameters
+ ----------
+ filename: str, default: ''
+ File name (without extension) of the set of Blackrock files to associate with.
+ Any .nsX or .nev, .sif, or .ccf extensions are ignored when parsing this parameter.
+ nsx_override: str | None, default: None
+ File name of the .nsX files (without extension). If None, filename is used.
+ nev_override: str | None, default: None
+ File name of the .nev file (without extension). If None, filename is used.
+ nsx_to_load: int | list | 'max' | 'all' | None, default: None
+ IDs of nsX file from which to load data, e.g., if set to 5 only data from the ns5 file are loaded.
+ If 'all', then all nsX will be loaded. Contrary to previous version of the IO (<0.7), nsx_to_load
+ must be set at the init before parse_header().
+ load_nev: bool, default: True
+ Load (or not) events/spikes by ignoring or not the nev file.
+
+ Notes
+ -----
+ * Note: This routine will handle files according to specification 2.1, 2.2,
 and 2.3. Recording pauses that may occur in file specifications 2.2
 and 2.3 are automatically extracted and the data set is split into
 different segments.

- The Blackrock data format consists not of a single file, but a set of
+ * The Blackrock data format consists not of a single file, but a set of
 different files. This constructor associates itself with a set of
 files that constitute a common data set. By default, all files belonging
 to the file set have the same base name, but different extensions.
 However, by using the override parameters, individual filenames can be
 set.
-
- Args:
- filename (string):
- File name (without extension) of the set of Blackrock files to
- associate with. Any .nsX or .nev, .sif, or .ccf extensions are
- ignored when parsing this parameter.
- nsx_override (string):
- File name of the .nsX files (without extension). If None,
- filename is used.
- Default: None.
- nev_override (string):
- File name of the .nev file (without extension). If None,
- filename is used.
- Default: None.
- nsx_to_load (int, list, 'max', 'all' (=None)) default None:
- IDs of nsX file from which to load data, e.g., if set to
- 5 only data from the ns5 file are loaded.
- If 'all', then all nsX will be loaded. Contrary to previous version of the IO (<0.7), nsx_to_load
- must be set at the init before parse_header().
- load_nev (bool):
- Load (or not) events/spikes by ignoring or not the nev file.
- Default: True
-
- Examples:
- >>> reader = BlackrockRawIO(filename='FileSpec2.3001', nsx_to_load=5)
- >>> reader.parse_header()
-
- Inspect a set of file consisting of files FileSpec2.3001.ns5 and
- FileSpec2.3001.nev
-
- >>> print(reader)
-
- Display all informations about signal channels, units, segment size.... 
+ + Examples + -------- + >>> import neo.rawio + >>> # Inspect a set of file consisting of files FileSpec2.3001.ns5 and FileSpec2.3001.nev + >>> reader = neo.rawio.BlackrockRawIO(filename='FileSpec2.3001', nsx_to_load=5) + >>> reader.parse_header() + >>> print(reader) + + """ extensions = ["ns" + str(_) for _ in range(1, 7)] diff --git a/neo/rawio/brainvisionrawio.py b/neo/rawio/brainvisionrawio.py index c836b8ecc..c5232a7ce 100644 --- a/neo/rawio/brainvisionrawio.py +++ b/neo/rawio/brainvisionrawio.py @@ -23,7 +23,18 @@ class BrainVisionRawIO(BaseRawIO): - """ """ + """ Class for reading BrainVision files + + Parameters + ---------- + filename: str, default: '' + The *.vhdr file to load + + Examples + -------- + >>> import neo.rawio + >>> reader = neo.rawio.BrainVisionRawIO(filename=data_filename) + """ extensions = ["vhdr"] rawmode = "one-file" diff --git a/neo/rawio/cedrawio.py b/neo/rawio/cedrawio.py index 22bbf0e10..a7f01c4a1 100644 --- a/neo/rawio/cedrawio.py +++ b/neo/rawio/cedrawio.py @@ -35,9 +35,20 @@ class CedRawIO(BaseRawIO): """ Class for reading data from CED (Cambridge Electronic Design) spike2. - This internally uses the sonpy package which is closed source. + + Parameters + ---------- + filename: str, default: '' + The *.smr or *.smrx file to load + take_ideal_sampling_rate: bool, default: False + If true use the `GetIdealRate` function from sonpy package + + Notes + ----- + * This internally uses the sonpy package which is closed source. + + * This IO reads smr and smrx files - This IO reads smr and smrx files """ extensions = ["smr", "smrx"] diff --git a/neo/rawio/edfrawio.py b/neo/rawio/edfrawio.py index 324e83eeb..dee43d583 100644 --- a/neo/rawio/edfrawio.py +++ b/neo/rawio/edfrawio.py @@ -33,15 +33,26 @@ class EDFRawIO(BaseRawIO): Class for reading European Data Format files (EDF and EDF+). 
Currently only continuous EDF+ files (EDF+C) and original EDF files (EDF) are supported - Usage: - >>> import neo.rawio - >>> r = neo.rawio.EDFRawIO(filename='file.edf') - >>> r.parse_header() - >>> print(r) - >>> raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0, - i_start=0, i_stop=1024, stream_index=0, channel_indexes=range(10)) - >>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64', - channel_indexes=[0, 3, 6]) + Parameters + ---------- + filename: str, default: '' + The EDF+C file or EDF file to be loaded + + Examples + -------- + >>> import neo.rawio + >>> r = neo.rawio.EDFRawIO(filename='file.edf') + >>> r.parse_header() + >>> print(r) + >>> raw_chunk = r.get_analogsignal_chunk(block_index=0, + seg_index=0, + i_start=0, + i_stop=1024, + stream_index=0, + channel_indexes=range(10)) + >>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, + dtype='float64', + channel_indexes=[0, 3, 6]) """ extensions = ["edf"] diff --git a/neo/rawio/elanrawio.py b/neo/rawio/elanrawio.py index 295ced04c..43d38ed81 100644 --- a/neo/rawio/elanrawio.py +++ b/neo/rawio/elanrawio.py @@ -9,7 +9,7 @@ An Elan dataset is separated into 3 files : - .eeg raw data file - - .eeg.ent hearder file + - .eeg.ent header file - .eeg.pos event file Author: Samuel Garcia @@ -32,6 +32,26 @@ class ElanRawIO(BaseRawIO): + """ + Class for reading time-frequency EEG data maps from the Elan software + + Parameters + ---------- + filename: str | None, default: None + The raw data to load (*.eeg) + entfile: str | None, default: None + The header file to load (*.eeg.ent) + posfile: str | None, deafult: None + The event file to load (*.eeg.pos) + + Examples + -------- + >>> import neo.rawio + >>> reader = neo.rawio.ElanRawIO(filename='data.eeg', entfile='header.eeg.ent', posfile='events.eeg.pos') + >>> reader.parse_header() + >>> print(reader) + """ + extensions = ["eeg"] rawmode = "one-file" diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 5c60bd33e..87138f3fb 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -33,24 +33,43 @@ class IntanRawIO(BaseRawIO): """ - Intan reader can handle two file formats 'rhd' and 'rhs'. It will automatically + Class for reading rhd and rhs Intan data + + Parameters + ---------- + filename: str, default: '' + name of the 'rhd' or 'rhs' data file + + Notes + ----- + * Intan reader can handle two file formats 'rhd' and 'rhs'. It will automatically check for the file extension and will gather the header information based on the extension. Additionally it functions with RHS v 1.0 and RHD 1.0, 1.1, 1.2, 1.3, 2.0, 3.0, and 3.1 files. - Intan files contain amplifier channels labeled 'A', 'B' 'C' or 'D' + + * Intan files contain amplifier channels labeled 'A', 'B' 'C' or 'D' depending on the port in which they were recorded along with the following additional channels. + 0: 'RHD2000' amplifier channel 1: 'RHD2000 auxiliary input channel', 2: 'RHD2000 supply voltage channel', 3: 'USB board ADC input channel', 4: 'USB board digital input channel', 5: 'USB board digital output channel' - Due to the structure of the digital input and output channels these can be accessed + + * Due to the structure of the digital input and output channels these can be accessed as one long vector, which must be post-processed. 
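A sketch of that post-processing, assuming the digital lines arrive as one packed integer word per sample (dig_chunk, dig_stream_index and bit are illustrative names, not part of the API):

 >>> import numpy as np
 >>> dig_chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0, stream_index=dig_stream_index)
 >>> # state of a single digital line, recovered from the packed words
 >>> line_state = (dig_chunk.astype(np.uint16) & (1 << bit)) > 0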
-
- Parameters
- ----------
- filename: str
- name of the 'rhd' or 'rhs' data file
+
+ Examples
+ --------
+ >>> import neo.rawio
+ >>> reader = neo.rawio.IntanRawIO(filename='data.rhd')
+ >>> reader.parse_header()
+ >>> raw_chunk = reader.get_analogsignal_chunk(block_index=0,
                                                seg_index=0,
                                                stream_index=0)
+ >>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, stream_index=0)
+
 """
 extensions = ["rhd", "rhs"]
diff --git a/neo/rawio/maxwellrawio.py b/neo/rawio/maxwellrawio.py
index c8b4a69bb..b4898bbda 100644
--- a/neo/rawio/maxwellrawio.py
+++ b/neo/rawio/maxwellrawio.py
@@ -11,7 +11,7 @@
 * the implementation in spyking-circus
 https://github.com/spyking-circus/spyking-circus/blob/master/circus/files/maxwell.py

-The implementation do not handle spike at the moment.
+This implementation does not handle spikes at the moment.

 For maxtwo device, each well will be a different signal stream.
@@ -37,6 +37,15 @@ class MaxwellRawIO(BaseRawIO):
 """
 Class for reading MaxOne or MaxTwo files.
+
+ Parameters
+ ----------
+
+ filename: str, default: ''
+ The *.h5 file to be loaded
+ rec_name: str | None, default: None
+ If the file contains multiple recordings, the name of the recording to analyze
+
 """
 extensions = ["h5"]
diff --git a/neo/rawio/mearecrawio.py b/neo/rawio/mearecrawio.py
index 6139f382c..90ce280c1 100644
--- a/neo/rawio/mearecrawio.py
+++ b/neo/rawio/mearecrawio.py
@@ -23,31 +23,34 @@ class MEArecRawIO(BaseRawIO):
 """
- Class for "reading" fake data from a MEArec file.
-
- This class provides a convenient way to read data from a MEArec file.
+ Class for "reading" simulated data from a MEArec file.

 Parameters
 ----------
- filename : str
+ filename : str, default: ''
 The filename of the MEArec file to read.
- load_spiketrains : bool, optional
- Whether or not to load spike train data. Defaults to `True`.
- load_analogsignal : bool, optional
- Whether or not to load continuous recording data. Defaults to `True`.
-
-
- Usage:
- >>> import neo.rawio
- >>> r = neo.rawio.MEArecRawIO(filename='mearec.h5')
- >>> r.parse_header()
- >>> print(r)
- >>> raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0,
- i_start=0, i_stop=1024, channel_names=channel_names)
- >>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64',
- channel_indexes=[0, 3, 6])
- >>> spike_timestamp = reader.spike_timestamps(unit_index=0, t_start=None, t_stop=None)
- >>> spike_times = reader.rescale_spike_timestamp(spike_timestamp, 'float64')
+ load_spiketrains : bool, default: True
+ Whether or not to load spike train data.
+ load_analogsignal : bool, default: True
+ Whether or not to load continuous recording data.
+
+
+ Examples
+ --------
+ >>> import neo.rawio
+ >>> r = neo.rawio.MEArecRawIO(filename='mearec.h5', load_spiketrains=True)
+ >>> r.parse_header()
+ >>> print(r)
+ >>> raw_chunk = r.get_analogsignal_chunk(block_index=0,
                                           seg_index=0,
                                           i_start=0,
                                           i_stop=1024,
                                           channel_names=channel_names)
+ >>> float_chunk = r.rescale_signal_raw_to_float(raw_chunk,
                                                  dtype='float64',
                                                  channel_indexes=[0, 3, 6])
+ >>> spike_timestamp = r.get_spike_timestamps(spike_channel_index=0, t_start=None, t_stop=None)
+ >>> spike_times = r.rescale_spike_timestamp(spike_timestamp, 'float64')

 """
diff --git a/neo/rawio/medrawio.py b/neo/rawio/medrawio.py
index 9e99c07f7..9b141ec15 100644
--- a/neo/rawio/medrawio.py
+++ b/neo/rawio/medrawio.py
@@ -24,18 +24,27 @@ class MedRawIO(BaseRawIO):
 Uses the dhn-med-py MED python package (version >= 1.0.0),
 created by Dark Horse Neuro, Inc. and medformat.org. 
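A minimal usage sketch for this reader, ahead of the parameter list that follows; the session directory name is a placeholder, and a password is only needed for encrypted sessions:

 >>> import neo.rawio
 >>> reader = neo.rawio.MedRawIO(dirname='my_session.medd', password=None)
 >>> reader.parse_header()
 >>> print(reader)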
+ Parameters + ---------- + dirname: str | Path | None, default: None + The folder containing the data files to load + password: str | None, default: None + The password for the Med session + keep_original_times: bool, default: False + If True UTC timestamps are used and returned as seconds referenced + to midnight 1 Jan 1970 + If False timestamps are referenced to the beginning of the session + with the beginning being 0 + + Notes + ----- Currently reads the entire MED session. Every discontinuity is considered to be a new segment. Channels are grouped by sampling frequency, to create streams. In MED all channels will line up time-wise, so streams will span the entire recording, and continuous sections of those streams are divided up into segments. - Timestamps generated are referenced to the beginning of the session, - with the beginning of the session being timestamp zero. If UTC timestamps - are desired, then the keep_original_times flag in the constructor can be - set to True (it defaults to False) and the timestamps used in the object - will be seconds, reference to midnight 1 Jan 1970 (assuming that that - data is available in the MED data session). + """ extensions = ["medd", "rdat", "ridx"] diff --git a/neo/rawio/micromedrawio.py b/neo/rawio/micromedrawio.py index 95747cdc6..ffcbdfb87 100644 --- a/neo/rawio/micromedrawio.py +++ b/neo/rawio/micromedrawio.py @@ -33,6 +33,11 @@ def read_f(self, fmt, offset=None): class MicromedRawIO(BaseRawIO): """ Class for reading data from micromed (.trc). + + Parameters + ---------- + filename: str, default: None + The *.trc file to be loaded """ extensions = ["trc", "TRC"] diff --git a/neo/rawio/neuroscoperawio.py b/neo/rawio/neuroscoperawio.py index 3087d1bb5..71545001c 100644 --- a/neo/rawio/neuroscoperawio.py +++ b/neo/rawio/neuroscoperawio.py @@ -41,13 +41,15 @@ def __init__(self, filename, binary_file=None): ---------- filename : str, Path Usually the path of an xml file - binary_file : str or Path optional + binary_file : str | Path | None, default: None The binary data file Supported formats: ['.dat', '.lfp', '.eeg'] Neuroscope format is composed of two files: a xml file with metadata and a binary file in either .dat, .lfp or .eeg format. + Notes + ----- For backwards compatibility, we offer three ways of initializing the reader. Cases: diff --git a/neo/rawio/nixrawio.py b/neo/rawio/nixrawio.py index 1ceaeb9c2..ea863dcef 100644 --- a/neo/rawio/nixrawio.py +++ b/neo/rawio/nixrawio.py @@ -33,6 +33,16 @@ class NIXRawIO(BaseRawIO): + """Class for reading NIX files + + Parameters + ---------- + filename: str, default: '' + The nix file to be load + + Notes + ----- + Requires the nixio package to be installed""" extensions = ["nix", "h5"] rawmode = "one-file" diff --git a/neo/rawio/openephysrawio.py b/neo/rawio/openephysrawio.py index 64de9bb13..eec7ce475 100644 --- a/neo/rawio/openephysrawio.py +++ b/neo/rawio/openephysrawio.py @@ -28,6 +28,20 @@ class OpenEphysRawIO(BaseRawIO): """ + Class for reading openephys data + + Parameters + ---------- + dirname: str + The directory where the files are stored. + ignore_timestamps_errors: bool + (deprecated) This parameter is not used anymore. + fill_gap_value: int + When gaps are detected in continuous files, the gap is filled with this value. + Default is 0. 
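A brief usage sketch for this reader, assuming a legacy open-ephys recording folder (the directory name is a placeholder):

 >>> import neo.rawio
 >>> reader = neo.rawio.OpenEphysRawIO(dirname='2023-01-01_12-00-00')
 >>> reader.parse_header()
 >>> sigs = reader.get_analogsignal_chunk(block_index=0, seg_index=0, stream_index=0)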
+ + Notes + ----- OpenEphys GUI software offers several data formats, see https://open-ephys.atlassian.net/wiki/spaces/OEW/pages/491632/Data+format @@ -62,16 +76,6 @@ class OpenEphysRawIO(BaseRawIO): Theses gaps are checked channel per channel which make the parse_header() slow. If gaps are detected then they are filled with zeros but the the reading will be much slower for getting signals. - Parameters - ---------- - dirname: str - The directory where the files are stored. - ignore_timestamps_errors: bool - (deprecated) This parameter is not used anymore. - fill_gap_value: int - When gaps are detected in continuous files, the gap is filled with this value. - Default is 0. - """ # file formats used by openephys diff --git a/neo/rawio/rawbinarysignalrawio.py b/neo/rawio/rawbinarysignalrawio.py index 5e3870873..f6e8ac4ce 100644 --- a/neo/rawio/rawbinarysignalrawio.py +++ b/neo/rawio/rawbinarysignalrawio.py @@ -32,6 +32,26 @@ class RawBinarySignalIO class RawBinarySignalRawIO(BaseRawIO): + """ + Class for reading raw binary files with user specified values + Parameters + ---------- + filename: str, default: '' + The *.raw or *.bin binary file to load + dtype: np.dtype, default: 'int16' + The dtype that the data is stored with. Must be acceptable by the numpy.dtype constructor + sampling_rate: float, default: 10000.0 + The sampling rate of the file + nb_channel: int, default: 2 + The number of channels for the file + signal_gain: float, default: 1.0 + The gain for the signal in the binary file + signal_offset: float, default: 0.0 + The offset for the signal in the binary file + bytesoffset: int: 0 + The offset for the bytes + """ + extensions = ["raw", "bin"] rawmode = "one-file" diff --git a/neo/rawio/rawmcsrawio.py b/neo/rawio/rawmcsrawio.py index 4541b758e..845f7e41f 100644 --- a/neo/rawio/rawmcsrawio.py +++ b/neo/rawio/rawmcsrawio.py @@ -29,6 +29,15 @@ class RawMCSRawIO(BaseRawIO): + """ + Class for reading an mcs file converted by the MC_DataToo binary converter + + Parameters + ---------- + filename: str, default: '' + The *.raw MCS file to be loaded + + """ extensions = ["raw"] rawmode = "one-file" diff --git a/neo/rawio/spike2rawio.py b/neo/rawio/spike2rawio.py index 870f63e3a..d5a1fe9ef 100644 --- a/neo/rawio/spike2rawio.py +++ b/neo/rawio/spike2rawio.py @@ -32,8 +32,20 @@ class Spike2RawIO(BaseRawIO): """ - This implementation in neo read only old smr files. + This implementation in neo reads only old smr files. For smrx files you need to use CedRawIO which is based on sonpy. + + Parameters + ---------- + filename: str, default: '' + The *.smr file to be loaded + take_ideal_sampling_rate: bool, default: False + If True takes the `ideal_rate` from info + ced_units: bool, default: True + If True uses the ced unit ids + try_signal_grouping: bool, default: True + If True will attempt to group signals together + """ extensions = ["smr"]
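For the raw binary reader documented above, a short usage sketch; every layout argument is a placeholder and must match how the file was actually written, since there is no header to check against:

 >>> import neo.rawio
 >>> reader = neo.rawio.RawBinarySignalRawIO(filename='data.raw', dtype='int16',
 ...                                         sampling_rate=30000., nb_channel=16)
 >>> reader.parse_header()
 >>> raw_sigs = reader.get_analogsignal_chunk(block_index=0, seg_index=0, stream_index=0)
 >>> float_sigs = reader.rescale_signal_raw_to_float(raw_sigs, dtype='float64')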