From afb723b638365b719dca4b6f2d0adb2c083435c8 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 26 Oct 2023 17:45:48 +0200 Subject: [PATCH] Unify 'defaults' --- .../comparison/multicomparisons.py | 36 +++---- .../comparison/paircomparisons.py | 94 +++++++++--------- src/spikeinterface/core/base.py | 43 ++++---- src/spikeinterface/core/baseevent.py | 32 +++--- src/spikeinterface/core/baserecording.py | 64 ++++++------ .../core/baserecordingsnippets.py | 40 ++++---- src/spikeinterface/core/basesnippets.py | 28 +++--- src/spikeinterface/core/basesorting.py | 26 +++-- .../core/binaryrecordingextractor.py | 16 +-- src/spikeinterface/core/channelslice.py | 19 ++-- src/spikeinterface/core/core_tools.py | 93 +++++++++-------- src/spikeinterface/core/datasets.py | 18 ++-- src/spikeinterface/core/frameslicesorting.py | 8 +- src/spikeinterface/core/generate.py | 21 ++-- src/spikeinterface/core/job_tools.py | 28 +++--- src/spikeinterface/core/node_pipeline.py | 65 ++++++------ .../core/npysnippetsextractor.py | 18 ++-- src/spikeinterface/core/numpyextractors.py | 20 ++-- src/spikeinterface/core/recording_tools.py | 19 ++-- src/spikeinterface/core/segmentutils.py | 30 +++--- src/spikeinterface/core/waveform_extractor.py | 99 +++++++++---------- src/spikeinterface/core/waveform_tools.py | 40 ++++---- .../core/zarrrecordingextractor.py | 2 +- src/spikeinterface/curation/auto_merge.py | 79 ++++++++------- .../curation/remove_redundant.py | 30 +++--- .../curation/sortingview_curation.py | 14 +-- src/spikeinterface/exporters/report.py | 16 +-- src/spikeinterface/exporters/to_phy.py | 24 ++--- .../cellexplorersortingextractor.py | 4 +- .../extractors/iblstreamingrecording.py | 2 +- .../extractors/klustaextractors.py | 2 +- .../extractors/mdaextractors.py | 6 +- .../extractors/neoextractors/alphaomega.py | 6 +- .../extractors/neoextractors/biocam.py | 10 +- .../extractors/neoextractors/blackrock.py | 14 +-- .../extractors/neoextractors/ced.py | 6 +- 
.../extractors/neoextractors/edf.py | 4 +- .../extractors/neoextractors/intan.py | 4 +- .../extractors/neoextractors/maxwell.py | 6 +- .../extractors/neoextractors/mcsraw.py | 6 +- .../neoextractors/neobaseextractor.py | 14 +-- .../extractors/neoextractors/neuralynx.py | 8 +- .../extractors/neoextractors/neuroexplorer.py | 4 +- .../extractors/neoextractors/neuroscope.py | 8 +- .../extractors/neoextractors/nix.py | 6 +- .../extractors/neoextractors/openephys.py | 26 ++--- .../extractors/neoextractors/plexon.py | 4 +- .../extractors/neoextractors/plexon2.py | 4 +- .../extractors/neoextractors/spike2.py | 4 +- .../extractors/neoextractors/spikegadgets.py | 4 +- .../extractors/neoextractors/spikeglx.py | 4 +- .../extractors/neoextractors/tdt.py | 4 +- .../extractors/nwbextractors.py | 22 ++--- src/spikeinterface/extractors/toy_example.py | 34 +++---- .../extractors/tridesclousextractors.py | 2 +- .../extractors/waveclussnippetstextractors.py | 14 ++- .../postprocessing/amplitude_scalings.py | 15 +-- .../postprocessing/correlograms.py | 16 +-- src/spikeinterface/postprocessing/isi.py | 16 +-- .../postprocessing/noise_level.py | 12 +-- .../postprocessing/principal_component.py | 4 +- .../postprocessing/spike_amplitudes.py | 4 +- .../postprocessing/spike_locations.py | 32 +++--- .../postprocessing/template_metrics.py | 6 +- .../postprocessing/template_similarity.py | 2 +- .../postprocessing/unit_localization.py | 50 +++++----- .../preprocessing/average_across_direction.py | 8 +- src/spikeinterface/preprocessing/clip.py | 18 ++-- .../preprocessing/correct_lsb.py | 16 +-- .../preprocessing/depth_order.py | 4 +- .../preprocessing/detect_bad_channels.py | 84 ++++++++-------- .../preprocessing/directional_derivative.py | 12 +-- src/spikeinterface/preprocessing/filter.py | 16 +-- .../preprocessing/highpass_spatial_filter.py | 28 +++--- .../preprocessing/interpolate_bad_channels.py | 12 +-- .../preprocessing/normalize_scale.py | 28 +++--- .../preprocessing/phase_shift.py | 11 
++- .../preprocessing/preprocessing_tools.py | 12 +-- .../preprocessing/remove_artifacts.py | 20 ++-- src/spikeinterface/preprocessing/resample.py | 8 +- .../preprocessing/silence_periods.py | 4 +- .../preprocessing/tests/test_resample.py | 16 +-- src/spikeinterface/preprocessing/whiten.py | 6 +- .../preprocessing/zero_channel_pad.py | 5 +- .../qualitymetrics/misc_metrics.py | 72 +++++++------- .../qualitymetrics/pca_metrics.py | 14 +-- .../quality_metric_calculator.py | 4 +- src/spikeinterface/sorters/runsorter.py | 18 ++-- .../sortingcomponents/peak_detection.py | 4 +- src/spikeinterface/widgets/amplitudes.py | 29 +++--- .../widgets/crosscorrelograms.py | 20 ++-- src/spikeinterface/widgets/gtstudy.py | 2 +- src/spikeinterface/widgets/metrics.py | 14 +-- src/spikeinterface/widgets/motion.py | 36 +++---- src/spikeinterface/widgets/multicomparison.py | 25 +++-- src/spikeinterface/widgets/peak_activity.py | 10 +- src/spikeinterface/widgets/quality_metrics.py | 20 ++-- src/spikeinterface/widgets/sorting_summary.py | 26 +++-- src/spikeinterface/widgets/spike_locations.py | 29 +++--- .../widgets/spikes_on_traces.py | 60 +++++------ .../widgets/template_metrics.py | 20 ++-- .../widgets/template_similarity.py | 18 ++-- src/spikeinterface/widgets/traces.py | 55 +++++------ src/spikeinterface/widgets/unit_depths.py | 12 +-- src/spikeinterface/widgets/unit_locations.py | 28 +++--- src/spikeinterface/widgets/unit_presence.py | 10 +- src/spikeinterface/widgets/unit_probe_map.py | 6 +- src/spikeinterface/widgets/unit_summary.py | 8 +- src/spikeinterface/widgets/unit_waveforms.py | 72 +++++++------- .../widgets/unit_waveforms_density_map.py | 28 +++--- src/spikeinterface/widgets/utils.py | 12 +-- 111 files changed, 1182 insertions(+), 1189 deletions(-) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index f44e14c4c4..2ff98f58be 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ 
b/src/spikeinterface/comparison/multicomparisons.py @@ -25,22 +25,22 @@ class MultiSortingComparison(BaseMultiComparison, MixinSpikeTrainComparison): ---------- sorting_list: list List of sorting extractor objects to be compared - name_list: list + name_list: list, default: None List of spike sorter names. If not given, sorters are named as 'sorter0', 'sorter1', 'sorter2', etc. - delta_time: float - Number of ms to consider coincident spikes (default 0.4 ms) - match_score: float - Minimum agreement score to match units (default 0.5) - chance_score: float - Minimum agreement score to for a possible match (default 0.1) - n_jobs: int + delta_time: float, default: 0.4 + Number of ms to consider coincident spikes + match_score: float, default: 0.5 + Minimum agreement score to match units + chance_score: float, default: 0.1 + Minimum agreement score to for a possible match + n_jobs: int, default: -1 Number of cores to use in parallel. Uses all available if -1 - spiketrain_mode: str + spiketrain_mode: "union" | "intersection", default: "union" Mode to extract agreement spike trains: - - 'union': spike trains are the union between the spike trains of the best matching two sorters - - 'intersection': spike trains are the intersection between the spike trains of the + - "union": spike trains are the union between the spike trains of the best matching two sorters + - "intersection": spike trains are the intersection between the spike trains of the best matching two sorters - verbose: bool + verbose: bool, default: False if True, output is verbose Returns @@ -309,13 +309,13 @@ class MultiTemplateComparison(BaseMultiComparison, MixinTemplateComparison): ---------- waveform_list: list List of waveform extractor objects to be compared - name_list: list + name_list: list, default: None List of session names. If not given, sorters are named as 'sess0', 'sess1', 'sess2', etc. 
- match_score: float - Minimum agreement score to match units (default 0.5) - chance_score: float - Minimum agreement score to for a possible match (default 0.1) - verbose: bool + match_score: float, default: 0.8 + Minimum agreement score to match units + chance_score: float, default: 0.3 + Minimum agreement score to for a possible match + verbose: bool, default: False if True, output is verbose Returns diff --git a/src/spikeinterface/comparison/paircomparisons.py b/src/spikeinterface/comparison/paircomparisons.py index 75976ed44f..42ab48be8e 100644 --- a/src/spikeinterface/comparison/paircomparisons.py +++ b/src/spikeinterface/comparison/paircomparisons.py @@ -111,19 +111,19 @@ class SymmetricSortingComparison(BasePairSorterComparison): The first sorting for the comparison sorting2: SortingExtractor The second sorting for the comparison - sorting1_name: str + sorting1_name: str, default: None The name of sorter 1 - sorting2_name: : str + sorting2_name: : str, default: None The name of sorter 2 - delta_time: float - Number of ms to consider coincident spikes (default 0.4 ms) - match_score: float - Minimum agreement score to match units (default 0.5) - chance_score: float - Minimum agreement score to for a possible match (default 0.1) - n_jobs: int + delta_time: float, default: 0.4 + Number of ms to consider coincident spikes + match_score: float, default: 0.5 + Minimum agreement score to match units + chance_score: float, default: 0.1 + Minimum agreement score to for a possible match + n_jobs: int, default: -1 Number of cores to use in parallel. 
Uses all available if -1 - verbose: bool + verbose: bool, default: False If True, output is verbose Returns @@ -139,7 +139,6 @@ def __init__( sorting1_name=None, sorting2_name=None, delta_time=0.4, - sampling_frequency=None, match_score=0.5, chance_score=0.1, n_jobs=-1, @@ -214,34 +213,35 @@ class GroundTruthComparison(BasePairSorterComparison): The first sorting for the comparison tested_sorting: SortingExtractor The second sorting for the comparison - gt_name: str + gt_name: str, default: None The name of sorter 1 - tested_name: : str + tested_name: : str, default: None The name of sorter 2 - delta_time: float - Number of ms to consider coincident spikes (default 0.4 ms) match_score: float - Minimum agreement score to match units (default 0.5) - chance_score: float - Minimum agreement score to for a possible match (default 0.1) - redundant_score: float - Agreement score above which units are redundant (default 0.2) - overmerged_score: float - Agreement score above which units can be overmerged (default 0.2) - well_detected_score: float - Agreement score above which units are well detected (default 0.8) - exhaustive_gt: bool (default True) + delta_time: float, default: 0.4 + Number of ms to consider coincident spikes + match_score: float, default: 0.5 + Minimum agreement score to match units + chance_score: float, default: 0.1 + Minimum agreement score to for a possible match + redundant_score: float, default: 0.2 + Agreement score above which units are redundant + overmerged_score: float, default: 0.2 + Agreement score above which units can be overmerged + well_detected_score: float, default: 0.8 + Agreement score above which units are well detected + exhaustive_gt: bool, default: False Tell if the ground true is "exhaustive" or not. In other world if the GT have all possible units. It allows more performance measurement. 
For instance, MEArec simulated dataset have exhaustive_gt=True - match_mode: 'hungarian', or 'best' - What is match used for counting : 'hungarian' or 'best match'. - n_jobs: int + match_mode: "hungarian" | "best", default: 'hungarian + The method to match units + n_jobs: int, default: -1 Number of cores to use in parallel. Uses all available if -1 - compute_labels: bool - If True, labels are computed at instantiation (default False) - compute_misclassifications: bool - If True, misclassifications are computed at instantiation (default False) - verbose: bool + compute_labels: bool, default: False + If True, labels are computed at instantiation + compute_misclassifications: bool, default: False + If True, misclassifications are computed at instantiation + verbose: bool, default: False If True, output is verbose Returns @@ -471,7 +471,7 @@ def get_well_detected_units(self, well_detected_score=None): Parameters ---------- - well_detected_score: float (default 0.8) + well_detected_score: float, default: None The agreement score above which tested units are counted as "well detected". """ @@ -507,7 +507,7 @@ def get_false_positive_units(self, redundant_score=None): Parameters ---------- - redundant_score: float (default 0.2) + redundant_score: float, default: None The agreement score below which tested units are counted as "false positive"" (and not "redundant"). """ @@ -547,7 +547,7 @@ def get_redundant_units(self, redundant_score=None): Parameters ---------- - redundant_score=None: float (default 0.2) + redundant_score=None: float, default: None The agreement score above which tested units are counted as "redundant" (and not "false positive" ). """ @@ -582,7 +582,7 @@ def get_overmerged_units(self, overmerged_score=None): Parameters ---------- - overmerged_score: float (default 0.4) + overmerged_score: float, default: None Tested units with 2 or more agreement scores above 'overmerged_score' are counted as "overmerged". 
""" @@ -693,16 +693,16 @@ class TemplateComparison(BasePairComparison, MixinTemplateComparison): The first waveform extractor to get templates to compare we2 : WaveformExtractor The second waveform extractor to get templates to compare - unit_ids1 : list, optional - List of units from we1 to compare, by default None - unit_ids2 : list, optional - List of units from we2 to compare, by default None - similarity_method : str, optional - Method for the similaroty matrix, by default "cosine_similarity" - sparsity_dict : dict, optional - Dictionary for sparsity, by default None - verbose : bool, optional - If True, output is verbose, by default False + unit_ids1 : list, default: None + List of units from we1 to compare + unit_ids2 : list, default: None + List of units from we2 to compare + similarity_method : str, default: "cosine_similarity" + Method for the similaroty matrix + sparsity_dict : dict, default: None + Dictionary for sparsity + verbose : bool, default: False + If True, output is verbose Returns ------- diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index ad31b97d8e..4295a1f4f7 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -171,11 +171,11 @@ def set_property(self, key, values: Sequence, ids: Optional[Sequence] = None, mi The property name values : np.array Array of values for the property - ids : list/np.array, optional - List of subset of ids to set the values, by default None - missing_value : object, optional + ids : list/np.array, default: None + List of subset of ids to set the values, default: None + missing_value : object, default: None In case the property is set on a subset of values ('ids' not None), - it specifies the how the missing values should be filled, by default None. + it specifies the how the missing values should be filled. The missing_value has to be specified for types int and unsigned int. 
""" @@ -320,18 +320,18 @@ def to_dict( Parameters ---------- - include_annotations: bool - If True, all annotations are added to the dict, by default False - include_properties: bool - If True, all properties are added to the dict, by default False - relative_to: str, Path, or None - If not None, files and folders are serialized relative to this path, by default None + include_annotations: bool, default: False + If True, all annotations are added to the dict + include_properties: bool, default: False + If True, all properties are added to the dict + relative_to: str, Path, or None, default: None + If not None, files and folders are serialized relative to this path Used in waveform extractor to maintain relative paths to binary files even if the containing folder / diretory is moved folder_metadata: str, Path, or None Folder with numpy `npy` files containing additional information (e.g. probe in BaseRecording) and properties. - recursive: bool - If True, all dicitionaries in the kwargs are expanded with `to_dict` as well, by default False. + recursive: bool, default: False + If True, all dicitionaries in the kwargs are expanded with `to_dict` as well Returns ------- @@ -866,21 +866,22 @@ def save_to_zarr( Parameters ---------- - name: str or None + name: str or None, default: None Name of the subfolder in get_global_tmp_folder() If 'name' is given, 'folder' must be None. - folder: str, Path, or None + folder: str, Path, or None, default: None The folder used to save the zarr output. If the folder does not have a '.zarr' suffix, it will be automatically appended. - storage_options: dict or None + storage_options: dict or None, default: None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. For cloud storage locations, this should not be None (in case of default values, use an empty dict) - channel_chunk_size: int or None - Channels per chunk. 
Default None (chunking in time only) - verbose: bool - If True (default), the output is verbose. - zarr_path: str, Path, or None - (Deprecated) Name of the zarr folder (.zarr). + channel_chunk_size: int or None, default: None + Channels per chunk + verbose: bool, default: True + If True, the output is verbose + zarr_path: str, Path, or None, default: None + (Deprecated) Name of the zarr folder (.zarr) + **save_kwargs: Keyword arguments for saving. Returns ------- diff --git a/src/spikeinterface/core/baseevent.py b/src/spikeinterface/core/baseevent.py index 87651977e5..895a1f501a 100644 --- a/src/spikeinterface/core/baseevent.py +++ b/src/spikeinterface/core/baseevent.py @@ -80,14 +80,14 @@ def get_events( Parameters ---------- - channel_id : int or str, optional - The event channel id, by default None - segment_index : int, optional - The segment index, required for multi-segment objects, by default None - start_time : float, optional - The start time in seconds, by default None - end_time : float, optional - The end time in seconds, by default None + channel_id : int or str, default: None + The event channel id + segment_index : int or None, default: None + The segment index, required for multi-segment objects + start_time : float, default: None + The start time in seconds + end_time : float, default: None + The end time in seconds Returns ------- @@ -110,14 +110,14 @@ def get_event_times( Parameters ---------- - channel_id : int or str, optional - The event channel id, by default None - segment_index : int, optional - The segment index, required for multi-segment objects, by default None - start_time : float, optional - The start time in seconds, by default None - end_time : float, optional - The end time in seconds, by default None + channel_id : int or str, default: None + The event channel id + segment_index : int or None, default: None + The segment index, required for multi-segment objects + start_time : float, default: None + The start time in seconds + 
end_time : float, default: None + The end time in seconds Returns ------- diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 2977211c25..3bb706e74b 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -136,9 +136,9 @@ def get_num_samples(self, segment_index=None) -> int: Parameters ---------- - segment_index : int, optional + segment_index : int or None, default: None The segment index to retrieve the number of samples for. - For multi-segment objects, it is required, by default None + For multi-segment objects, it is required, default: None With single segment recording returns the number of samples in the segment Returns @@ -171,9 +171,9 @@ def get_duration(self, segment_index=None) -> float: Parameters ---------- - segment_index : int, optional + segment_index : int or None, default: None The sample index to retrieve the duration for. - For multi-segment objects, it is required, by default None + For multi-segment objects, it is required, default: None With single segment recording returns the duration of the single segment Returns @@ -204,9 +204,9 @@ def get_memory_size(self, segment_index=None) -> int: Parameters ---------- - segment_index : int, optional + segment_index : int or None, default: None The index of the segment for which the memory size should be calculated. - For multi-segment objects, it is required, by default None + For multi-segment objects, it is required, default: None With single segment recording returns the memory size of the single segment Returns @@ -249,22 +249,22 @@ def get_traces( Parameters ---------- - segment_index : Union[int, None], optional - The segment index to get traces from. If recording is multi-segment, it is required, by default None - start_frame : Union[int, None], optional - The start frame. If None, 0 is used, by default None - end_frame : Union[int, None], optional - The end frame. 
If None, the number of samples in the segment is used, by default None - channel_ids : Union[Iterable, None], optional - The channel ids. If None, all channels are used, by default None - order : Union[str, None], optional - The order of the traces ("C" | "F"). If None, traces are returned as they are, by default None - return_scaled : bool, optional + segment_index : Union[int, None], default: None + The segment index to get traces from. If recording is multi-segment, it is required, default: None + start_frame : Union[int, None], default: None + The start frame. If None, 0 is used, default: None + end_frame : Union[int, None], default: None + The end frame. If None, the number of samples in the segment is used, default: None + channel_ids : Union[Iterable, None], default: None + The channel ids. If None, all channels are used, default: None + order : Union[str, None], default: None + The order of the traces ("C" | "F"). If None, traces are returned as they are, default: None + return_scaled : bool, default: None If True and the recording has scaling (gain_to_uV and offset_to_uV properties), - traces are scaled to uV, by default False - cast_unsigned : bool, optional + traces are scaled to uV, default: False + cast_unsigned : bool, default: None If True and the traces are unsigned, they are cast to integer and centered - (an offset of (2**nbits) is subtracted), by default False + (an offset of (2**nbits) is subtracted), default: False Returns ------- @@ -362,8 +362,8 @@ def get_times(self, segment_index=None): Parameters ---------- - segment_index : int, optional - The segment index (required for multi-segment), by default None + segment_index : int or None, default: None + The segment index (required for multi-segment) Returns ------- @@ -380,8 +380,8 @@ def has_time_vector(self, segment_index=None): Parameters ---------- - segment_index : int, optional - The segment index (required for multi-segment), by default None + segment_index : int or None, default: None 
+ The segment index (required for multi-segment) Returns ------- @@ -400,10 +400,10 @@ def set_times(self, times, segment_index=None, with_warning=True): ---------- times : 1d np.array The time vector - segment_index : int, optional - The segment index (required for multi-segment), by default None - with_warning : bool, optional - If True, a warning is printed, by default True + segment_index : int or None, default: None + The segment index (required for multi-segment) + with_warning : bool, default: True + If True, a warning is printed """ segment_index = self._check_segment_index(segment_index) rs = self._recording_segments[segment_index] @@ -770,13 +770,13 @@ def get_traces( Parameters ---------- - start_frame: (Union[int, None], optional) + start_frame: Union[int, None], default: None start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) + end_frame: Union[int, None], default: None end_sample, or number of samples if None. Defaults to None. - channel_indices: (Union[List, None], optional) + channel_indices: Union[List, None], default: None Indices of channels to return, or all channels if None. Defaults to None. - order: (Order, optional) + order: list or None, default: None The memory order of the returned array. Use Order.C for C order, Order.F for Fortran order, or Order.K to keep the order of the underlying data. Defaults to Order.K. 
diff --git a/src/spikeinterface/core/baserecordingsnippets.py b/src/spikeinterface/core/baserecordingsnippets.py index d411f38d2a..61bbc9ba6c 100644 --- a/src/spikeinterface/core/baserecordingsnippets.py +++ b/src/spikeinterface/core/baserecordingsnippets.py @@ -259,12 +259,12 @@ def create_dummy_probe_from_locations(self, locations, shape="circle", shape_par ---------- locations : np.array Array with channel locations (num_channels, ndim) [ndim can be 2 or 3] - shape : str, optional - Electrode shapes, by default "circle" - shape_params : dict, optional - Shape parameters, by default {"radius": 1} - axes : str, optional - If ndim is 3, indicates the axes that define the plane of the electrodes, by default "xy" + shape : str, default: "circle" + Electrode shapes + shape_params : dict, default: {"radius": 1} + Shape parameters + axes : str, default: "xy" + If ndim is 3, indicates the axes that define the plane of the electrodes Returns ------- @@ -293,12 +293,12 @@ def set_dummy_probe_from_locations(self, locations, shape="circle", shape_params ---------- locations : np.array Array with channel locations (num_channels, ndim) [ndim can be 2 or 3] - shape : str, optional - Electrode shapes, by default "circle" - shape_params : dict, optional - Shape parameters, by default {"radius": 1} - axes : str, optional - If ndim is 3, indicates the axes that define the plane of the electrodes, by default "xy" + shape : str, default: default: "circle" + Electrode shapes + shape_params : dict, default: {"radius": 1} + Shape parameters + axes : str, default: "xy" + If ndim is 3, indicates the axes that define the plane of the electrodes """ probe = self.create_dummy_probe_from_locations(locations, shape=shape, shape_params=shape_params, axes=axes) self.set_probe(probe, in_place=True) @@ -386,8 +386,8 @@ def planarize(self, axes: str = "xy"): Parameters ---------- - axes : str, optional - The axes to keep, by default "xy" + axes : str, default: "xy" + The axes to keep Returns 
------- @@ -412,8 +412,8 @@ def channel_slice(self, channel_ids, renamed_channel_ids=None): ---------- channel_ids : np.array or list The list of channels to keep - renamed_channel_ids : np.array or list, optional - A list of renamed channels, by default None + renamed_channel_ids : np.array or list, default: None + A list of renamed channels Returns ------- @@ -479,10 +479,10 @@ def split_by(self, property="group", outputs="dict"): Parameters ---------- - property : str, optional - The property to use to split the object, by default 'group' - outputs : str, optional - 'dict' or 'list', by default 'dict' + property : str, default: 'group' + The property to use to split the object, default: 'group' + outputs : "dict" | "list", default: "dict" + Whether to return a dict or a list Returns ------- diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index b4e3c11f55..e1ac588334 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -1,4 +1,6 @@ -from typing import List, Union +from __future__ import annotations + +from typing import Union from .base import BaseSegment from .baserecordingsnippets import BaseRecordingSnippets import numpy as np @@ -17,7 +19,7 @@ class BaseSnippets(BaseRecordingSnippets): _main_features = [] def __init__( - self, sampling_frequency: float, nbefore: Union[int, None], snippet_len: int, channel_ids: List, dtype + self, sampling_frequency: float, nbefore: Union[int, None], snippet_len: int, channel_ids: list, dtype ): BaseRecordingSnippets.__init__( self, channel_ids=channel_ids, sampling_frequency=sampling_frequency, dtype=dtype @@ -25,7 +27,7 @@ def __init__( self._nbefore = nbefore self._snippet_len = snippet_len - self._snippets_segments: List[BaseSnippetsSegment] = [] + self._snippets_segments: list[BaseSnippetsSegment] = [] # initialize main annotation and properties def __repr__(self): @@ -90,7 +92,7 @@ def get_snippets( self, indices=None, 
segment_index: Union[int, None] = None, - channel_ids: Union[List, None] = None, + channel_ids: Union[list, None] = None, return_scaled=False, ): segment_index = self._check_segment_index(segment_index) @@ -116,7 +118,7 @@ def get_snippets_from_frames( segment_index: Union[int, None] = None, start_frame: Union[int, None] = None, end_frame: Union[int, None] = None, - channel_ids: Union[List, None] = None, + channel_ids: Union[list, None] = None, return_scaled=False, ): segment_index = self._check_segment_index(segment_index) @@ -220,18 +222,18 @@ def __init__(self): def get_snippets( self, - indices=None, - channel_indices: Union[List, None] = None, + indices, + channel_indices: Union[list, None] = None, ) -> np.ndarray: """ Return the snippets, optionally for a subset of samples and/or channels Parameters ---------- - indexes: (Union[int, None], optional) - indices of the snippets to return, or all if None. Defaults to None. - channel_indices: (Union[List, None], optional) - Indices of channels to return, or all channels if None. Defaults to None. + indices: list[int] + Indices of the snippets to return + channel_indices: Union[list, None], default: None + Indices of channels to return, or all channels if None Returns ------- @@ -262,9 +264,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: (Union[int, None], optional) + start_frame: Union[int, None], default: None start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) + end_frame: Union[int, None], default: None end_sample, or number of samples if None. Defaults to None. 
Returns diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 2a06a699cb..90f816466d 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -66,9 +66,9 @@ def get_num_samples(self, segment_index=None): Parameters ---------- - segment_index : int, optional + segment_index : int or None, default: None The segment index to retrieve the number of samples for. - For multi-segment objects, it is required, by default None + For multi-segment objects, it is required Returns ------- @@ -157,9 +157,8 @@ def register_recording(self, recording, check_spike_frames=True): recording : BaseRecording Recording with the same number of segments as current sorting. Assigned to self._recording. - check_spike_frames : bool, optional + check_spike_frames : bool, default: True If True, assert for each segment that all spikes are within the recording's range. - By default True. """ assert np.isclose( self.get_sampling_frequency(), recording.get_sampling_frequency(), atol=0.1 @@ -317,8 +316,8 @@ def select_units(self, unit_ids, renamed_unit_ids=None): ---------- unit_ids : numpy.array or list List of unit ids to keep - renamed_unit_ids : numpy.array or list, optional - If given, the kept unit ids are renamed, by default None + renamed_unit_ids : numpy.array or list, default: None + If given, the kept unit ids are renamed Returns ------- @@ -432,16 +431,15 @@ def to_spike_vector(self, concatenated=True, extremum_channel_inds=None, use_cac Parameters ---------- - concatenated: bool - With concatenated=True (default) the output is one numpy "spike vector" with spikes from all segments. + concatenated: bool, default: True + With concatenated=True the output is one numpy "spike vector" with spikes from all segments. With concatenated=False the output is a list "spike vector" by segment. 
- extremum_channel_inds: None or dict + extremum_channel_inds: None or dict, default: None If a dictionnary of unit_id to channel_ind is given then an extra field 'channel_index'. This can be convinient for computing spikes postion after sorter. - This dict can be computed with `get_template_extremum_channel(we, outputs="index")` - use_cache: bool - When True (default) the spikes vector is cached as an attribute of the object (`_cached_spike_vector`). + use_cache: bool, default: True + When True the spikes vector is cached as an attribute of the object (`_cached_spike_vector`). This caching only occurs when extremum_channel_inds=None. Returns @@ -597,8 +595,8 @@ def get_unit_spike_train( Parameters ---------- unit_id - start_frame: int, optional - end_frame: int, optional + start_frame: int, default: None + end_frame: int, default: None Returns ------- diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index b45290caa5..5755effafe 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -26,19 +26,19 @@ class BinaryRecordingExtractor(BaseRecording): Number of channels dtype: str or dtype The dtype of the binary file - time_axis: int - The axis of the time dimension (default 0: F order) - t_starts: None or list of float + time_axis: int, default: 0 + The axis of the time dimension + t_starts: None or list of float, default: None Times in seconds of the first sample for each segment - channel_ids: list (optional) + channel_ids: list, default: None A list of channel ids - file_offset: int (optional) + file_offset: int, default: 0 Number of bytes in the file to offset by during memmap instantiation. 
- gain_to_uV: float or array-like (optional) + gain_to_uV: float or array-like, default: None The gain to apply to the traces - offset_to_uV: float or array-like + offset_to_uV: float or array-like, default: None The offset to apply to the traces - is_filtered: bool or None + is_filtered: bool or None, default: None If True, the recording is assumed to be filtered. If None, is_filtered is not set. Notes diff --git a/src/spikeinterface/core/channelslice.py b/src/spikeinterface/core/channelslice.py index 3a21e356a6..9987edadc6 100644 --- a/src/spikeinterface/core/channelslice.py +++ b/src/spikeinterface/core/channelslice.py @@ -1,4 +1,5 @@ -from typing import List, Union +from __future__ import annotations +from typing import Union import numpy as np @@ -87,7 +88,7 @@ def get_traces( self, start_frame: Union[int, None] = None, end_frame: Union[int, None] = None, - channel_indices: Union[List, None] = None, + channel_indices: Union[list, None] = None, ) -> np.ndarray: parent_indices = self._parent_channel_indices[channel_indices] traces = self._parent_recording_segment.get_traces(start_frame, end_frame, parent_indices) @@ -181,20 +182,18 @@ def get_frames(self, indices=None): def get_snippets( self, - indices, - channel_indices: Union[List, None] = None, + indices: list[int], + channel_indices: Union[list, None] = None, ) -> np.ndarray: """ Return the snippets, optionally for a subset of samples and/or channels Parameters ---------- - indexes: (Union[int, None], optional) - start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) - end_sample, or number of samples if None. Defaults to None. - channel_indices: (Union[List, None], optional) - Indices of channels to return, or all channels if None. Defaults to None. 
+ indices: list[int] + Indices of the snippets to return + channel_indices: Union[List, None], default: None + Indices of channels to return, or all channels if None Returns ------- diff --git a/src/spikeinterface/core/core_tools.py b/src/spikeinterface/core/core_tools.py index 106a794f6e..0bf24098bc 100644 --- a/src/spikeinterface/core/core_tools.py +++ b/src/spikeinterface/core/core_tools.py @@ -172,10 +172,10 @@ def read_binary_recording(file, num_channels, dtype, time_axis=0, offset=0): Number of channels dtype: dtype dtype of the file - time_axis: 0 (default) or 1 + time_axis: 0 or 1, default: 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - offset: int + offset: int, default: 0 number of offset bytes """ @@ -243,7 +243,7 @@ def _write_binary_chunk(segment_index, start_frame, end_frame, worker_ctx): def write_binary_recording( recording, - file_paths=None, + file_paths, dtype=None, add_file_extension=True, byte_offset=0, @@ -261,19 +261,18 @@ def write_binary_recording( ---------- recording: RecordingExtractor The recording extractor object to be saved in .dat format - file_path: str + file_path: str or list[str] The path to the file. - dtype: dtype + dtype: dtype, default: None Type of the saved data. Default float32. - add_file_extension: bool - If True (default), file the '.raw' file extension is added if the file name is not a 'raw', 'bin', or 'dat' - byte_offset: int - Offset in bytes (default 0) to for the binary file (e.g. to write a header) - auto_cast_uint: bool - If True (default), unsigned integers are automatically cast to int if the specified dtype is signed + add_file_extension: bool, default: True + If True, file the '.raw' file extension is added if the file name is not a 'raw', 'bin', or 'dat' + byte_offset: int, default: 0 + Offset in bytes to for the binary file (e.g. 
to write a header) + auto_cast_uint: bool, default: True + If True, unsigned integers are automatically cast to int if the specified dtype is signed {} """ - assert file_paths is not None, "Provide 'file_path'" job_kwargs = fix_job_kwargs(job_kwargs) file_path_list = [file_paths] if not isinstance(file_paths, list) else file_paths @@ -430,12 +429,12 @@ def write_memory_recording(recording, dtype=None, verbose=False, auto_cast_uint= ---------- recording: RecordingExtractor The recording extractor object to be saved in .dat format - dtype: dtype - Type of the saved data. Default float32. - verbose: bool + dtype: dtype, default: None + Type of the saved data + verbose: bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint: bool - If True (default), unsigned integers are automatically cast to int if the specified dtype is signed + auto_cast_uint: bool, default: True + If True, unsigned integers are automatically cast to int if the specified dtype is signed {} Returns @@ -514,30 +513,30 @@ def write_to_h5_dataset_format( Path to dataset in h5 file (e.g. '/dataset') segment_index: int index of segment - save_path: str + save_path: str, default: None The path to the file. - file_handle: file handle + file_handle: file handle, default: None The file handle to dump data. This can be used to append data to an header. In case file_handle is given, the file is NOT closed after writing the binary data. - time_axis: 0 (default) or 1 + time_axis: 0 or 1, default: 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - single_axis: bool, default False - If True, a single-channel recording is saved as a one dimensional array. - dtype: dtype - Type of the saved data. Default float32. 
- chunk_size: None or int + single_axis: bool, default: False + If True, a single-channel recording is saved as a one dimensional array + dtype: dtype, default None + Type of the saved data + chunk_size: None or int, default: None Number of chunks to save the file in. This avoid to much memory consumption for big files. - If None and 'chunk_memory' is given, the file is saved in chunks of 'chunk_memory' MB (default 500MB) - chunk_memory: None or str - Chunk size in bytes must endswith 'k', 'M' or 'G' (default '500M') - verbose: bool + If None and 'chunk_memory' is given, the file is saved in chunks of 'chunk_memory' MB + chunk_memory: None or str, default: '500M' + Chunk size in bytes must endswith 'k', 'M' or 'G' + verbose: bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint: bool - If True (default), unsigned integers are automatically cast to int if the specified dtype is signed - return_scaled : bool, optional + auto_cast_uint: bool, default: True + If True, unsigned integers are automatically cast to int if the specified dtype is signed + return_scaled : bool, default: False If True and the recording has scaling (gain_to_uV and offset_to_uV properties), - traces are dumped to uV, by default False + traces are dumped to uV """ import h5py @@ -655,18 +654,18 @@ def write_traces_to_zarr( Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. dataset_paths: list List of paths to traces datasets in the zarr group - channel_chunk_size: int or None - Channels per chunk. Default None (chunking in time only) - dtype: dtype - Type of the saved data. Default float32. 
- compressor: zarr compressor or None + channel_chunk_size: int or None, default: None (chunking in time only) + Channels per chunk + dtype: dtype, default: None + Type of the saved data + compressor: zarr compressor or None, default: None Zarr compressor - filters: list + filters: list, default: None List of zarr filters - verbose: bool + verbose: bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint: bool - If True (default), unsigned integers are automatically cast to int if the specified dtype is signed + auto_cast_uint: bool, default: True + If True, unsigned integers are automatically cast to int if the specified dtype is signed {} """ assert dataset_paths is not None, "Provide 'file_path'" @@ -804,10 +803,10 @@ def recursive_path_modifier(d, func, target="path", copy=True) -> dict: Extractor dictionary func : function Function to apply to the path. It must take a path as input and return a path - target : str, optional - String to match to dictionary key, by default 'path' - copy : bool, optional - If True the original dictionary is deep copied, by default True (at first call) + target : str, default: "path" + String to match to dictionary key + copy : bool, default: True (at first call) + If True the original dictionary is deep copied Returns ------- @@ -870,7 +869,7 @@ def convert_seconds_to_str(seconds: float, long_notation: bool = True) -> str: ---------- seconds : float The duration in seconds. - long_notation : bool, optional, default: True + long_notation : bool, default: True Whether to display the time with additional units (such as milliseconds, minutes, hours, or days). 
If set to True, the function will display a more detailed representation of the duration, including other units alongside the primary diff --git a/src/spikeinterface/core/datasets.py b/src/spikeinterface/core/datasets.py index e3b6d7b22d..7e03492d1a 100644 --- a/src/spikeinterface/core/datasets.py +++ b/src/spikeinterface/core/datasets.py @@ -20,19 +20,17 @@ def download_dataset( Parameters ---------- - repo : str, optional - The repository to download the dataset from, - defaults to: 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' - remote_path : str + repo : str, default: "https://gin.g-node.org/NeuralEnsemble/ephy_testing_data" + The repository to download the dataset from + remote_path : str, default: "mearec/mearec_test_10s.h5" A specific subdirectory in the repository to download (e.g. Mearec, SpikeGLX, etc) - defaults to: "mearec/mearec_test_10s.h5" - local_folder : str, optional + local_folder : str, default: None The destination folder / directory to download the dataset to. 
defaults to the path "get_global_dataset_folder()" / f{repo_name} (see `spikeinterface.core.globals`) - update_if_exists : bool, optional - Forces re-download of the dataset if it already exists, by default False - unlock : bool, optional - Use to enable the edition of the downloaded file content, by default False + update_if_exists : bool, default: False + Forces re-download of the dataset if it already exists, default: False + unlock : bool, default: False + Use to enable the edition of the downloaded file content, default: False Returns ------- diff --git a/src/spikeinterface/core/frameslicesorting.py b/src/spikeinterface/core/frameslicesorting.py index ed1391b0e2..fd7b6a1391 100644 --- a/src/spikeinterface/core/frameslicesorting.py +++ b/src/spikeinterface/core/frameslicesorting.py @@ -20,15 +20,15 @@ class FrameSliceSorting(BaseSorting): Parameters ---------- parent_sorting: BaseSorting - start_frame: None or int + start_frame: None or int, default None Earliest included frame in the parent sorting(/recording). Spike times(/traces) are re-referenced to start_frame in the - sliced objects. Set to 0 by default. - end_frame: None or int + sliced objects. Set to 0 if None. + end_frame: None or int, default None Latest frame in the parent sorting(/recording). As for usual python slicing, the end frame is excluded (such that the max spike frame in the sliced sorting is `end_frame - start_frame - 1`) - If None (default), the end_frame is either: + If None, the end_frame is either: - The total number of samples, if a recording is assigned - The maximum spike frame + 1, if no recording is assigned """ diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 44ea02d32c..2fdab4e460 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -43,13 +43,13 @@ def generate_recording( num_channels : int, default 2 The number of channels in the recording. sampling_frequency : float, default 30000. 
(in Hz) - The sampling frequency of the recording, by default 30000. + The sampling frequency of the recording, default: 30000. durations: List[float], default [5.0, 2.5] - The duration in seconds of each segment in the recording, by default [5.0, 2.5]. + The duration in seconds of each segment in the recording, default: [5.0, 2.5]. Note that the number of segments is determined by the length of this list. set_probe: bool, default True ndim : int, default 2 - The number of dimensions of the probe, by default 2. Set to 3 to make 3 dimensional probes. + The number of dimensions of the probe, default: 2. Set to 3 to make 3 dimensional probes. seed : Optional[int] A seed for the np.ramdom.default_rng function mode: str ["lazy", "legacy"] Default "lazy". @@ -344,7 +344,7 @@ def synthesize_random_firings( If float, all units will have the same firing rate. add_shift_shuffle: bool, default False Optionaly add a small shuffle on half spike to autocorrelogram - seed: int, optional + seed: int, default: None seed for the generator Returns @@ -546,8 +546,8 @@ def synthetize_spike_train_bad_isi(duration, baseline_rate, num_violations, viol Firing rate for 'true' spikes. num_violations : int Number of contaminating spikes. - violation_delta : float, optional - Temporal offset of contaminating spikes (in seconds), by default 1e-5. + violation_delta : float, default: 1e-5 + Temporal offset of contaminating spikes (in seconds) Returns ------- @@ -588,9 +588,9 @@ class NoiseGeneratorRecording(BaseRecording): The durations of each segment in seconds. Note that the length of this list is the number of segments. noise_level: float, default 1: Std of the white noise - dtype : Optional[Union[np.dtype, str]], default='float32' + dtype : Optional[Union[np.dtype, str]], default: 'float32' The dtype of the recording. Note that only np.float32 and np.float64 are supported. - seed : Optional[int], default=None + seed : Optional[int], default: None The seed for np.random.default_rng. 
strategy : "tile_pregenerated" or "on_the_fly" The strategy of generating noise chunk: @@ -763,8 +763,9 @@ def generate_recording_by_size( The size in gigabytes (GiB) of the recording. num_channels: int Number of channels. - seed : int, optional - The seed for np.random.default_rng, by default None + seed : int, default: None + The seed for np.random.default_rng + Returns ------- GeneratorRecording diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index cf7a67489c..d7dc276b95 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -270,32 +270,32 @@ class ChunkRecordingExecutor: Arguments for init_func verbose: bool If True, output is verbose - progress_bar: bool - If True, a progress bar is printed to monitor the progress of the process - handle_returns: bool + job_name: str, default: '' + Job name + handle_returns: bool, default: False If True, the function can return values - gather_func: None or callable + gather_func: None or callable, default: None Optional function that is called in the main thread and retrieves the results of each worker. This function can be used instead of `handle_returns` to implement custom storage on-the-fly. - n_jobs: int - Number of jobs to be used (default 1). Use -1 to use as many jobs as number of cores - total_memory: str + n_jobs: int, default: 1 + Number of jobs to be used. Use -1 to use as many jobs as number of cores + total_memory: str, default: None Total memory (RAM) to use (e.g. "1G", "500M") - chunk_memory: str + chunk_memory: str, default: None Memory per chunk (RAM) to use (e.g. "1G", "500M") - chunk_size: int or None + chunk_size: int or None, default: None Size of each chunk in number of samples. If 'total_memory' or 'chunk_memory' are used, it is ignored. chunk_duration : str or float or None Chunk duration in s if float or with units if str (e.g. '1s', '500ms') - mp_context : str or None - "fork" (default) or "spawn". 
If None, the context is taken by the recording.get_preferred_mp_context(). + mp_context : str or None, default: None + "fork" or "spawn". If None, the context is taken by the recording.get_preferred_mp_context(). "fork" is only available on UNIX systems. - job_name: str - Job name - max_threads_per_process: int or None + max_threads_per_process: int or None, default: None Limit the number of thread per process using threadpoolctl modules. This used only when n_jobs>1 If None, no limits. + progress_bar: bool, default: False + If True, a progress bar is printed to monitor the progress of the process Returns diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index a6dabf77b5..a00df98e05 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -55,10 +55,10 @@ def __init__( ---------- recording : BaseRecording The recording object. - parents : Optional[List[PipelineNode]], optional - Pass parents nodes to perform a previous computation, by default None - return_output : bool or tuple of bool - Whether or not the output of the node is returned by the pipeline, by default False + parents : Optional[List[PipelineNode]], default: None + Pass parents nodes to perform a previous computation + return_output : bool or tuple of bool, default: True + Whether or not the output of the node is returned by the pipeline. When a Node have several toutputs then this can be a tuple of bool. @@ -154,10 +154,10 @@ class SpikeRetriever(PeakSource): If False, the max channel is computed for each spike given a radius around the template max channel. extremum_channel_inds: dict of int The extremum channel index dict given from template. - radius_um: float (default 50.) + radius_um: float, default: 50 The radius to find the real max channel. Used only when channel_from_template=False - peak_sign: str (default "neg") + peak_sign: str, default: "neg" Peak sign to find the max channel. 
Used only when channel_from_template=False """ @@ -256,14 +256,14 @@ def __init__( ---------- recording : BaseRecording The recording object. - parents : Optional[List[PipelineNode]], optional - Pass parents nodes to perform a previous computation, by default None - return_output : bool, optional - Whether or not the output of the node is returned by the pipeline, by default False - ms_before : float, optional - The number of milliseconds to include before the peak of the spike, by default 1. - ms_after : float, optional - The number of milliseconds to include after the peak of the spike, by default 1. + ms_before : float + The number of milliseconds to include before the peak of the spike + ms_after : float + The number of milliseconds to include after the peak of the spike + parents : Optional[List[PipelineNode]], default: None + Pass parents nodes to perform a previous computation + return_output : bool, default: False + Whether or not the output of the node is returned by the pipeline """ PipelineNode.__init__(self, recording=recording, parents=parents, return_output=return_output) @@ -291,14 +291,15 @@ def __init__( ---------- recording : BaseRecording The recording object. - parents : Optional[List[PipelineNode]], optional - Pass parents nodes to perform a previous computation, by default None - return_output : bool, optional - Whether or not the output of the node is returned by the pipeline, by default False - ms_before : float, optional - The number of milliseconds to include before the peak of the spike, by default 1. - ms_after : float, optional - The number of milliseconds to include after the peak of the spike, by default 1. 
+ ms_before : float + The number of milliseconds to include before the peak of the spike + ms_after : float + The number of milliseconds to include after the peak of the spike + parents : Optional[List[PipelineNode]], default: None + Pass parents nodes to perform a previous computation + return_output : bool, default: False + Whether or not the output of the node is returned by the pipeline + """ WaveformsNode.__init__( @@ -344,17 +345,15 @@ def __init__( Parameters ---------- recording : BaseRecording - The recording object. - parents : Optional[List[PipelineNode]], optional - Pass parents nodes to perform a previous computation, by default None - return_output : bool, optional - Whether or not the output of the node is returned by the pipeline, by default False - ms_before : float, optional - The number of milliseconds to include before the peak of the spike, by default 1. - ms_after : float, optional - The number of milliseconds to include after the peak of the spike, by default 1. - - + The recording object + ms_before : float + The number of milliseconds to include before the peak of the spike + ms_after : float + The number of milliseconds to include after the peak of the spike + parents : Optional[List[PipelineNode]], default: None + Pass parents nodes to perform a previous computation + return_output : bool, default: False + Whether or not the output of the node is returned by the pipeline """ WaveformsNode.__init__( self, diff --git a/src/spikeinterface/core/npysnippetsextractor.py b/src/spikeinterface/core/npysnippetsextractor.py index 69c48356e5..40fbfac4d3 100644 --- a/src/spikeinterface/core/npysnippetsextractor.py +++ b/src/spikeinterface/core/npysnippetsextractor.py @@ -109,12 +109,10 @@ def get_snippets( Parameters ---------- - indexes: (Union[int, None], optional) - start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) - end_sample, or number of samples if None. Defaults to None. 
- channel_indices: (Union[List, None], optional) - Indices of channels to return, or all channels if None. Defaults to None. + indices: list[int] + Indices of the snippets to return, or all if None + channel_indices: Union[List, None], default: None + Indices of channels to return, or all channels if None Returns ------- @@ -134,10 +132,10 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: (Union[int, None], optional) - start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) - end_sample, or number of samples if None. Defaults to None. + start_frame: Union[int, None], default: None + start sample index, or zero if None + end_frame: Union[int, None], default: None + end_sample, or number of samples if None Returns ------- diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 3d7ec6cd1a..380d461d8e 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np from spikeinterface.core import ( BaseRecording, @@ -14,7 +16,7 @@ from multiprocessing.shared_memory import SharedMemory -from typing import List, Union +from typing import Union class NumpyRecording(BaseRecording): @@ -547,19 +549,17 @@ def __init__(self, snippets, spikesframes): def get_snippets( self, indices, - channel_indices: Union[List, None] = None, + channel_indices: Union[list, None] = None, ) -> np.ndarray: """ Return the snippets, optionally for a subset of samples and/or channels Parameters ---------- - indexes: (Union[int, None], optional) - start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) - end_sample, or number of samples if None. Defaults to None. - channel_indices: (Union[List, None], optional) - Indices of channels to return, or all channels if None. Defaults to None. 
+ indices: list[int] + Indices of the snippets to return + channel_indices: Union[list, None], default: None + Indices of channels to return, or all channels if None Returns ------- @@ -579,9 +579,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: (Union[int, None], optional) + start_frame: Union[int, None], default: None start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) + end_frame: Union[int, None], default: None end_sample, or number of samples if None. Defaults to None. Returns diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index ff9cd99389..2790e2954c 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -23,16 +23,19 @@ def get_random_data_chunks( ---------- recording: BaseRecording The recording to get random chunks from - return_scaled: bool + return_scaled: bool, default: False If True, returned chunks are scaled to uV - num_chunks_per_segment: int + num_chunks_per_segment: int, default: 20 Number of chunks per segment - chunk_size: int + chunk_size: int, default: 10000 Size of a chunk in number of frames - concatenated: bool (default True) - If True chunk are concatenated along time axis. 
- seed: int + concatenated: bool, default: True + If True chunk are concatenated along time axis + seed: int, default: 0 Random seed + margin_frames: int, default: 0 + Margin in number of frames to avoid edge effects + Returns ------- chunk_list: np.array @@ -98,7 +101,7 @@ def get_closest_channels(recording, channel_ids=None, num_channels=None): The recording extractor to get closest channels channel_ids: list List of channels ids to compute there near neighborhood - num_channels: int, optional + num_channels: int, default: None Maximum number of neighborhood channels to return Returns @@ -315,7 +318,7 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), dimensions : str, tuple, or list If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. - This approach is recommended since there is less ambiguity, by default ('x', 'y') + This approach is recommended since there is less ambiguity, default: ('x', 'y') flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. 
diff --git a/src/spikeinterface/core/segmentutils.py b/src/spikeinterface/core/segmentutils.py index 85e36cf7a5..614dd0b295 100644 --- a/src/spikeinterface/core/segmentutils.py +++ b/src/spikeinterface/core/segmentutils.py @@ -41,8 +41,8 @@ class AppendSegmentRecording(BaseRecording): ---------- recording_list : list of BaseRecording A list of recordings - sampling_frequency_max_diff : float - Maximum allowed difference of sampling frequencies across recordings (default 0) + sampling_frequency_max_diff : float, default: 0 + Maximum allowed difference of sampling frequencies across recordings """ def __init__(self, recording_list, sampling_frequency_max_diff=0): @@ -106,10 +106,10 @@ class ConcatenateSegmentRecording(BaseRecording): ---------- recording_list : list of BaseRecording A list of recordings - ignore_times: bool - If True (default), time information (t_start, time_vector) is ignored when concatenating recordings. - sampling_frequency_max_diff : float - Maximum allowed difference of sampling frequencies across recordings (default 0) + ignore_times: bool, default: True + If True, time information (t_start, time_vector) is ignored when concatenating recordings + sampling_frequency_max_diff : float, default: 0 + Maximum allowed difference of sampling frequencies across recordings """ def __init__(self, recording_list, ignore_times=True, sampling_frequency_max_diff=0): @@ -284,8 +284,8 @@ class AppendSegmentSorting(BaseSorting): ---------- sorting_list : list of BaseSorting A list of sortings - sampling_frequency_max_diff : float - Maximum allowed difference of sampling frequencies across sortings (default 0) + sampling_frequency_max_diff : float, default: 0 + Maximum allowed difference of sampling frequencies across sortings """ def __init__(self, sorting_list, sampling_frequency_max_diff=0): @@ -345,15 +345,15 @@ class ConcatenateSegmentSorting(BaseSorting): A list of sortings. 
If `total_samples_list` is not provided, all sortings should have an assigned recording. Otherwise, all sortings should be monosegments. - total_samples_list : list[int] or None + total_samples_list : list[int] or None, default: None If the sortings have no assigned recording, the total number of samples of each of the concatenated (monosegment) sortings is pulled from this list. - ignore_times : bool - If True (default), time information (t_start, time_vector) is ignored + ignore_times : bool, default: True + If True, time information (t_start, time_vector) is ignored when concatenating the sortings' assigned recordings. - sampling_frequency_max_diff : float - Maximum allowed difference of sampling frequencies across sortings (default 0) + sampling_frequency_max_diff : float, default: 0 + Maximum allowed difference of sampling frequencies across sortings """ def __init__(self, sorting_list, total_samples_list=None, ignore_times=True, sampling_frequency_max_diff=0): @@ -523,12 +523,12 @@ class SplitSegmentSorting(BaseSorting): ---------- parent_sorting : BaseSorting Sorting with a single segment (e.g. 
from sorting concatenated recording) - recording_or_recording_list : list of recordings, ConcatenateSegmentRecording, or None + recording_or_recording_list : list of recordings, ConcatenateSegmentRecording, or None, default: None If list of recordings, uses the lengths of those recordings to split the sorting into smaller segments If ConcatenateSegmentRecording, uses the associated list of recordings to split the sorting into smaller segments - If None, looks for the recording associated with the sorting (default None) + If None, looks for the recording associated with the sorting """ def __init__(self, parent_sorting: BaseSorting, recording_or_recording_list=None): diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index d4ae140b90..26b702e3d6 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -875,15 +875,15 @@ def save( ---------- folder : str or Path The output waveform folder - format : str, optional - "binary", "zarr", by default "binary" + format : "binary" | "zarr", default: "binary" + The backend to use for saving the waveforms overwrite : bool - If True and folder exists, it is deleted, by default False - use_relative_path : bool, optional + If True and folder exists, it is deleted, default: False + use_relative_path : bool, default: False If True, the recording and sorting paths are relative to the waveforms folder. 
This allows portability of the waveform folder provided that the relative paths are the same, - but forces all the data files to be in the same drive, by default False - sparsity : ChannelSparsity, optional + but forces all the data files to be in the same drive + sparsity : ChannelSparsity, default: None If given and WaveformExtractor is not sparse, it makes the returned WaveformExtractor sparse """ folder = Path(folder) @@ -1050,17 +1050,17 @@ def get_waveforms( ---------- unit_id: int or str Unit id to retrieve waveforms for - with_index: bool - If True, spike indices of extracted waveforms are returned (default False) - cache: bool - If True, waveforms are cached to the self._waveforms dictionary (default False) - lazy: bool + with_index: bool, default: False + If True, spike indices of extracted waveforms are returned + cache: bool, default: False + If True, waveforms are cached to the self._waveforms dictionary + lazy: bool, default: True If True, waveforms are loaded as memmap objects (when format="binary") or Zarr datasets (when format="zarr"). 
- If False, waveforms are loaded as np.array objects (default True) - sparsity: ChannelSparsity, optional + If False, waveforms are loaded as np.array objects + sparsity: ChannelSparsity, default: None Sparsity to apply to the waveforms (if WaveformExtractor is not sparse) - force_dense: bool (False) + force_dense: bool, default: False Return dense waveforms even if the waveform extractor is sparse Returns @@ -1164,7 +1164,7 @@ def get_waveforms_segment(self, segment_index: int, unit_id, sparsity): The segment index to retrieve waveforms from unit_id: int or str Unit id to retrieve waveforms for - sparsity: ChannelSparsity, optional + sparsity: ChannelSparsity, default: None Sparsity to apply to the waveforms (if WaveformExtractor is not sparse) Returns @@ -1229,8 +1229,8 @@ def get_all_templates(self, unit_ids: Optional[Iterable] = None, mode="average") ---------- unit_ids: list or None Unit ids to retrieve waveforms for - mode: str - 'average' (default) or 'median' , 'std' + mode: 'average' | 'median' | 'std', default: 'average' + The mode to compute the templates Returns ------- @@ -1256,9 +1256,9 @@ def get_template(self, unit_id, mode="average", sparsity=None, force_dense: bool ---------- unit_id: int or str Unit id to retrieve waveforms for - mode: str - 'average' (default), 'median' , 'std'(standard deviation) - sparsity: ChannelSparsity, optional + mode: 'average' | 'median' | 'std', default: 'average' + The mode to compute the template + sparsity: ChannelSparsity, default: None Sparsity to apply to the waveforms (if WaveformExtractor is not sparse) force_dense: bool (False) Return a dense template even if the waveform extractor is sparse @@ -1314,9 +1314,9 @@ def get_template_segment(self, unit_id, segment_index, mode="average", sparsity= Unit id to retrieve waveforms for segment_index: int The segment index to retrieve template from - mode: str - 'average' (default), 'median', 'std'(standard deviation) - sparsity: ChannelSparsity, optional + mode: 
'average' | 'median' | 'std', default: 'average' + The mode to compute the template + sparsity: ChannelSparsity, default: None Sparsity to apply to the waveforms (if WaveformExtractor is not sparse). Returns @@ -1502,46 +1502,45 @@ def extract_waveforms( The recording object sorting: Sorting The sorting object - folder: str or Path or None + folder: str or Path or None, default: None The folder where waveforms are cached - mode: str - "folder" (default) or "memory". The "folder" argument must be specified in case of mode "folder". + mode: "folder" | "memory, default: 'folder' + The mode to store waveforms. If 'folder', waveforms are stored on disk in the specified folder. + The "folder" argument must be specified in case of mode "folder". If "memory" is used, the waveforms are stored in RAM. Use this option carefully! - precompute_template: None or list - Precompute average/std/median for template. If None not precompute. - ms_before: float + precompute_template: None or list, default: ['average'] + Precompute average/std/median for template. If None, no templates are precomputed + ms_before: float, default: 1.0 Time in ms to cut before spike peak - ms_after: float + ms_after: float, default: 2.0 Time in ms to cut after spike peak - max_spikes_per_unit: int or None - Number of spikes per unit to extract waveforms from (default 500). + max_spikes_per_unit: int or None, default: 500 + Number of spikes per unit to extract waveforms from Use None to extract waveforms for all spikes - overwrite: bool - If True and 'folder' exists, the folder is removed and waveforms are recomputed. + overwrite: bool, default: False + If True and 'folder' exists, the folder is removed and waveforms are recomputed Otherwise an error is raised. - return_scaled: bool - If True and recording has gain_to_uV/offset_to_uV properties, waveforms are converted to uV. - dtype: dtype or None - Dtype of the output waveforms. If None, the recording dtype is maintained. 
+ return_scaled: bool, default: True + If True and recording has gain_to_uV/offset_to_uV properties, waveforms are converted to uV + dtype: dtype or None, default: None + Dtype of the output waveforms. If None, the recording dtype is maintained sparse: bool, default: True If True, before extracting all waveforms the `precompute_sparsity()` function is run using a few spikes to get an estimate of dense templates to create a ChannelSparsity object. Then, the waveforms will be sparse at extraction time, which saves a lot of memory. When True, you must some provide kwargs handle `precompute_sparsity()` to control the kind of - sparsity you want to apply (by radius, by best channels, ...). - sparsity: ChannelSparsity or None + sparsity you want to apply (by radius, by best channels, ...) + sparsity: ChannelSparsity or None, default: None The sparsity used to compute waveforms. If this is given, `sparse` is ignored. Default None. - num_spikes_for_sparsity: int (default 100) + num_spikes_for_sparsity: int, default: 100 The number of spikes to use to estimate sparsity (if sparse=True). - allow_unfiltered: bool + allow_unfiltered: bool, default: False If true, will accept an allow_unfiltered recording. - False by default. - use_relative_path: bool + use_relative_path: bool, default: False If True, the recording and sorting paths are relative to the waveforms folder. This allows portability of the waveform folder provided that the relative paths are the same, but forces all the data files to be in the same drive. - Default is False. - seed: int or None + seed: int or None, default: None Random seed for spike selection sparsity kwargs: @@ -1654,11 +1653,11 @@ def load_waveforms(folder, with_recording: bool = True, sorting: Optional[BaseSo ---------- folder : str or Path The folder / zarr folder where the waveform extractor is stored - with_recording : bool, optional - If True, the recording is loaded, by default True. 
+ with_recording : bool, default: True + If True, the recording is loaded. If False, the WaveformExtractor object in recordingless mode. - sorting : BaseSorting, optional - If passed, the sorting object associated to the waveform extractor, by default None + sorting : BaseSorting, default: None + If passed, the sorting object associated to the waveform extractor Returns ------- diff --git a/src/spikeinterface/core/waveform_tools.py b/src/spikeinterface/core/waveform_tools.py index a2f1296e31..07fea4c8f0 100644 --- a/src/spikeinterface/core/waveform_tools.py +++ b/src/spikeinterface/core/waveform_tools.py @@ -54,18 +54,18 @@ def extract_waveforms_to_buffers( N samples before spike nafter: int N samples after spike - mode: str - Mode to use ('memmap' | 'shared_memory') - return_scaled: bool - Scale traces before exporting to buffer or not. - folder: str or path + mode: "memmap" | "shared_memory", default "memmap" + The mode to use for the buffer + return_scaled: bool, default False + Scale traces before exporting to buffer or not + folder: str or path or None, default: None In case of memmap mode, folder to save npy files - dtype: numpy.dtype + dtype: numpy.dtype, default: None dtype for waveforms buffer - sparsity_mask: None or array of bool + sparsity_mask: None or array of bool, default: None If not None shape must be must be (len(unit_ids), len(channel_ids)) - copy: bool - If True (default), the output shared memory object is copied to a numpy standard array. + copy: bool, default: False + If True, the output shared memory object is copied to a numpy standard array. If copy=False then arrays_info is also return. Please keep in mind that arrays_info need to be referenced as long as waveforms_by_units will be used otherwise it will be very hard to debug. 
Also when copy=False the SharedMemory will need to be unlink manually @@ -419,7 +419,7 @@ def extract_waveforms_to_single_buffer( Important note: for the "shared_memory" mode wf_array_info contains reference to the shared memmory buffer, this variable must be referenced as long as arrays is used. This variable must also unlink() when the array is de-referenced. - To avoid this complicated behavior, by default (copy=True) the shared memmory buffer is copied into a standard + To avoid this complicated behavior, by default (copy=True) the shared memory buffer is copied into a standard numpy array. @@ -436,18 +436,18 @@ def extract_waveforms_to_single_buffer( N samples before spike nafter: int N samples after spike - mode: str - Mode to use ('memmap' | 'shared_memory') - return_scaled: bool - Scale traces before exporting to buffer or not. - file_path: str or path - In case of memmap mode, file to save npy file. - dtype: numpy.dtype + mode: "memmap" | "shared_memory", default: "memmap" + The mode to use for the buffer + return_scaled: bool, default: False + Scale traces before exporting to buffer or not + file_path: str or path or None, default: None + In case of memmap mode, file to save npy file + dtype: numpy.dtype, default: None dtype for waveforms buffer - sparsity_mask: None or array of bool + sparsity_mask: None or array of bool, default: None If not None shape must be must be (len(unit_ids), len(channel_ids)) - copy: bool - If True (default), the output shared memory object is copied to a numpy standard array and no reference + copy: bool, default: True + If True, the output shared memory object is copied to a numpy standard array and no reference to the internal shared memory object is kept. If copy=False then the shared memory object is also returned. 
Please keep in mind that the shared memory object need to be referenced as long as all_waveforms will be used otherwise it might produce segmentation diff --git a/src/spikeinterface/core/zarrrecordingextractor.py b/src/spikeinterface/core/zarrrecordingextractor.py index 4dc94a24dd..6c15044ad9 100644 --- a/src/spikeinterface/core/zarrrecordingextractor.py +++ b/src/spikeinterface/core/zarrrecordingextractor.py @@ -169,7 +169,7 @@ def get_default_zarr_compressor(clevel=5): Parameters ---------- - clevel : int, optional + clevel : int, default: 5 Compression level (higher -> more compressed). Minimum 1, maximum 9. By default 5 diff --git a/src/spikeinterface/curation/auto_merge.py b/src/spikeinterface/curation/auto_merge.py index 5e7047a5c1..2f217fd102 100644 --- a/src/spikeinterface/curation/auto_merge.py +++ b/src/spikeinterface/curation/auto_merge.py @@ -58,45 +58,44 @@ def get_potential_auto_merge( ---------- waveform_extractor: WaveformExtractor The waveform extractor - minimum_spikes: int + minimum_spikes: int, default: 1000 Minimum number of spikes for each unit to consider a potential merge. 
- Enough spikes are needed to estimate the correlogram, by default 1000 - maximum_distance_um: float - Minimum distance between units for considering a merge, by default 150 - peak_sign: "neg"/"pos"/"both" - Peak sign used to estimate the maximum channel of a template, by default "neg" - bin_ms: float - Bin size in ms used for computing the correlogram, by default 0.25 - window_ms: float - Window size in ms used for computing the correlogram, by default 100 - corr_diff_thresh: float + Enough spikes are needed to estimate the correlogram + maximum_distance_um: float, default: 150 + Minimum distance between units for considering a merge + peak_sign: "neg" | "pos" | "both", default: "neg" + Peak sign used to estimate the maximum channel of a template + bin_ms: float, default: 0.25 + Bin size in ms used for computing the correlogram + window_ms: float, default: 100 + Window size in ms used for computing the correlogram + corr_diff_thresh: float, default: 0.16 The threshold on the "correlogram distance metric" for considering a merge. - It needs to be between 0 and 1, by default 0.16 - template_diff_thresh: float + It needs to be between 0 and 1 + template_diff_thresh: float, default: 0.25 The threshold on the "template distance metric" for considering a merge. 
- It needs to be between 0 and 1, by default 0.25 - censored_period_ms: float - Used to compute the refractory period violations aka "contamination", by default 0 - refractory_period_ms: float - Used to compute the refractory period violations aka "contamination", by default 1 - sigma_smooth_ms: float - Parameters to smooth the correlogram estimation, by default 0.6 - contamination_threshold: float - Threshold for not taking in account a unit when it is too contaminated, by default 0.2 - adaptative_window_threshold:: float - Parameter to detect the window size in correlogram estimation, by default 0.5 - censor_correlograms_ms: float - The period to censor on the auto and cross-correlograms, by default 0.15 ms - num_channels: int - Number of channel to use for template similarity computation, by default 5 - num_shift: int - Number of shifts in samles to be explored for template similarity computation, by default 5 - firing_contamination_balance: float - Parameter to control the balance between firing rate and contamination in computing unit "quality score", - by default 1.5 - extra_outputs: bool - If True, an additional dictionary (`outs`) with processed data is returned, by default False - steps: None or list of str + It needs to be between 0 and 1 + censored_period_ms: float, default: 0.3 + Used to compute the refractory period violations aka "contamination" + refractory_period_ms: float, default: 1 + Used to compute the refractory period violations aka "contamination" + sigma_smooth_ms: float, default: 0.6 + Parameters to smooth the correlogram estimation + contamination_threshold: float, default: 0.2 + Threshold for not taking in account a unit when it is too contaminated + adaptative_window_threshold: float, default: 0.5 + Parameter to detect the window size in correlogram estimation + censor_correlograms_ms: float, default: 0.15 + The period to censor on the auto and cross-correlograms + num_channels: int, default: 5 + Number of channel to use for template 
similarity computation + num_shift: int, default: 5 + Number of shifts in samples to be explored for template similarity computation + firing_contamination_balance: float, default: 1.5 + Parameter to control the balance between firing rate and contamination in computing unit "quality score" + extra_outputs: bool, default: False + If True, an additional dictionary (`outs`) with processed data is returned + steps: None or list of str, default: None which steps to run (gives flexibility to running just some steps) If None all steps are done. Pontential steps: 'min_spikes', 'remove_contaminated', 'unit_positions', 'correlogram', 'template_similarity', @@ -378,10 +377,10 @@ def compute_templates_diff(sorting, templates, num_channels=5, num_shift=5, pair The sorting object templates : np.array The templates array (num_units, num_samples, num_channels) - num_channels: int, optional - Number of channel to use for template similarity computation, by default 5 - num_shift: int, optional - Number of shifts in samles to be explored for template similarity computation, by default 5 + num_channels: int, default: 5 + Number of channel to use for template similarity computation + num_shift: int, default: 5 + Number of shifts in samples to be explored for template similarity computation pair_mask: None or boolean array A bool matrix of size (num_units, num_units) to select which pair to compute. diff --git a/src/spikeinterface/curation/remove_redundant.py b/src/spikeinterface/curation/remove_redundant.py index e13f83550a..a64729189d 100644 --- a/src/spikeinterface/curation/remove_redundant.py +++ b/src/spikeinterface/curation/remove_redundant.py @@ -37,15 +37,15 @@ def remove_redundant_units( If WaveformExtractor, the spike trains can be optionally realigned using the peak shift in the template to improve the matching procedure. If BaseSorting, the spike trains are not aligned. 
- align : bool, optional - If True, spike trains are aligned (if a WaveformExtractor is used), by default False - delta_time : float, optional - The time in ms to consider matching spikes, by default 0.4 - agreement_threshold : float, optional - Threshold on the agreement scores to flag possible redundant/duplicate units, by default 0.2 - duplicate_threshold : float, optional + align : bool, default: False + If True, spike trains are aligned (if a WaveformExtractor is used) + delta_time : float, default: 0.4 + The time in ms to consider matching spikes + agreement_threshold : float, default: 0.2 + Threshold on the agreement scores to flag possible redundant/duplicate units + duplicate_threshold : float, default: 0.8 Final threshold on the portion of coincident events over the number of spikes above which the - unit is removed, by default 0.8 + unit is removed remove_strategy: 'minimum_shift' | 'highest_amplitude' | 'max_spikes', default: 'minimum_shift' Which strategy to remove one of the two duplicated units: @@ -56,7 +56,7 @@ def remove_redundant_units( peak_sign: 'neg' |'pos' | 'both', default: 'neg' Used when remove_strategy='highest_amplitude' - extra_outputs: bool + extra_outputs: bool, default: False If True, will return the redundant pairs. 
Returns @@ -147,13 +147,13 @@ def find_redundant_units(sorting, delta_time: float = 0.4, agreement_threshold=0 ---------- sorting : BaseSorting The input sorting object - delta_time : float, optional - The time in ms to consider matching spikes, by default 0.4 - agreement_threshold : float, optional - Threshold on the agreement scores to flag possible redundant/duplicate units, by default 0.2 - duplicate_threshold : float, optional + delta_time : float, default: 0.4 + The time in ms to consider matching spikes + agreement_threshold : float, default: 0.2 + Threshold on the agreement scores to flag possible redundant/duplicate units + duplicate_threshold : float, default: 0.8 Final threshold on the portion of coincident events over the number of spikes above which the - unit is flagged as duplicate/redundant, by default 0.8 + unit is flagged as duplicate/redundant Returns ------- diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index 626ea79eb9..b115bc8e0c 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -19,16 +19,16 @@ def apply_sortingview_curation( The sorting object to be curated uri_or_json : str or Path The URI curation link from SortingView or the path to the curation json file - exclude_labels : list, optional + exclude_labels : list, default: None Optional list of labels to exclude (e.g. ["reject", "noise"]). - Mutually exclusive with include_labels, by default None - include_labels : list, optional + Mutually exclusive with include_labels + include_labels : list, default: None Optional list of labels to include (e.g. ["accept"]). 
Mutually exclusive with exclude_labels, by default None - skip_merge : bool, optional - If True, merges are not applied (only labels), by default False - verbose : bool, optional - If True, output is verbose, by default False + skip_merge : bool, default: False + If True, merges are not applied (only labels) + verbose : bool, default: False + If True, output is verbose Returns ------- diff --git a/src/spikeinterface/exporters/report.py b/src/spikeinterface/exporters/report.py index 4910c4348f..57a5ab0166 100644 --- a/src/spikeinterface/exporters/report.py +++ b/src/spikeinterface/exporters/report.py @@ -30,16 +30,16 @@ def export_report( If WaveformExtractor is provide then the compute is faster otherwise output_folder: str The output folder where the report files are saved - remove_if_exists: bool + remove_if_exists: bool, default: False If True and the output folder exists, it is removed - format: str - 'png' (default) or 'pdf' or any format handled by matplotlib - peak_sign: 'neg' or 'pos' + format: str, default: "png" + The output figure format (any format handled by matplotlib) + peak_sign: "neg" or "pos", default: "neg" used to compute amplitudes and metrics - show_figures: bool - If True, figures are shown. If False (default), figures are closed after saving. - force_computation: bool default False - Force or not some heavy computaion before exporting. + show_figures: bool, default: False + If True, figures are shown. 
If False, figures are closed after saving + force_computation: bool, default: False + Force or not some heavy computaion before exporting {} """ import pandas as pd diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 0529c99d12..7347d6c0e6 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -47,23 +47,23 @@ def export_to_phy( If WaveformExtractor is provide then the compute is faster otherwise output_folder: str | Path The output folder where the phy template-gui files are saved - compute_pc_features: bool - If True (default), pc features are computed - compute_amplitudes: bool - If True (default), waveforms amplitudes are computed - sparsity: ChannelSparsity or None - The sparsity object. - copy_binary: bool + compute_pc_features: bool, default: True + If True, pc features are computed + compute_amplitudes: bool, default: True + If True, waveforms amplitudes are computed + sparsity: ChannelSparsity or None, default: None + The sparsity object + copy_binary: bool, default: True If True, the recording is copied and saved in the phy 'output_folder' - remove_if_exists: bool + remove_if_exists: bool, default: False If True and 'output_folder' exists, it is removed and overwritten - peak_sign: 'neg', 'pos', 'both' + peak_sign: "neg" | "pos" | "both", default: "neg" Used by compute_spike_amplitudes - template_mode: str + template_mode: str, default: 'median' Parameter 'mode' to be given to WaveformExtractor.get_template() - dtype: dtype or None + dtype: dtype or None, default: None Dtype to save binary data - verbose: bool + verbose: bool, default: True If True, output is verbose use_relative_path : bool, default: False If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r'recording.dat'`). 
If `copy_binary=False`, then uses a path relative to the `output_folder` diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index 0980e89f1c..3436313b4d 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -24,9 +24,9 @@ class CellExplorerSortingExtractor(BaseSorting): ---------- file_path: str | Path Path to `.mat` file containing spikes. Usually named `session_id.spikes.cellinfo.mat` - sampling_frequency: float | None, optional + sampling_frequency: float | None, default: None The sampling frequency of the data. If None, it will be extracted from the files. - session_info_file_path: str | Path | None, optional + session_info_file_path: str | Path | None, default: None Path to the `sessionInfo.mat` file. If None, it will be inferred from the file_path. """ diff --git a/src/spikeinterface/extractors/iblstreamingrecording.py b/src/spikeinterface/extractors/iblstreamingrecording.py index fcd03f8bcf..69626f3bd9 100644 --- a/src/spikeinterface/extractors/iblstreamingrecording.py +++ b/src/spikeinterface/extractors/iblstreamingrecording.py @@ -32,7 +32,7 @@ class IblStreamingRecordingExtractor(BaseRecording): load_sync_channels : bool, default: false Load or not the last channel (sync). If not then the probe is loaded. - cache_folder : str, optional + cache_folder : str or None, default: None The location to temporarily store chunks of data during streaming. The default uses the folder designated by ONE.alyx._par.CACHE_DIR / "cache", which is typically the designated 'Downloads' folder on your operating system. 
As long as `remove_cached` is set to True, the only files that will diff --git a/src/spikeinterface/extractors/klustaextractors.py b/src/spikeinterface/extractors/klustaextractors.py index f6a86ae9ae..83718cffb2 100644 --- a/src/spikeinterface/extractors/klustaextractors.py +++ b/src/spikeinterface/extractors/klustaextractors.py @@ -31,7 +31,7 @@ class KlustaSortingExtractor(BaseSorting): ---------- file_or_folder_path : str or Path Path to the ALF folder. - exclude_cluster_groups: list or str, optional + exclude_cluster_groups: list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). Returns diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index 1eb0182318..5f3faca477 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -89,11 +89,11 @@ def write_recording( Sampling frequency is appended to this dictionary. raw_fname: str File name of raw file. Defaults to 'raw.mda'. - params_fname: str + params_fname: str, default 'params.json' File name of params file. Defaults to 'params.json'. - geom_fname: str + geom_fname: str, default 'geom.csv' File name of geom file. Defaults to 'geom.csv'. - dtype: dtype + dtype: dtype or None, default None Data type to be used. If None dtype is same as recording traces. **job_kwargs: Use by job_tools modules to set: diff --git a/src/spikeinterface/extractors/neoextractors/alphaomega.py b/src/spikeinterface/extractors/neoextractors/alphaomega.py index a58b5ab5ec..57bc1dfbd6 100644 --- a/src/spikeinterface/extractors/neoextractors/alphaomega.py +++ b/src/spikeinterface/extractors/neoextractors/alphaomega.py @@ -15,11 +15,11 @@ class AlphaOmegaRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path: str or Path-like The folder path to the AlphaOmega recordings. 
- lsx_files: list of strings or None, optional + lsx_files: list of strings or None, default: None A list of listings files that refers to mpx files to load. - stream_id: {'RAW', 'LFP', 'SPK', 'ACC', 'AI', 'UD'}, optional + stream_id: {'RAW', 'LFP', 'SPK', 'ACC', 'AI', 'UD'}, default: 'RAW' If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/biocam.py b/src/spikeinterface/extractors/neoextractors/biocam.py index 3e30cf77ae..cde1167835 100644 --- a/src/spikeinterface/extractors/neoextractors/biocam.py +++ b/src/spikeinterface/extractors/neoextractors/biocam.py @@ -17,15 +17,15 @@ class BiocamRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - mea_pitch: float, optional + mea_pitch: float, default: None The inter-electrode distance (pitch) between electrodes. - electrode_width: float, optional + electrode_width: float, default: None Width of the electrodes in um. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - all_annotations: bool (default False) + all_annotations: bool, default: False Load exhaustively all annotations from neo. 
""" diff --git a/src/spikeinterface/extractors/neoextractors/blackrock.py b/src/spikeinterface/extractors/neoextractors/blackrock.py index 8300e6bc5e..474bdd21a0 100644 --- a/src/spikeinterface/extractors/neoextractors/blackrock.py +++ b/src/spikeinterface/extractors/neoextractors/blackrock.py @@ -19,9 +19,9 @@ class BlackrockRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. @@ -74,14 +74,14 @@ class BlackrockSortingExtractor(NeoBaseSortingExtractor): Parameters ---------- file_path: str - The file path to load the recordings from. - sampling_frequency: float, None by default. + The file path to load the recordings from + sampling_frequency: float, default: None The sampling frequency for the sorting extractor. When the signal data is available (.ncs) those files will be used to extract the frequency automatically. Otherwise, the sampling frequency needs to be specified for - this extractor to be initialized. - stream_id: str, optional + this extractor to be initialized + stream_id: str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. - stream_name: str, optional + stream_name: str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. 
""" diff --git a/src/spikeinterface/extractors/neoextractors/ced.py b/src/spikeinterface/extractors/neoextractors/ced.py index 2451ca8fe1..e7bc1bffb4 100644 --- a/src/spikeinterface/extractors/neoextractors/ced.py +++ b/src/spikeinterface/extractors/neoextractors/ced.py @@ -17,11 +17,11 @@ class CedRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to the smr or smrx file. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks, specify the block index you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/edf.py b/src/spikeinterface/extractors/neoextractors/edf.py index 5d8c56ee87..5aa51d9725 100644 --- a/src/spikeinterface/extractors/neoextractors/edf.py +++ b/src/spikeinterface/extractors/neoextractors/edf.py @@ -15,10 +15,10 @@ class EDFRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. For this neo reader streams are defined by their sampling frequency. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. 
diff --git a/src/spikeinterface/extractors/neoextractors/intan.py b/src/spikeinterface/extractors/neoextractors/intan.py index 2a61e7385f..3584844180 100644 --- a/src/spikeinterface/extractors/neoextractors/intan.py +++ b/src/spikeinterface/extractors/neoextractors/intan.py @@ -15,9 +15,9 @@ class IntanRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/maxwell.py b/src/spikeinterface/extractors/neoextractors/maxwell.py index ac85dbdf30..ea54f9f201 100644 --- a/src/spikeinterface/extractors/neoextractors/maxwell.py +++ b/src/spikeinterface/extractors/neoextractors/maxwell.py @@ -20,15 +20,15 @@ class MaxwellRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to the maxwell h5 file. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. For MaxTwo when there are several wells at the same time you need to specify stream_id='well000' or 'well0001', etc. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. - rec_name: str, optional + rec_name: str, default: None When the file contains several recordings you need to specify the one you want to extract. (rec_name='rec0000'). 
install_maxwell_plugin: bool, default: False diff --git a/src/spikeinterface/extractors/neoextractors/mcsraw.py b/src/spikeinterface/extractors/neoextractors/mcsraw.py index 4b6af54bcd..24eea2d058 100644 --- a/src/spikeinterface/extractors/neoextractors/mcsraw.py +++ b/src/spikeinterface/extractors/neoextractors/mcsraw.py @@ -18,11 +18,11 @@ class MCSRawRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks, specify the block index you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/neobaseextractor.py b/src/spikeinterface/extractors/neoextractors/neobaseextractor.py index 5c94d99b1e..40c246bafa 100644 --- a/src/spikeinterface/extractors/neoextractors/neobaseextractor.py +++ b/src/spikeinterface/extractors/neoextractors/neobaseextractor.py @@ -167,15 +167,15 @@ def __init__( Parameters ---------- - stream_id : Optional[str], default=None + stream_id : Optional[str], default: None The ID of the stream to extract from the data. - stream_name : Optional[str], default=None + stream_name : Optional[str], default: None The name of the stream to extract from the data. - block_index : Optional[int], default=None + block_index : Optional[int], default: None The index of the block to extract from the data. - all_annotations : bool, default=False + all_annotations : bool, default: False If True, include all annotations in the extracted data. 
- use_names_as_ids : Optional[bool], default=None + use_names_as_ids : Optional[bool], default: None If True, use channel names as IDs. Otherwise, use default IDs. neo_kwargs : Dict[str, Any] Additional keyword arguments to pass to the NeoBaseExtractor for initialization. @@ -402,7 +402,7 @@ def _infer_sampling_frequency_from_analog_signal(self, stream_id: Optional[str] Parameters ---------- - stream_id : str, optional + stream_id : str, default: None The ID of the stream from which to infer the sampling frequency. If not provided, the function will look for a common sampling frequency across all streams. (default is None) @@ -491,7 +491,7 @@ def _infer_t_start_from_signal_stream(self, segment_index: int, stream_id: Optio ---------- segment_index : int The index of the segment in which to look for the stream. - stream_id : str, optional + stream_id : str, default: None The ID of the stream from which to infer t_start. If not provided, the function will look for streams with a matching sampling frequency. diff --git a/src/spikeinterface/extractors/neoextractors/neuralynx.py b/src/spikeinterface/extractors/neoextractors/neuralynx.py index 672602b66c..47452b8003 100644 --- a/src/spikeinterface/extractors/neoextractors/neuralynx.py +++ b/src/spikeinterface/extractors/neoextractors/neuralynx.py @@ -16,9 +16,9 @@ class NeuralynxRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. @@ -54,9 +54,9 @@ class NeuralynxSortingExtractor(NeoBaseSortingExtractor): sampling_frequency: float The sampling frequency for the spiking channels. 
When the signal data is available (.ncs) those files will be used to extract the frequency. Otherwise, the sampling frequency needs to be specified for this extractor. - stream_id: str, optional + stream_id: str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. - stream_name: str, optional + stream_name: str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. """ diff --git a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py index 2c8603cb9c..94c6953a3d 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py +++ b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py @@ -36,10 +36,10 @@ class NeuroExplorerRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. For this neo reader streams are defined by their sampling frequency. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/neuroscope.py b/src/spikeinterface/extractors/neoextractors/neuroscope.py index c652ce4fb9..b2731dc5dc 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroscope.py +++ b/src/spikeinterface/extractors/neoextractors/neuroscope.py @@ -25,11 +25,11 @@ class NeuroScopeRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to the binary container usually a .dat, .lfp, .eeg extension. - xml_file_path: str, optional + xml_file_path: str, default: None The path to the xml file. 
If None, the xml file is assumed to have the same name as the binary file. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. @@ -97,7 +97,7 @@ class NeuroScopeSortingExtractor(BaseSorting): exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res.%i and .clu.%i pairs. - xml_file_path : PathType, optional + xml_file_path : PathType, default: None Path to the .xml file referenced by this sorting. """ diff --git a/src/spikeinterface/extractors/neoextractors/nix.py b/src/spikeinterface/extractors/neoextractors/nix.py index 2762e5645b..298b8c6019 100644 --- a/src/spikeinterface/extractors/neoextractors/nix.py +++ b/src/spikeinterface/extractors/neoextractors/nix.py @@ -15,11 +15,11 @@ class NixRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks, specify the block index you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. 
diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index bb3ae3435a..3ffa01377a 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -50,11 +50,11 @@ class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path: str The folder path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks (experiments), specify the block index you want to load. all_annotations: bool (default False) Load exhaustively all annotation from neo. @@ -108,22 +108,22 @@ class OpenEphysBinaryRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path: str The folder path to the root folder (containing the record node folders). - load_sync_channel : bool + load_sync_channel : bool, default: False If False (default) and a SYNC channel is present (e.g. Neuropixels), this is not loaded. If True, the SYNC channel is loaded and can be accessed in the analog signals. - load_sync_timestamps : bool + load_sync_timestamps : bool, default: False If True, the synchronized_timestamps are loaded and set as times to the recording. If False (default), only the t_start and sampling rate are set, and timestamps are assumed to be uniform and linearly increasing. - experiment_names: str, list, or None + experiment_names: str, list, or None, default: None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiements are loaded as blocks. E.g. 
'experiment_names="experiment2"', 'experiment_names=["experiment1", "experiment2"]' - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks (experiments), specify the block index you want to load. all_annotations: bool (default False) Load exhaustively all annotation from neo. @@ -287,11 +287,11 @@ def read_openephys(folder_path, **kwargs): ---------- folder_path: str or Path Path to openephys folder - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. - block_index: int, optional + block_index: int, default: None If there are several blocks (experiments), specify the block index you want to load. all_annotations: bool (default False) Load exhaustively all annotation from neo. @@ -319,7 +319,7 @@ def read_openephys_event(folder_path, block_index=None): ---------- folder_path: str or Path Path to openephys folder - block_index: int, optional + block_index: int, default: None If there are several blocks (experiments), specify the block index you want to load. Returns diff --git a/src/spikeinterface/extractors/neoextractors/plexon.py b/src/spikeinterface/extractors/neoextractors/plexon.py index c3ff59fe82..b62bd473b6 100644 --- a/src/spikeinterface/extractors/neoextractors/plexon.py +++ b/src/spikeinterface/extractors/neoextractors/plexon.py @@ -15,9 +15,9 @@ class PlexonRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. 
- stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/plexon2.py b/src/spikeinterface/extractors/neoextractors/plexon2.py index 8dbfc67e90..d176e6546d 100644 --- a/src/spikeinterface/extractors/neoextractors/plexon2.py +++ b/src/spikeinterface/extractors/neoextractors/plexon2.py @@ -13,9 +13,9 @@ class Plexon2RecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/spike2.py b/src/spikeinterface/extractors/neoextractors/spike2.py index af172855ed..a600c61c11 100644 --- a/src/spikeinterface/extractors/neoextractors/spike2.py +++ b/src/spikeinterface/extractors/neoextractors/spike2.py @@ -16,9 +16,9 @@ class Spike2RecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. 
diff --git a/src/spikeinterface/extractors/neoextractors/spikegadgets.py b/src/spikeinterface/extractors/neoextractors/spikegadgets.py index 49d55ca3eb..18b6496d6e 100644 --- a/src/spikeinterface/extractors/neoextractors/spikegadgets.py +++ b/src/spikeinterface/extractors/neoextractors/spikegadgets.py @@ -15,9 +15,9 @@ class SpikeGadgetsRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path: str The file path to load the recordings from. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/neoextractors/spikeglx.py b/src/spikeinterface/extractors/neoextractors/spikeglx.py index 8c3b33505d..c66464cf98 100644 --- a/src/spikeinterface/extractors/neoextractors/spikeglx.py +++ b/src/spikeinterface/extractors/neoextractors/spikeglx.py @@ -33,10 +33,10 @@ class SpikeGLXRecordingExtractor(NeoBaseRecordingExtractor): load_sync_channel: bool default False Whether or not to load the last channel in the stream, which is typically used for synchronization. If True, then the probe is not loaded. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. For example, 'imec0.ap' 'nidq' or 'imec0.lf'. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. 
diff --git a/src/spikeinterface/extractors/neoextractors/tdt.py b/src/spikeinterface/extractors/neoextractors/tdt.py index 60cd39c010..a95154a37d 100644 --- a/src/spikeinterface/extractors/neoextractors/tdt.py +++ b/src/spikeinterface/extractors/neoextractors/tdt.py @@ -15,9 +15,9 @@ class TdtRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path: str The folder path to the tdt folder. - stream_id: str, optional + stream_id: str, default: None If there are several streams, specify the stream id you want to load. - stream_name: str, optional + stream_name: str, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False Load exhaustively all annotations from neo. diff --git a/src/spikeinterface/extractors/nwbextractors.py b/src/spikeinterface/extractors/nwbextractors.py index bca4c75d99..ceb99909a6 100644 --- a/src/spikeinterface/extractors/nwbextractors.py +++ b/src/spikeinterface/extractors/nwbextractors.py @@ -25,7 +25,7 @@ def retrieve_electrical_series(nwbfile: NWBFile, electrical_series_name: Optiona ---------- nwbfile : NWBFile The NWBFile object from which to extract the ElectricalSeries. - electrical_series_name : str, optional + electrical_series_name : str, default: None The name of the ElectricalSeries to extract. If not specified, it will return the first found ElectricalSeries if there's only one; otherwise, it raises an error. @@ -80,9 +80,9 @@ def read_nwbfile( ---------- file_path : Path, str The path to the NWB file. - stream_mode : "fsspec" or "ros3", optional + stream_mode : "fsspec" or "ros3" or None, default: None The streaming mode to use. Default assumes the file is on the local disk. - stream_cache_path : str, optional + stream_cache_path : str or None, default: None The path to the cache storage. Default is None. Returns @@ -144,16 +144,16 @@ class NwbRecordingExtractor(BaseRecording): ---------- file_path: str or Path Path to NWB file or s3 url. 
- electrical_series_name: str, optional + electrical_series_name: str or None, default: None The name of the ElectricalSeries. Used if multiple ElectricalSeries are present. load_time_vector: bool, default: False If True, the time vector is loaded to the recording object. samples_for_rate_estimation: int, default: 100000 The number of timestamp samples to use to estimate the rate. Used if 'rate' is not specified in the ElectricalSeries. - stream_mode: str, optional + stream_mode: str or None, default: None Specify the stream mode: "fsspec" or "ros3". - stream_cache_path: str or Path, optional + stream_cache_path: str or Path or None, default: None Local path for caching. Default: cwd/cache. Returns @@ -424,16 +424,16 @@ class NwbSortingExtractor(BaseSorting): ---------- file_path: str or Path Path to NWB file. - electrical_series_name: str, optional + electrical_series_name: str or None, default: None The name of the ElectricalSeries (if multiple ElectricalSeries are present). - sampling_frequency: float, optional + sampling_frequency: float or None, default: None The sampling frequency in Hz (required if no ElectricalSeries is available). samples_for_rate_estimation: int, default: 100000 The number of timestamp samples to use to estimate the rate. Used if 'rate' is not specified in the ElectricalSeries. - stream_mode: str, optional + stream_mode: str or None, default: None Specify the stream mode: "fsspec" or "ros3". - stream_cache_path: str or Path, optional + stream_cache_path: str or Path or None, default: None Local path for caching. Default: cwd/cache. Returns @@ -590,7 +590,7 @@ def read_nwb(file_path, load_recording=True, load_sorting=False, electrical_seri If True, the recording object is loaded. load_sorting : bool, default: False If True, the recording object is loaded. 
- electrical_series_name: str, optional + electrical_series_name: str or None, default: None The name of the ElectricalSeries (if multiple ElectricalSeries are present) Returns diff --git a/src/spikeinterface/extractors/toy_example.py b/src/spikeinterface/extractors/toy_example.py index 2a97dfdb17..d281862789 100644 --- a/src/spikeinterface/extractors/toy_example.py +++ b/src/spikeinterface/extractors/toy_example.py @@ -41,25 +41,25 @@ def toy_example( Parameters ---------- - duration: float (or list if multi segment) - Duration in seconds (default 10). - num_channels: int - Number of channels (default 4). - num_units: int - Number of units (default 10). - sampling_frequency: float - Sampling frequency (default 30000). - num_segments: int - Number of segments (default 2). - spike_times: ndarray (or list of multi segment) - Spike time in the recording. - spike_labels: ndarray (or list of multi segment) + duration: float or list[float], default: 10 + Duration in seconds. If a list is provided, it will be the duration of each segment. + num_channels: int, default: 4 + Number of channels + num_units: int, default: 10 + Number of units + sampling_frequency: float, default: 30000 + Sampling frequency + num_segments: int, default: 2 + Number of segments. + spike_times: np.array or list[nparray] or None, default: None + Spike time in the recording + spike_labels: np.array or list[nparray] or None, default: None Cluster label for each spike time (needs to specified both together). # score_detection: int (between 0 and 1) - # Generate the sorting based on a subset of spikes compare with the trace generation. - firing_rate: float - The firing rate for the units (in Hz). - seed: int + # Generate the sorting based on a subset of spikes compare with the trace generation + firing_rate: float, default: 3.0 + The firing rate for the units (in Hz) + seed: int or None, default: None Seed for random initialization. 
Returns diff --git a/src/spikeinterface/extractors/tridesclousextractors.py b/src/spikeinterface/extractors/tridesclousextractors.py index 8b0ce37e7a..6bf248c62a 100644 --- a/src/spikeinterface/extractors/tridesclousextractors.py +++ b/src/spikeinterface/extractors/tridesclousextractors.py @@ -11,7 +11,7 @@ class TridesclousSortingExtractor(BaseSorting): ---------- folder_path : str or Path Path to the Tridesclous folder. - chan_grp : list, optional + chan_grp : list or None, default: None The channel group(s) to load. Returns diff --git a/src/spikeinterface/extractors/waveclussnippetstextractors.py b/src/spikeinterface/extractors/waveclussnippetstextractors.py index 2e4c28b12e..d4bdc1aede 100644 --- a/src/spikeinterface/extractors/waveclussnippetstextractors.py +++ b/src/spikeinterface/extractors/waveclussnippetstextractors.py @@ -97,12 +97,10 @@ def get_snippets( Parameters ---------- - indexes: (Union[int, None], optional) - start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) - end_sample, or number of samples if None. Defaults to None. - channel_indices: (Union[List, None], optional) - Indices of channels to return, or all channels if None. Defaults to None. + indices: list[int] + Indices of the snippets to return + channel_indices: Union[list, None], default: None + Indices of channels to return, or all channels if None Returns ------- @@ -122,9 +120,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: (Union[int, None], optional) + start_frame: Union[int, None], default: None start sample index, or zero if None. Defaults to None. - end_frame: (Union[int, None], optional) + end_frame: Union[int, None], default: None end_sample, or number of samples if None. Defaults to None. 
Returns diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 7e6c95a875..e3edb9712f 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -153,8 +153,9 @@ def get_data(self, outputs="concatenated"): Get computed spike amplitudes. Parameters ---------- - outputs : str, optional - 'concatenated' or 'by_unit', by default 'concatenated' + outputs : 'concatenated' | 'by_unit', default: "concatenated" + The output format + Returns ------- spike_amplitudes : np.array or dict @@ -206,16 +207,16 @@ def compute_amplitude_scalings( ---------- waveform_extractor: WaveformExtractor The waveform extractor object - sparsity: ChannelSparsity, default: None + sparsity: ChannelSparsity or None, default: None If waveforms are not sparse, sparsity is required if the number of channels is greater than `max_dense_channels`. If the waveform extractor is sparse, its sparsity is automatically used. max_dense_channels: int, default: 16 Maximum number of channels to allow running without sparsity. To compute amplitude scaling using dense waveforms, set this to None, sparsity to None, and pass dense waveforms as input. - ms_before : float, default: None + ms_before : float or None, default: None The cut out to apply before the spike peak to extract local waveforms. If None, the WaveformExtractor ms_before is used. - ms_after : float, default: None + ms_after : float or None, default: None The cut out to apply after the spike peak to extract local waveforms. If None, the WaveformExtractor ms_after is used. handle_collisions: bool, default: True @@ -591,9 +592,9 @@ def fit_collision( # ---------- # we : WaveformExtractor # The WaveformExtractor object. -# sparsity : ChannelSparsity, default=None +# sparsity : ChannelSparsity, default: None # The ChannelSparsity. If None, only main channels are plotted. 
-# num_collisions : int, default=None +# num_collisions : int, default: None # Number of collisions to plot. If None, all collisions are plotted. # """ # assert we.is_extension("amplitude_scalings"), "Could not find amplitude scalings extension!" diff --git a/src/spikeinterface/postprocessing/correlograms.py b/src/spikeinterface/postprocessing/correlograms.py index 6e693635eb..5d0e1e17a1 100644 --- a/src/spikeinterface/postprocessing/correlograms.py +++ b/src/spikeinterface/postprocessing/correlograms.py @@ -146,15 +146,15 @@ def compute_correlograms( Parameters ---------- waveform_or_sorting_extractor : WaveformExtractor or BaseSorting - If WaveformExtractor, the correlograms are saved as WaveformExtensions. + If WaveformExtractor, the correlograms are saved as WaveformExtensions load_if_exists : bool, default: False - Whether to load precomputed crosscorrelograms, if they already exist. - window_ms : float, optional - The window in ms, by default 100.0. - bin_ms : float, optional - The bin size in ms, by default 5.0. - method : str, optional - "auto" | "numpy" | "numba". If _auto" and numba is installed, numba is used, by default "auto" + Whether to load precomputed crosscorrelograms, if they already exist + window_ms : float, default: 100.0 + The window in ms + bin_ms : float, default: 5 + The bin size in ms + method : str, default: "auto" + "auto" | "numpy" | "numba". If "auto" and numba is installed, numba is used Returns ------- diff --git a/src/spikeinterface/postprocessing/isi.py b/src/spikeinterface/postprocessing/isi.py index e98e64f753..96bf78b9b4 100644 --- a/src/spikeinterface/postprocessing/isi.py +++ b/src/spikeinterface/postprocessing/isi.py @@ -77,15 +77,15 @@ def compute_isi_histograms( Parameters ---------- waveform_or_sorting_extractor : WaveformExtractor or BaseSorting - If WaveformExtractor, the ISI histograms are saved as WaveformExtensions.
+ If WaveformExtractor, the ISI histograms are saved as WaveformExtensions load_if_exists : bool, default: False - Whether to load precomputed crosscorrelograms, if they already exist. - window_ms : float, optional - The window in ms, by default 50.0. - bin_ms : float, optional - The bin size in ms, by default 1.0. - method : str, optional - "auto" | "numpy" | "numba". If "auto" and numba is installed, numba is used, by default "auto" + Whether to load precomputed crosscorrelograms, if they already exist + window_ms : float, default: 50 + The window in ms + bin_ms : float, default: 1 + The bin size in ms + method : str, default: "auto" + "auto" | "numpy" | "numba". If "auto" and numba is installed, numba is used Returns ------- diff --git a/src/spikeinterface/postprocessing/noise_level.py b/src/spikeinterface/postprocessing/noise_level.py index 8b5c04dab1..a428945769 100644 --- a/src/spikeinterface/postprocessing/noise_level.py +++ b/src/spikeinterface/postprocessing/noise_level.py @@ -56,13 +56,11 @@ def compute_noise_levels(waveform_extractor, load_if_exists=False, **params): Parameters ---------- waveform_extractor: WaveformExtractor - A waveform extractor object. - num_chunks_per_segment: int (deulf 20) - Number of chunks to estimate the noise - chunk_size: int (default 10000) - Size of chunks in sample - seed: int (default None) - Eventualy a seed for reproducibility. 
+ A waveform extractor object + load_if_exists: bool, default: False + If True, the noise levels are loaded if they already exist + **params: dict with additional parameters + Returns ------- diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 8383dcbb43..94cab7c8c5 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -122,9 +122,9 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"): Parameters ---------- - channel_ids : list, optional + channel_ids : list, default: None List of channel ids on which projections are computed - unit_ids : list, optional + unit_ids : list, default: None List of unit ids to return projections for outputs: str * 'id': 'all_labels' contain unit ids diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index ccd2121174..d21d3668bb 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -93,8 +93,8 @@ def get_data(self, outputs="concatenated"): Parameters ---------- - outputs : str, optional - 'concatenated' or 'by_unit', by default 'concatenated' + outputs : 'concatenated' | 'by_unit', default: "concatenated" + The output format Returns ------- diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index 28eed131cd..ac00e89a01 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -82,8 +82,8 @@ def get_data(self, outputs="concatenated"): Parameters ---------- - outputs : str, optional - 'concatenated' or 'by_unit', by default 'concatenated' + outputs : 'concatenated' | 'by_unit', default: "concatenated" + The output format Returns ------- @@ -140,30 +140,30 @@ def
compute_spike_locations( Parameters ---------- waveform_extractor : WaveformExtractor - A waveform extractor object. + A waveform extractor object load_if_exists : bool, default: False - Whether to load precomputed spike locations, if they already exist. - ms_before : float - The left window, before a peak, in milliseconds. - ms_after : float - The right window, after a peak, in milliseconds. + Whether to load precomputed spike locations, if they already exist + ms_before : float, default: 0.5 + The left window, before a peak, in milliseconds + ms_after : float, default: 0.5 + The right window, after a peak, in milliseconds spike_retriver_kwargs: dict - A dictionary to control the behavior for getting the maximum channel for each spike. + A dictionary to control the behavior for getting the maximum channel for each spike This dictionary contains: * channel_from_template: bool, default True - For each spike is the maximum channel computed from template or re estimated at every spikes. + For each spike is the maximum channel computed from template or re estimated at every spikes channel_from_template = True is old behavior but less acurate channel_from_template = False is slower but more accurate * radius_um: float, default 50 - In case channel_from_template=False, this is the radius to get the true peak. + In case channel_from_template=False, this is the radius to get the true peak * peak_sign="neg" In case channel_from_template=False, this is the peak sign. - method : str - 'center_of_mass' / 'monopolar_triangulation' / 'grid_convolution' - method_kwargs : dict + method : 'center_of_mass' | 'monopolar_triangulation' | 'grid_convolution', default: 'center_of_mass' + The localization method to use + method_kwargs : dict, default: {} Other kwargs depending on the method. 
- outputs : str - 'concatenated' (default) / 'by_unit' + outputs : 'concatenated' | 'by_unit', default: 'concatenated' + The output format {} Returns diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index a359e2a814..fa911a7f09 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -241,12 +241,12 @@ def compute_template_metrics( Parameters ---------- - waveform_extractor : WaveformExtractor, optional + waveform_extractor : WaveformExtractor The waveform extractor used to compute template metrics load_if_exists : bool, default: False Whether to load precomputed template metrics, if they already exist. - metric_names : list, optional - List of metrics to compute (see si.postprocessing.get_template_metric_names()), by default None + metric_names : list or None, default: None + List of metrics to compute (see si.postprocessing.get_template_metric_names()) peak_sign : {"neg", "pos"}, default: "neg" Whether to use the positive ("pos") or negative ("neg") peaks to estimate extremum channels. upsampling_factor : int, default: 10 diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py index e026604b68..9b729b3176 100644 --- a/src/spikeinterface/postprocessing/template_similarity.py +++ b/src/spikeinterface/postprocessing/template_similarity.py @@ -92,7 +92,7 @@ def compute_template_similarity( Whether to load precomputed similarity, if is already exists. 
method: str Method name ('cosine_similarity') - waveform_extractor_other: WaveformExtractor, optional + waveform_extractor_other: WaveformExtractor, default: None A second waveform extractor object Returns diff --git a/src/spikeinterface/postprocessing/unit_localization.py b/src/spikeinterface/postprocessing/unit_localization.py index 48ceb34a4e..dee7c1b872 100644 --- a/src/spikeinterface/postprocessing/unit_localization.py +++ b/src/spikeinterface/postprocessing/unit_localization.py @@ -69,8 +69,8 @@ def get_data(self, outputs="numpy"): Parameters ---------- - outputs : str, optional - 'numpy' or 'by_unit', by default 'numpy' + outputs : 'numpy' | 'by_unit', default: "numpy" + The output format Returns ------- @@ -104,15 +104,15 @@ def compute_unit_locations( Parameters ---------- waveform_extractor: WaveformExtractor - A waveform extractor object. + A waveform extractor object load_if_exists : bool, default: False - Whether to load precomputed unit locations, if they already exist. - method: str - 'center_of_mass' / 'monopolar_triangulation' / 'grid_convolution' - outputs: str - 'numpy' (default) / 'by_unit' + Whether to load precomputed unit locations, if they already exist + method: 'center_of_mass' | 'monopolar_triangulation' | 'grid_convolution', default: 'center_of_mass' + The method to use for localization + outputs: 'numpy' | 'by_unit', default: 'numpy' + The output format method_kwargs: - Other kwargs depending on the method. 
+ Other kwargs depending on the method Returns ------- @@ -247,17 +247,17 @@ def compute_monopolar_triangulation( ---------- waveform_extractor:WaveformExtractor A waveform extractor object - method: str ('least_square', 'minimize_with_log_penality') - 2 variants of the method - radius_um: float + method: 'least_square' | 'minimize_with_log_penality', default: 'least_square' + The optimizer to use + radius_um: float, default: 75 For channel sparsity - max_distance_um: float + max_distance_um: float, default: 1000 to make bounddary in x, y, z and also for alpha - return_alpha: bool default False + return_alpha: bool, default: False Return or not the alpha value - enforce_decrease : bool (default False) + enforce_decrease : bool, default: False Enforce spatial decreasingness for PTP vectors - feature: string in ['ptp', 'energy', 'peak_voltage'] + feature: 'ptp', 'energy', 'peak_voltage', default: 'ptp' The available features to consider for estimating the position via monopolar triangulation are peak-to-peak amplitudes ('ptp', default), energy ('energy', as L2 norm) or voltages at the center of the waveform @@ -387,24 +387,24 @@ def compute_grid_convolution( ---------- waveform_extractor: WaveformExtractor The waveform extractor - peak_sign: str + peak_sign: str, default: 'neg' Sign of the template to compute best channels ('neg', 'pos', 'both') - radius_um: float + radius_um: float, default: 40.0 Radius to consider for the fake templates - upsampling_um: float + upsampling_um: float, default: 5 Upsampling resolution for the grid of templates - sigma_um: np.array + sigma_um: np.array, default: np.linspace(5.0, 25.0, 5) Spatial decays of the fake templates - sigma_ms: float + sigma_ms: float, default: 0.25 The temporal decay of the fake templates - margin_um: float + margin_um: float, default: 50 The margin for the grid of fake templates - prototype: np.array + prototype: np.array or None, default: None Fake waveforms for the templates. 
If None, generated as Gaussian - percentile: float (default 10) + percentile: float, default: 10 The percentage in [0, 100] of the best scalar products kept to estimate the position - sparsity_threshold: float (default 0.01) + sparsity_threshold: float, default: 0.01 The sparsity threshold (in 0-1) below which weights should be considered as 0. Returns ------- diff --git a/src/spikeinterface/preprocessing/average_across_direction.py b/src/spikeinterface/preprocessing/average_across_direction.py index 3f056dfada..b8ce2c9a78 100644 --- a/src/spikeinterface/preprocessing/average_across_direction.py +++ b/src/spikeinterface/preprocessing/average_across_direction.py @@ -24,12 +24,12 @@ def __init__( ---------- parent_recording : BaseRecording recording to zero-pad - direction : str + direction : str, default: 'y' Channels living at unique positions along this direction will be averaged. - dtype : optional numpy dtype - If unset, parent dtype is preserved, but the average will - lose accuracy, so float32 by default. + dtype : numpy dtype, default: float32 + If None, parent dtype is preserved, but the average will + lose accuracy """ parent_channel_locations = parent_recording.get_channel_locations() dim = ["x", "y", "z"].index(direction) diff --git a/src/spikeinterface/preprocessing/clip.py b/src/spikeinterface/preprocessing/clip.py index cc18d51d2e..32502bc48f 100644 --- a/src/spikeinterface/preprocessing/clip.py +++ b/src/spikeinterface/preprocessing/clip.py @@ -15,10 +15,10 @@ class ClipRecording(BasePreprocessor): ---------- recording: RecordingExtractor The recording extractor to be transformed - a_min: float or `None` (default `None`) + a_min: float or None, default: None Minimum value. If `None`, clipping is not performed on lower interval edge. - a_max: float or `None` (default `None`) + a_max: float or None, default: None Maximum value. If `None`, clipping is not performed on upper interval edge.
@@ -59,22 +59,22 @@ class BlankSaturationRecording(BasePreprocessor): The recording extractor to be transformed Minimum value. If `None`, clipping is not performed on lower interval edge. - abs_threshold: float or None + abs_threshold: float or None, default: None The absolute value for considering that the signal is saturating - quantile_threshold: float or None + quantile_threshold: float or None, default: None Tha value in [0, 1] used if abs_threshold is None to automatically set the abs_threshold given the data. Must be provided if abs_threshold is None - direction: string in ['upper', 'lower', 'both'] + direction: 'upper' | 'lower' | 'both', default: 'upper' Only values higher than the detection threshold are set to fill_value ('higher'), or only values lower than the detection threshold ('lower'), or both ('both') - fill_value: float or None + fill_value: float or None, default: None The value to write instead of the saturating signal. If None, then the value is automatically computed as the median signal value - num_chunks_per_segment: int (default 50) + num_chunks_per_segment: int, default: 50 The number of chunks per segments to consider to estimate the threshold/fill_values - chunk_size: int (default 500) + chunk_size: int, default: 500 The chunk size to estimate the threshold/fill_values - seed: int (default 0) + seed: int, default: 0 The seed to select the random chunks Returns diff --git a/src/spikeinterface/preprocessing/correct_lsb.py b/src/spikeinterface/preprocessing/correct_lsb.py index fe2e5f00cb..bd1fc39230 100644 --- a/src/spikeinterface/preprocessing/correct_lsb.py +++ b/src/spikeinterface/preprocessing/correct_lsb.py @@ -14,14 +14,14 @@ def correct_lsb(recording, num_chunks_per_segment=20, chunk_size=10000, seed=Non ---------- recording : RecordingExtractor The recording extractor to be LSB-corrected. 
- num_chunks_per_segment: int - Number of chunks per segment for random chunk, by default 20 - chunk_size : int - Size of a chunk in number for random chunk, by default 10000 - seed : int - Random seed for random chunk, by default None - verbose : bool - If True, estimate LSB value is printed, by default False + num_chunks_per_segment: int, default: 20 + Number of chunks per segment for random chunk + chunk_size : int, default: 10000 + Size of a chunk in number for random chunk + seed : int or None, default: None + Random seed for random chunk + verbose : bool, default: False + If True, estimate LSB value is printed Returns ------- diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 55e34ba5dd..6702243703 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -14,10 +14,10 @@ class DepthOrderRecording(ChannelSliceRecording): The recording to re-order. channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str, tuple, list + dimensions : str, tuple, list, default: ('x', 'y') If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. - This approach is recommended since there is less ambiguity, by default ('x', 'y') + This approach is recommended since there is less ambiguity flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. 
diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index e6e2836a35..046a748bea 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -50,49 +50,47 @@ ---------- recording : BaseRecording The recording for which bad channels are detected - method : str + method : str, default: 'coherence+psd' The method to be used: - * coeherence+psd (default, developed by IBL) + * coherence+psd (developed by IBL) * mad * std - std_mad_threshold (mstd) : float - (method std, mad) + std_mad_threshold : float, default: 5 The standard deviation/mad multiplier threshold - psd_hf_threshold (coeherence+psd) : float + psd_hf_threshold (coherence+psd) : float, default: 0.02 An absolute threshold (uV^2/Hz) used as a cutoff for noise channels. Channels with average power at >80% Nyquist larger than this threshold - will be labeled as noise, by default 0.02 - dead_channel_threshold (coeherence+psd) : float, optional - Threshold for channel coherence below which channels are labeled as dead, by default -0.5 - noisy_channel_threshold (coeherence+psd) : float - Threshold for channel coherence above which channels are labeled as noisy (together with psd condition), - by default 1 - outside_channel_threshold (coeherence+psd) : float + will be labeled as noise + dead_channel_threshold (coherence+psd) : float, default: -0.5 + Threshold for channel coherence below which channels are labeled as dead + noisy_channel_threshold (coherence+psd) : float, default: 1 + Threshold for channel coherence above which channels are labeled as noisy (together with psd condition) + outside_channel_threshold (coherence+psd) : float, default: -0.75 Threshold for channel coherence above which channels at the edge of the recording are marked as outside - of the brain, by default -0.75 - n_neighbors (coeherence+psd) : int - Number of
channel neighbors to compute median filter (needs to be odd), by default 11 - nyquist_threshold (coeherence+psd) : float + of the brain + n_neighbors (coherence+psd) : int, default: 11 + Number of channel neighbors to compute median filter (needs to be odd) + nyquist_threshold (coherence+psd) : float, default: 0.8 Frequency with respect to Nyquist (Fn=1) above which the mean of the PSD is calculated and compared - with psd_hf_threshold, by default 0.8 - direction (coeherence+psd): str - 'x', 'y', 'z', the depth dimension, by default 'y' - highpass_filter_cutoff : float - If the recording is not filtered, the cutoff frequency of the highpass filter, by default 300 - chunk_duration_s : float - Duration of each chunk, by default 0.5 - num_random_chunks : int - Number of random chunks, by default 100 + with psd_hf_threshold + direction (coherence+psd): str, default: 'y' + 'x', 'y', 'z', the depth dimension + highpass_filter_cutoff : float, default: 300 + If the recording is not filtered, the cutoff frequency of the highpass filter + chunk_duration_s : float, default: 0.5 + Duration of each chunk + num_random_chunks : int, default: 100 + Number of random chunks Having many chunks is important for reproducibility. - welch_window_ms : float - Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms - neighborhood_r2_threshold : float, default 0.95 + welch_window_ms : float, default: 10 + Window size for the scipy.signal.welch that will be converted to nperseg + neighborhood_r2_threshold : float, default: 0.95 R^2 threshold for the neighborhood_r2 method. - neighborhood_r2_radius_um : float, default 30 + neighborhood_r2_radius_um : float, default: 30 Spatial radius below which two channels are considered neighbors in the neighborhood_r2 method.
- seed : int or None - The random seed to extract chunks, by default None + seed : int or None, default: None + The random seed to extract chunks Returns ------- @@ -294,19 +292,19 @@ def detect_bad_channels_ibl( psd_hf_threshold : float Threshold for high frequency PSD. If mean PSD above `nyquist_threshold` * fn is greater than this value, channels are flagged as noisy (together with channel coherence condition). - dead_channel_thr : float, optional - Threshold for channel coherence below which channels are labeled as dead, by default -0.5 - noisy_channel_thr : float - Threshold for channel coherence above which channels are labeled as noisy (together with psd condition), - by default -0.5 - outside_channel_thr : float + dead_channel_thr : float, default: -0.5 + Threshold for channel coherence below which channels are labeled as dead + noisy_channel_thr : float, default: 1 + Threshold for channel coherence above which channels are labeled as noisy (together with psd condition) + outside_channel_thr : float, default: -0.75 Threshold for channel coherence above which channels - n_neighbors : int, optional - Number of neighbors to compute median fitler, by default 11 - nyquist_threshold : float, optional - Threshold on Nyquist frequency to calculate HF noise band, by default 0.8 - welch_window_ms: float - Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms + n_neighbors : int, default: 11 + Number of neighbors to compute median filter + nyquist_threshold : float, default: 0.8 + Threshold on Nyquist frequency to calculate HF noise band + welch_window_ms: float, default: 0.3 + Window size for the scipy.signal.welch that will be converted to nperseg + Returns ------- 1d array diff --git a/src/spikeinterface/preprocessing/directional_derivative.py b/src/spikeinterface/preprocessing/directional_derivative.py index d74b2a71ef..e1baaaf614 100644 --- a/src/spikeinterface/preprocessing/directional_derivative.py +++
b/src/spikeinterface/preprocessing/directional_derivative.py @@ -30,15 +30,15 @@ def __init__( ---------- recording : BaseRecording recording to zero-pad - direction : str + direction : str, default: 'y' Gradients will be taken along this dimension. - order : int + order : int, default: 1 np.gradient will be applied this many times. - edge_order : int + edge_order : int, default: 1 Order of gradient accuracy at edges; see np.gradient for details. - dtype : optional numpy dtype - If unset, parent dtype is preserved, but the derivative can - overflow or lose accuracy, so "float32" by default. + dtype : numpy dtype, default: "float32" + If None, parent dtype is preserved, but the derivative can + overflow or lose accuracy """ parent_channel_locations = recording.get_channel_locations() dim = ["x", "y", "z"].index(direction) diff --git a/src/spikeinterface/preprocessing/filter.py b/src/spikeinterface/preprocessing/filter.py index b31088edf7..f504c4b2c3 100644 --- a/src/spikeinterface/preprocessing/filter.py +++ b/src/spikeinterface/preprocessing/filter.py @@ -30,20 +30,20 @@ class FilterRecording(BasePreprocessor): ---------- recording: Recording The recording extractor to be re-referenced - band: float or list + band: float or list, default: [300.0, 6000.0] If float, cutoff frequency in Hz for 'highpass' filter type If list. band (low, high) in Hz for 'bandpass' filter type - btype: str + btype: str, default: 'bandpass' Type of the filter ('bandpass', 'highpass') - margin_ms: float + margin_ms: float, default: 5.0 Margin in ms on border to avoid border effect - filter_mode: str 'sos' or 'ba' + filter_mode: 'sos' | 'ba', default: 'sos' Filter form of the filter coefficients: - - second-order sections (default): 'sos' - - numerator/denominator: 'ba' - coef: ndarray or None + - second-order sections ('sos') + - numerator/denominator: ('ba') + coef: array or None, default: None Filter coefficients in the filter_mode form.
- dtype: dtype or None + dtype: dtype or None, default: None The dtype of the returned traces. If None, the dtype of the parent recording is used {} diff --git a/src/spikeinterface/preprocessing/highpass_spatial_filter.py b/src/spikeinterface/preprocessing/highpass_spatial_filter.py index 4df4a409bc..77260bd820 100644 --- a/src/spikeinterface/preprocessing/highpass_spatial_filter.py +++ b/src/spikeinterface/preprocessing/highpass_spatial_filter.py @@ -26,25 +26,25 @@ class HighpassSpatialFilterRecording(BasePreprocessor): ---------- recording : BaseRecording The parent recording - n_channel_pad : int + n_channel_pad : int, default: 60 Number of channels to pad prior to filtering. Channels are padded with mirroring. - If None, no padding is applied, by default 60 - n_channel_taper : int + If None, no padding is applied + n_channel_taper : int, default: 0 Number of channels to perform cosine tapering on prior to filtering. If None and n_channel_pad is set, n_channel_taper will be set to the number of padded channels. 
- Otherwise, the passed value will be used, by default None - direction : str - The direction in which the spatial filter is applied, by default "y" - apply_agc : bool - It True, Automatic Gain Control is applied, by default True - agc_window_length_s : float - Window in seconds to compute Hanning window for AGC, by default 0.01 - highpass_butter_order : int - Order of spatial butterworth filter, by default 3 - highpass_butter_wn : float - Critical frequency (with respect to Nyquist) of spatial butterworth filter, by default 0.01 + Otherwise, the passed value will be used + direction : str, default: 'y' + The direction in which the spatial filter is applied + apply_agc : bool, default: True + If True, Automatic Gain Control is applied + agc_window_length_s : float, default: 0.1 + Window in seconds to compute Hanning window for AGC + highpass_butter_order : int, default: 3 + Order of spatial butterworth filter + highpass_butter_wn : float, default: 0.01 + Critical frequency (with respect to Nyquist) of spatial butterworth filter Returns ------- diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index 95ecd0fe52..10c5a55265 100644 --- a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -23,15 +23,15 @@ class InterpolateBadChannelsRecording(BasePreprocessor): The parent recording bad_channel_ids : list or 1d np.array Channel ids of the bad channels to interpolate. - sigma_um : float + sigma_um : float or None, default: None Distance between sequential channels in um. If None, will use - the most common distance between y-axis channels, by default None - p : float + the most common distance between y-axis channels + p : float, default: 1.3 Exponent of the Gaussian kernel.
Determines rate of decay - for distance weightings, by default 1.3 - weights : np.array + for distance weightings + weights : np.array or None, default: None The weights to give to bad_channel_ids at interpolation. - If None, weights are automatically computed, by default None + If None, weights are automatically computed Returns ------- diff --git a/src/spikeinterface/preprocessing/normalize_scale.py b/src/spikeinterface/preprocessing/normalize_scale.py index bd53866b6a..e1ebb9d86a 100644 --- a/src/spikeinterface/preprocessing/normalize_scale.py +++ b/src/spikeinterface/preprocessing/normalize_scale.py @@ -35,18 +35,18 @@ class NormalizeByQuantileRecording(BasePreprocessor): ---------- recording: RecordingExtractor The recording extractor to be transformed - scalar: float + scale: float, default: 1.0 Scale for the output distribution - median: float + median: float, default: 0.0 Median for the output distribution - q1: float (default 0.01) + q1: float, default: 0.01 Lower quantile used for measuring the scale - q1: float (default 0.99) + q2: float, default: 0.99 Upper quantile used for measuring the - seed: int - Random seed for reproducibility - dtype: str or np.dtype - The dtype of the output traces. Default "float32" + mode: 'by_channel' or 'pool_channel', default: 'by_channel' + If 'by_channel' each channel is rescaled independently. + dtype: str or np.dtype, default: "float32" + The dtype of the output traces **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function Returns @@ -179,10 +179,10 @@ class CenterRecording(BasePreprocessor): ---------- recording: RecordingExtractor The recording extractor to be centered - mode: str - 'median' (default) | 'mean' - dtype: str or np.dtype - The dtype of the output traces.
Default "float32" + mode: 'median' | 'mean', default: 'median' + The method used to center the traces + dtype: str or np.dtype, default: "float32" + The dtype of the output traces **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function Returns @@ -227,8 +227,8 @@ class ZScoreRecording(BasePreprocessor): ---------- recording: RecordingExtractor The recording extractor to be centered - mode: str - "median+mad" (default) or "mean+std" + mode: "median+mad" | "mean+std", default: "median+mad" + The mode to compute the zscore dtype: None or dtype If None the the parent dtype is kept. For integer dtype a int_scale must be also given. diff --git a/src/spikeinterface/preprocessing/phase_shift.py b/src/spikeinterface/preprocessing/phase_shift.py index bdba55038d..570ce48a5d 100644 --- a/src/spikeinterface/preprocessing/phase_shift.py +++ b/src/spikeinterface/preprocessing/phase_shift.py @@ -23,12 +23,13 @@ class PhaseShiftRecording(BasePreprocessor): ---------- recording: Recording The recording. It need to have "inter_sample_shift" in properties. - margin_ms: float (default 40) - margin in ms for computation + margin_ms: float, default: 40.0 + Margin in ms for computation. 40ms ensure a very small error when doing chunk processing - inter_sample_shift: None or numpy array - If "inter_sample_shift" is not in recording.properties - we can externaly provide one. + inter_sample_shift: None or numpy array, default: None + If "inter_sample_shift" is not in recording properties, + we can externally provide one. 
+ Returns ------- filter_recording: PhaseShiftRecording diff --git a/src/spikeinterface/preprocessing/preprocessing_tools.py b/src/spikeinterface/preprocessing/preprocessing_tools.py index 17b05df5ad..6531362975 100644 --- a/src/spikeinterface/preprocessing/preprocessing_tools.py +++ b/src/spikeinterface/preprocessing/preprocessing_tools.py @@ -27,20 +27,20 @@ def get_spatial_interpolation_kernel( The recording extractor to be transformed target_location: array shape (n, 2) Scale for the output distribution - method: 'kriging' or 'idw' or 'nearest' + method: 'kriging' | 'idw' | 'nearest', default: 'kriging' Choice of the method 'kriging' : the same one used in kilosort 'idw' : inverse distance weithed 'nearest' : use nereast channel - sigma_um : float or list (default 20.) + sigma_um : float or list, default: 20.0 Used in the 'kriging' formula. When list, it needs to have 2 elements (for the x and y directions). - p: int (default 1) + p: int, default: 1 Used in the 'kriging' formula - sparse_thresh: None or float (default None) + sparse_thresh: None or float, default: None If not None for 'kriging' force small value to be zeros to get a sparse matrix. - num_closest: int (default 3) + num_closest: int, default: 3 Used for 'idw' - force_extrapolate: bool (false by default) + force_extrapolate: bool, default: False How to handle when target location are outside source location. When False : no extrapolation all target location outside are set to zero. When True : extrapolation done with the formula of the method. 
diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 1eafa48a0b..f4dc98bd4e 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -22,20 +22,20 @@ class RemoveArtifactsRecording(BasePreprocessor): The recording extractor to remove artifacts from list_triggers: list of lists/arrays One list per segment of int with the stimulation trigger frames - ms_before: float or None + ms_before: float or None, default: 0.5 Time interval in ms to remove before the trigger events. If None, then also ms_after must be None and a single sample is removed - ms_after: float or None + ms_after: float or None, default: 3.0 Time interval in ms to remove after the trigger events. If None, then also ms_before must be None and a single sample is removed list_labels: list of lists/arrays or None One list per segment of labels with the stimulation labels for the given artefacs. labels should be strings, for JSON serialization. Required for 'median' and 'average' modes. - mode: str + mode: "zeros", "linear", "cubic", "average", "median", default: "zeros" Determines what artifacts are replaced by. Can be one of the following: - - 'zeros' (default): Artifacts are replaced by zeros. + - 'zeros': Artifacts are replaced by zeros. - 'median': The median over all artifacts is computed and subtracted for each occurence of an artifact @@ -59,26 +59,26 @@ class RemoveArtifactsRecording(BasePreprocessor): continuation of the trace. If the trace starts or ends with an artifact, the gap is filled with the closest available value before or after the artifact. - fit_sample_spacing: float + fit_sample_spacing: float, default: 1.0 Determines the spacing (in ms) of reference points for the cubic spline fit if mode = 'cubic'. Default = 1ms. 
Note: The actual fit samples are the median of the 5 data points around the time of each sample point to avoid excessive influence from hyper-local fluctuations. - artifacts: dict + artifacts: dict, default: None If provided (when mode is 'median' or 'average') then it must be a dict with keys that are the labels of the artifacts, and values the artifacts themselves, on all channels (and thus bypassing ms_before and ms_after) - sparsity: dict + sparsity: dict, default: None If provided (when mode is 'median' or 'average') then it must be a dict with keys that are the labels of the artifacts, and values that are boolean mask of the channels where the artifacts should be considered (for subtraction/scaling) - scale_amplitude: False + scale_amplitude: bool, default: False If true, then for mode 'median' or 'average' the amplitude of the template will be scaled in amplitude at each time occurence to minimize residuals - time_jitter: float (default 0) + time_jitter: float, default: 0 If non 0, then for mode 'median' or 'average', a time jitter in ms can be allowed to minimize the residuals - waveforms_kwargs: dict or None + waveforms_kwargs: dict or None, default: None The arguments passed to the WaveformExtractor object when extracting the artifacts, for mode 'median' or 'average'. By default, the global job kwargs are used, in addition to {'allow_unfiltered' : True, 'mode':'memory'}. diff --git a/src/spikeinterface/preprocessing/resample.py b/src/spikeinterface/preprocessing/resample.py index a00893846a..cffbdb2419 100644 --- a/src/spikeinterface/preprocessing/resample.py +++ b/src/spikeinterface/preprocessing/resample.py @@ -26,12 +26,12 @@ class ResampleRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be re-referenced resample_rate : int The resampling frequency - margin : float (default 100) + margin : float, default: 100.0 Margin in ms for computations, will be used to decrease edge effects.
- dtype : dtype or None + dtype : dtype or None, default: None The dtype of the returned traces. If None, the dtype of the parent recording is used. - skip_checks : bool - If True, checks on sampling frequencies and cutoff filter frequencies are skipped, by default False + skip_checks : bool, default: False + If True, checks on sampling frequencies and cutoff filter frequencies are skipped Returns ------- diff --git a/src/spikeinterface/preprocessing/silence_periods.py b/src/spikeinterface/preprocessing/silence_periods.py index c2ffcc6843..a945dd8761 100644 --- a/src/spikeinterface/preprocessing/silence_periods.py +++ b/src/spikeinterface/preprocessing/silence_periods.py @@ -21,10 +21,10 @@ class SilencedPeriodsRecording(BasePreprocessor): list_periods: list of lists/arrays One list per segment of tuples (start_frame, end_frame) to silence - mode: str + mode: "zeros" | "noise", default: "zeros" Determines what periods are replaced by. Can be one of the following: - - 'zeros' (default): Artifacts are replaced by zeros. + - 'zeros': Artifacts are replaced by zeros.
- 'noise': The periods are filled with a gaussion noise that has the same variance that the one in the recordings, on a per channel diff --git a/src/spikeinterface/preprocessing/tests/test_resample.py b/src/spikeinterface/preprocessing/tests/test_resample.py index 32c1b938bf..d17617487f 100644 --- a/src/spikeinterface/preprocessing/tests/test_resample.py +++ b/src/spikeinterface/preprocessing/tests/test_resample.py @@ -38,14 +38,14 @@ def create_sinusoidal_traces(sampling_frequency=3e4, duration=30, freqs_n=10, ma Parameters ---------- - sampling_frequency : float, optional - Sampling rate of the signal, by default 3e4 - duration : int, optional - Duration of the signal in seconds, by default 30 - freqs_n : int, optional - Total frequencies to span on the signal, by default 10 - max_freq : int, optional - Maximum frequency of sinusoids, by default 10000 + sampling_frequency : float, default: 30000 + Sampling rate of the signal + duration : int, default: 30 + Duration of the signal in seconds + freqs_n : int, default: 10 + Total frequencies to span on the signal + max_freq : int, default: 10000 + Maximum frequency of sinusoids Returns ------- diff --git a/src/spikeinterface/preprocessing/whiten.py b/src/spikeinterface/preprocessing/whiten.py index ac80f58182..7b3689899c 100644 --- a/src/spikeinterface/preprocessing/whiten.py +++ b/src/spikeinterface/preprocessing/whiten.py @@ -33,7 +33,7 @@ class WhitenRecording(BasePreprocessor): Small epsilon to regularize SVD. If None, eps is defaulted to 1e-8. If the data is float type and scaled down to very small values, then the eps is automatically set to a small fraction (1e-3) of the median of the squared data. - W : 2d np.array, default: None + W : 2d np.array or None, default: None Pre-computed whitening matrix M : 1d np.array or None, default: None Pre-computed means. 
@@ -145,9 +145,9 @@ def compute_whitening_matrix(recording, mode, random_chunk_kwargs, apply_mean, r Keyword arguments for get_random_data_chunks() apply_mean : bool If True, the mean is removed prior to computing the covariance - radius_um : float, default: None + radius_um : float or None, default: None Used for mode = 'local' to get the neighborhood - eps : float, default: None + eps : float or None, default: None Small epsilon to regularize SVD. If None, the default is set to 1e-8, but if the data is float type and scaled down to very small values, eps is automatically set to a small fraction (1e-3) of the median of the squared data. diff --git a/src/spikeinterface/preprocessing/zero_channel_pad.py b/src/spikeinterface/preprocessing/zero_channel_pad.py index 124b2b080e..5d8da8fa3f 100644 --- a/src/spikeinterface/preprocessing/zero_channel_pad.py +++ b/src/spikeinterface/preprocessing/zero_channel_pad.py @@ -145,9 +145,8 @@ def __init__(self, parent_recording: BaseRecording, num_channels: int, channel_m recording to zero-pad num_channels : int Total number of channels in the zero-channel-padded recording - channel_mapping : Union[list, None], optional - Mapping from the channel index in the original recording to the zero-channel-padded recording, - by default None. + channel_mapping : Union[list, None], default: None + Mapping from the channel index in the original recording to the zero-channel-padded recording. If None, sorts the channel indices in ascending y channel location and puts them at the beginning of the zero-channel-padded recording. 
""" diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index d3f875959e..8b6fe5bc12 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -251,7 +251,7 @@ def compute_isi_violations(waveform_extractor, isi_threshold_ms=1.5, min_isi_ms= The waveform extractor object isi_threshold_ms : float, default: 1.5 Threshold for classifying adjacent spikes as an ISI violation, in ms. - This is the biophysical refractory period (default=1.5). + This is the biophysical refractory period min_isi_ms : float, default: 0 Minimum possible inter-spike interval, in ms. This is the artificial refractory period enforced @@ -422,19 +422,19 @@ def compute_sliding_rp_violations( ---------- waveform_extractor : WaveformExtractor The waveform extractor object. - min_spikes : int, default 0 + min_spikes : int, default: 0 Contamination is set to np.nan if the unit has less than this many spikes across all segments. 
- bin_size_ms : float - The size of binning for the autocorrelogram in ms, by default 0.25 - window_size_s : float - Window in seconds to compute correlogram, by default 1 - exclude_ref_period_below_ms : float - Refractory periods below this value are excluded, by default 0.5 - max_ref_period_ms : float - Maximum refractory period to test in ms, by default 10 ms - contamination_values : 1d array or None - The contamination values to test, by default np.arange(0.5, 35, 0.5) % + bin_size_ms : float, default: 0.25 + The size of binning for the autocorrelogram in ms + window_size_s : float, default: 1 + Window in seconds to compute correlogram + exclude_ref_period_below_ms : float, default: 0.5 + Refractory periods below this value are excluded + max_ref_period_ms : float, default: 10 + Maximum refractory period to test in ms + contamination_values : 1d array or None, default: None + The contamination values to test. If None, it is set to np.arange(0.5, 35, 0.5) unit_ids : list or None List of unit ids to compute the sliding RP violations. If None, all units are used. @@ -935,23 +935,23 @@ def compute_drift_metrics( ---------- waveform_extractor : WaveformExtractor The waveform extractor object. - interval_s : int, optional - Interval length is seconds for computing spike depth, by default 60 - min_spikes_per_interval : int, optional - Minimum number of spikes for computing depth in an interval, by default 100 - direction : str, optional - The direction along which drift metrics are estimated, by default 'y' - min_fraction_valid_intervals : float, optional + interval_s : int, default: 60 + Interval length in seconds for computing spike depth + min_spikes_per_interval : int, default: 100 + Minimum number of spikes for computing depth in an interval + direction : str, default: 'y' + The direction along which drift metrics are estimated + min_fraction_valid_intervals : float, default: 0.5 The fraction of valid (not NaN) position estimates to estimate drifts. 
E.g., if 0.5 at least 50% of estimated positions in the intervals need to be valid, - otherwise drift metrics are set to None, by default 0.5 - min_num_bins : int, optional + otherwise drift metrics are set to None + min_num_bins : int, default: 2 Minimum number of bins required to return a valid metric value. In case there are less bins, the metric values are set to NaN. - return_positions : bool, optional - If True, median positions are returned (for debugging), by default False - unit_ids : list or None - List of unit ids to compute the drift metrics. If None, all units are used. + return_positions : bool, default: False + If True, median positions are returned (for debugging) + unit_ids : list or None, default: None + List of unit ids to compute the drift metrics. If None, all units are used Returns ------- @@ -1128,7 +1128,7 @@ def isi_violations(spike_trains, total_duration_s, isi_threshold_s=0.0015, min_i The total duration of the recording (in seconds) isi_threshold_s : float, default: 0.0015 Threshold for classifying adjacent spikes as an ISI violation, in seconds. - This is the biophysical refractory period (default=1.5). + This is the biophysical refractory period min_isi_s : float, default: 0 Minimum possible inter-spike interval, in seconds. This is the artificial refractory period enforced @@ -1249,16 +1249,16 @@ def slidingRP_violations( The acquisition sampling rate bin_size_ms : float The size (in ms) of binning for the autocorrelogram. 
- window_size_s : float - Window in seconds to compute correlogram, by default 2 - exclude_ref_period_below_ms : float - Refractory periods below this value are excluded, by default 0.5 - max_ref_period_ms : float - Maximum refractory period to test in ms, by default 10 ms - contamination_values : 1d array or None - The contamination values to test, by default np.arange(0.5, 35, 0.5) / 100 - return_conf_matrix : bool - If True, the confidence matrix (n_contaminations, n_ref_periods) is returned, by default False + window_size_s : float, default: 1 + Window in seconds to compute correlogram + exclude_ref_period_below_ms : float, default: 0.5 + Refractory periods below this value are excluded + max_ref_period_ms : float, default: 10 + Maximum refractory period to test in ms + contamination_values : 1d array or None, default: None + The contamination values to test. If None, it is set to np.arange(0.5, 35, 0.5) / 100 + return_conf_matrix : bool, default: False + If True, the confidence matrix (n_contaminations, n_ref_periods) is returned Code adapted from: https://github.com/SteinmetzLab/slidingRefractory/blob/master/python/slidingRP/metrics.py#L166 diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index ed06f7d738..b4e60f5937 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -72,12 +72,12 @@ def calculate_pc_metrics( ---------- pca : WaveformPrincipalComponent Waveform object with principal components computed. - metric_names : list of str, optional + metric_names : list of str, default: None The list of PC metrics to compute. If not provided, defaults to all PC metrics. - sparsity: ChannelSparsity or None + sparsity: ChannelSparsity or None, default: None The sparsity object. This is used also to identify neighbor - units and speed up computations. If None (default) all channels and all units are used + units and speed up computations. 
If None all channels and all units are used for each unit. qm_params : dict or None Dictionary with parameters for each PC metric function. @@ -393,11 +393,11 @@ def nearest_neighbors_isolation( Recomputed if None. max_spikes : int, default: 1000 Max number of spikes to use per unit. - min_spikes : int, optional, default: 10 + min_spikes : int, default: 10 Min number of spikes a unit must have to go through with metric computation. Units with spikes < min_spikes gets numpy.NaN as the quality metric, and are ignored when selecting other units' neighbors. - min_fr : float, optional, default: 0.0 + min_fr : float, default: 0.0 Min firing rate a unit must have to go through with metric computation. Units with firing rate < min_fr gets numpy.NaN as the quality metric, and are ignored when selecting other units' neighbors. @@ -599,10 +599,10 @@ def nearest_neighbors_noise_overlap( Recomputed if None. max_spikes : int, default: 1000 The max number of spikes to use per cluster. - min_spikes : int, optional, default: 10 + min_spikes : int, default: 10 Min number of spikes a unit must have to go through with metric computation. Units with spikes < min_spikes gets numpy.NaN as the quality metric. - min_fr : float, optional, default: 0.0 + min_fr : float, default: 0.0 Min firing rate a unit must have to go through with metric computation. Units with firing rate < min_fr gets numpy.NaN as the quality metric. n_neighbors : int, default: 5 diff --git a/src/spikeinterface/qualitymetrics/quality_metric_calculator.py b/src/spikeinterface/qualitymetrics/quality_metric_calculator.py index a2d0cc41b0..53309db282 100644 --- a/src/spikeinterface/qualitymetrics/quality_metric_calculator.py +++ b/src/spikeinterface/qualitymetrics/quality_metric_calculator.py @@ -198,10 +198,10 @@ def compute_quality_metrics( qm_params : dict or None Dictionary with parameters for quality metrics calculation. 
Default parameters can be obtained with: `si.qualitymetrics.get_default_qm_params()` - sparsity : dict or None + sparsity : dict or None, default: None If given, the sparse channel_ids for each unit in PCA metrics computation. This is used also to identify neighbor units and speed up computations. - If None (default) all channels and all units are used for each unit. + If None all channels and all units are used for each unit. skip_pc_metrics : bool If True, PC metrics computation is skipped. n_jobs : int diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index a49a605a75..8d1ab82c5c 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -354,15 +354,15 @@ def run_sorter_container( sorter_name: str recording: BaseRecording mode: str - container_image: str, optional - output_folder: str, optional - remove_existing_folder: bool, optional - delete_output_folder: bool, optional - verbose: bool, optional - raise_error: bool, optional - with_output: bool, optional - delete_container_files: bool, optional - extra_requirements: list, optional + container_image: str, default: None + output_folder: str, default: None + remove_existing_folder: bool, default: None + delete_output_folder: bool, default: None + verbose: bool, default: None + raise_error: bool, default: None + with_output: bool, default: None + delete_container_files: bool, default: None + extra_requirements: list, default: None sorter_params: """ diff --git a/src/spikeinterface/sortingcomponents/peak_detection.py b/src/spikeinterface/sortingcomponents/peak_detection.py index bc52ea2c70..9615223ff6 100644 --- a/src/spikeinterface/sortingcomponents/peak_detection.py +++ b/src/spikeinterface/sortingcomponents/peak_detection.py @@ -159,9 +159,9 @@ def __init__( The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. 
- num_iterations : int, optional, default=2 + num_iterations : int, optional, default: 2 The number of iterations to run the algorithm. - return_output : bool, optional, default=True + return_output : bool, optional, default: True """ PeakDetector.__init__(self, recording, return_output=return_output) self.peak_detector_node = peak_detector_node diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py index 6b6496a577..51ea53c2f1 100644 --- a/src/spikeinterface/widgets/amplitudes.py +++ b/src/spikeinterface/widgets/amplitudes.py @@ -15,24 +15,23 @@ class AmplitudesWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The input waveform extractor - unit_ids : list - List of unit ids, default None - segment_index : int - The segment index (or None if mono-segment), default None - max_spikes_per_unit : int - Number of max spikes per unit to display. Use None for all spikes. - Default None. - hide_unit_selector : bool - If True the unit selector is not displayed, default False + unit_ids : list or None, default: None + List of unit ids + segment_index : int or None, default: None + The segment index (or None if mono-segment) + max_spikes_per_unit : int or None, default: None + Number of max spikes per unit to display. Use None for all spikes + hide_unit_selector : bool, default: False + If True the unit selector is not displayed (sortingview backend) - plot_histogram : bool - If True, an histogram of the amplitudes is plotted on the right axis, default False + plot_histogram : bool, default: False + If True, an histogram of the amplitudes is plotted on the right axis (matplotlib backend) - bins : int + bins : int, default: None If plot_histogram is True, the number of bins for the amplitude histogram. 
- If None this is automatically adjusted, default None - plot_legend : bool - True includes legend in plot, default True + If None this is automatically adjusted + plot_legend : bool, default: True + True includes legend in plot """ def __init__( diff --git a/src/spikeinterface/widgets/crosscorrelograms.py b/src/spikeinterface/widgets/crosscorrelograms.py index 3ec3fa11b6..9403a5dd03 100644 --- a/src/spikeinterface/widgets/crosscorrelograms.py +++ b/src/spikeinterface/widgets/crosscorrelograms.py @@ -15,16 +15,16 @@ class CrossCorrelogramsWidget(BaseWidget): ---------- waveform_or_sorting_extractor : WaveformExtractor or BaseSorting The object to compute/get crosscorrelograms from - unit_ids list - List of unit ids, default None - window_ms : float - Window for CCGs in ms, default 100.0 ms - bin_ms : float - Bin size in ms, default 1.0 ms - hide_unit_selector : bool - For sortingview backend, if True the unit selector is not displayed, default False - unit_colors: dict or None - If given, a dictionary with unit ids as keys and colors as values, default None + unit_ids list or None, default: None + List of unit ids + window_ms : float, default: 100.0 + Window for CCGs in ms + bin_ms : float, default: 1.0 + Bin size in ms + hide_unit_selector : bool, default: False + For sortingview backend, if True the unit selector is not displayed + unit_colors: dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values """ def __init__( diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 6e4433ee60..559aabce02 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -136,7 +136,7 @@ class StudyPerformances(BaseWidget): mode: "ordered" | "snr" | "swarm", default: "ordered" Which plot mode to use: - * "ordered": plot performance metrics vs unit indices ordered by decreasing accuracy (default) + * "ordered": plot performance metrics vs unit indices ordered by 
decreasing accuracy * "snr": plot performance metrics vs snr * "swarm": plot performance metrics as a swarm plot (see seaborn.swarmplot for details) performance_names: list or tuple, default: ('accuracy', 'precision', 'recall') diff --git a/src/spikeinterface/widgets/metrics.py b/src/spikeinterface/widgets/metrics.py index bc44e58a33..ac40e0f460 100644 --- a/src/spikeinterface/widgets/metrics.py +++ b/src/spikeinterface/widgets/metrics.py @@ -16,18 +16,18 @@ class MetricsBaseWidget(BaseWidget): Data frame with metrics sorting: BaseSorting The sorting object used for metrics calculations - unit_ids: list + unit_ids: list or None, default: None List of unit ids, default None - skip_metrics: list or None + skip_metrics: list or None, default: None If given, a list of quality metrics to skip, default None - include_metrics: list or None + include_metrics: list or None, default: None If given, a list of quality metrics to include, default None - unit_colors : dict or None + unit_colors : dict or None, default: None If given, a dictionary with unit ids as keys and colors as values, default None - hide_unit_selector : bool + hide_unit_selector : bool, default: False For sortingview backend, if True the unit selector is not displayed, default False - include_metrics_data : bool - If True, metrics data are included in unit table, by default True + include_metrics_data : bool, default: True + If True, metrics data are included in unit table """ def __init__( diff --git a/src/spikeinterface/widgets/motion.py b/src/spikeinterface/widgets/motion.py index cb11bcce0c..fbe2c8fa8b 100644 --- a/src/spikeinterface/widgets/motion.py +++ b/src/spikeinterface/widgets/motion.py @@ -11,24 +11,24 @@ class MotionWidget(BaseWidget): ---------- motion_info: dict The motion info return by correct_motion() or load back with load_motion_info() - recording : RecordingExtractor, optional - The recording extractor object (only used to get "real" times), default None - sampling_frequency : float, 
optional - The sampling frequency (needed if recording is None), default None - depth_lim : tuple - The min and max depth to display, default None (min and max of the recording) - motion_lim : tuple - The min and max motion to display, default None (min and max of the motion) - color_amplitude : bool - If True, the color of the scatter points is the amplitude of the peaks, default False - scatter_decimate : int - If > 1, the scatter points are decimated, default None - amplitude_cmap : str - The colormap to use for the amplitude, default 'inferno' - amplitude_clim : tuple - The min and max amplitude to display, default None (min and max of the amplitudes) - amplitude_alpha : float - The alpha of the scatter points, default 0.5 + recording : RecordingExtractor, default: None + The recording extractor object (only used to get "real" times) + sampling_frequency : float, default: None + The sampling frequency (needed if recording is None) + depth_lim : tuple or None, default: None + The min and max depth to display, if None (min and max of the recording) + motion_lim : tuple or None, default: None + The min and max motion to display, if None (min and max of the motion) + color_amplitude : bool, default: False + If True, the color of the scatter points is the amplitude of the peaks + scatter_decimate : int, default: None + If > 1, the scatter points are decimated + amplitude_cmap : str, default: 'inferno' + The colormap to use for the amplitude + amplitude_clim : tuple or None, default: None + The min and max amplitude to display, if None (min and max of the amplitudes) + amplitude_alpha : float, default: 1 + The alpha of the scatter points """ def __init__( diff --git a/src/spikeinterface/widgets/multicomparison.py b/src/spikeinterface/widgets/multicomparison.py index e01a79dfd5..f1df62a7da 100644 --- a/src/spikeinterface/widgets/multicomparison.py +++ b/src/spikeinterface/widgets/multicomparison.py @@ -13,15 +13,15 @@ class MultiCompGraphWidget(BaseWidget): ---------- 
multi_comparison: BaseMultiComparison The multi comparison object - draw_labels: bool + draw_labels: bool, default: False If True unit labels are shown - node_cmap: matplotlib colormap - The colormap to be used for the nodes (default 'viridis') - edge_cmap: matplotlib colormap - The colormap to be used for the edges (default 'hot') - alpha_edges: float + node_cmap: matplotlib colormap, default: 'viridis' + The colormap to be used for the nodes + edge_cmap: matplotlib colormap, default: 'hot' + The colormap to be used for the edges + alpha_edges: float, default: 0.5 Alpha value for edges - colorbar: bool + colorbar: bool, default: False If True a colorbar for the edges is plotted """ @@ -199,13 +199,12 @@ class MultiCompAgreementBySorterWidget(BaseWidget): The multi comparison object plot_type: str 'pie' or 'bar' - cmap: matplotlib colormap - The colormap to be used for the nodes (default 'Reds') - axes: list of matplotlib axes - The axes to be used for the individual plots. If not given the required axes are created. If provided, the ax - and figure parameters are ignored. + cmap: matplotlib colormap, default: 'Reds' + The colormap to be used for the nodes + fontsize: int, default: 9 + The text fontsize show_legend: bool - Show the legend in the last axes (default True). + Show the legend in the last axes Returns ------- diff --git a/src/spikeinterface/widgets/peak_activity.py b/src/spikeinterface/widgets/peak_activity.py index 8501d7ef7d..f2b3562aff 100644 --- a/src/spikeinterface/widgets/peak_activity.py +++ b/src/spikeinterface/widgets/peak_activity.py @@ -21,16 +21,16 @@ class PeakActivityMapWidget(BaseWidget): peaks: None or numpy array Optionally can give already detected peaks to avoid multiple computation. - detect_peaks_kwargs: None or dict + detect_peaks_kwargs: None or dict, default: None If peaks is None here the kwargs for detect_peak function. 
- bin_duration_s: None or float + bin_duration_s: None or float, default: None If None then static image If not None then it is an animation per bin. - with_contact_color: bool (default True) + with_contact_color: bool, default: True Plot rates with contact colors - with_interpolated_map: bool (default True) + with_interpolated_map: bool, default: True Plot rates with interpolated map - with_channel_ids: bool False default + with_channel_ids: bool, default: False Add channel ids text on the probe diff --git a/src/spikeinterface/widgets/quality_metrics.py b/src/spikeinterface/widgets/quality_metrics.py index 4a6b46b72d..4406557d93 100644 --- a/src/spikeinterface/widgets/quality_metrics.py +++ b/src/spikeinterface/widgets/quality_metrics.py @@ -10,16 +10,16 @@ class QualityMetricsWidget(MetricsBaseWidget): ---------- waveform_extractor : WaveformExtractor The object to compute/get quality metrics from - unit_ids: list - List of unit ids, default None - include_metrics: list - If given, a list of quality metrics to include, default None - skip_metrics: list or None - If given, a list of quality metrics to skip, default None - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - hide_unit_selector : bool - For sortingview backend, if True the unit selector is not displayed, default False + unit_ids: list or None, default: None + List of unit ids + include_metrics: list or None, default: None + If given, a list of quality metrics to include + skip_metrics: list or None, default: None + If given, a list of quality metrics to skip + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values + hide_unit_selector : bool, default: False + For sortingview backend, if True the unit selector is not displayed """ def __init__( diff --git a/src/spikeinterface/widgets/sorting_summary.py b/src/spikeinterface/widgets/sorting_summary.py index b9760205f9..9b98c1adaa 100644 --- 
a/src/spikeinterface/widgets/sorting_summary.py +++ b/src/spikeinterface/widgets/sorting_summary.py @@ -19,18 +19,24 @@ class SortingSummaryWidget(BaseWidget): Parameters ---------- waveform_extractor : WaveformExtractor - The waveform extractor object. - sparsity : ChannelSparsity or None - Optional ChannelSparsity to apply, default None + The waveform extractor object + unit_ids : list or None, default: None + List of unit ids + sparsity : ChannelSparsity or None, default: None + Optional ChannelSparsity to apply If WaveformExtractor is already sparse, the argument is ignored - max_amplitudes_per_unit : int or None - Maximum number of spikes per unit for plotting amplitudes, - by default None (all spikes) - curation : bool - If True, manual curation is enabled, by default False + max_amplitudes_per_unit : int or None, default: None + Maximum number of spikes per unit for plotting amplitudes. + If None, all spikes are plotted + curation : bool, default: False + If True, manual curation is enabled (sortingview backend) - unit_table_properties : list or None - List of properties to be added to the unit table, by default None + unit_table_properties : list or None, default: None + List of properties to be added to the unit table + label_choices : list or None, default: None + List of labels to be added to the curation table + unit_table_properties : list or None, default: None + List of properties to be added to the unit table (sortingview backend) """ diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py index fda2356105..eb0090900b 100644 --- a/src/spikeinterface/widgets/spike_locations.py +++ b/src/spikeinterface/widgets/spike_locations.py @@ -13,25 +13,24 @@ class SpikeLocationsWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The object to compute/get spike locations from - unit_ids : list - List of unit ids, default None - segment_index : int or None - The segment index (or None if 
mono-segment), default None - max_spikes_per_unit : int + unit_ids : list or None, default None + List of unit ids + segment_index : int or None, default None + The segment index (or None if mono-segment) + max_spikes_per_unit : int or None, default 500 Number of max spikes per unit to display. Use None for all spikes. - Default 500. - with_channel_ids : bool - Add channel ids text on the probe, default False - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - hide_unit_selector : bool - For sortingview backend, if True the unit selector is not displayed, default False - plot_all_units : bool + with_channel_ids : bool, default False + Add channel ids text on the probe + unit_colors : dict or None, default None + If given, a dictionary with unit ids as keys and colors as values + hide_unit_selector : bool, default False + For sortingview backend, if True the unit selector is not displayed + plot_all_units : bool, default True If True, all units are plotted. The unselected ones (not in unit_ids), are plotted in grey. Default True (matplotlib backend) - plot_legend : bool + plot_legend : bool, default False If True, the legend is plotted. Default False (matplotlib backend) - hide_axis : bool + hide_axis : bool, default False If True, the axis is set to off. 
Default False (matplotlib backend) """ diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index b68efc3f8a..bf53d7c926 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -19,44 +19,44 @@ class SpikesOnTracesWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The waveform extractor - channel_ids : list - The channel ids to display, default None - unit_ids : list - List of unit ids, default None - order_channel_by_depth : bool - If true orders channel by depth, default False - time_range: list - List with start time and end time, default None - sparsity : ChannelSparsity or None - Optional ChannelSparsity to apply. - If WaveformExtractor is already sparse, the argument is ignored, default None - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None + channel_ids : list or None, default: None + The channel ids to display + unit_ids : list or None, default: None + List of unit ids + order_channel_by_depth : bool, default: False + If true orders channel by depth + time_range: list or None, default: None + List with start time and end time in seconds + sparsity : ChannelSparsity or None, default: None + Optional ChannelSparsity to apply + If WaveformExtractor is already sparse, the argument is ignored + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values If None, then the get_unit_colors() is internally used. 
(matplotlib backend) mode : str in ('line', 'map', 'auto') default: 'auto' * 'line': classical for low channel count * 'map': for high channel count use color heat map * 'auto': auto switch depending on the channel count ('line' if less than 64 channels, 'map' otherwise) - return_scaled : bool - If True and the recording has scaled traces, it plots the scaled traces, default False - cmap : str - matplotlib colormap used in mode 'map', default 'RdBu' - show_channel_ids : bool - Set yticks with channel ids, default False - color_groups : bool - If True groups are plotted with different colors, default False - color : str - The color used to draw the traces, default None - clim : None, tuple or dict + return_scaled : bool, default: False + If True and the recording has scaled traces, it plots the scaled traces + cmap : str, default: 'RdBu' + matplotlib colormap used in mode 'map' + show_channel_ids : bool, default: False + Set yticks with channel ids + color_groups : bool, default: False + If True groups are plotted with different colors + color : str or None, default: None + The color used to draw the traces + clim : None, tuple or dict, default: None When mode is 'map', this argument controls color limits. 
If dict, keys should be the same as recording keys Default None - with_colorbar : bool - When mode is 'map', a colorbar is added, by default True - tile_size : int - For sortingview backend, the size of each tile in the rendered image, default 512 - seconds_per_row : float - For 'map' mode and sortingview backend, seconds to render in each row, default 0.2 + with_colorbar : bool, default: True + When mode is 'map', a colorbar is added + tile_size : int, default: 512 + For sortingview backend, the size of each tile in the rendered image + seconds_per_row : float, default: 0.2 + For 'map' mode and sortingview backend, seconds to render in each row """ def __init__( diff --git a/src/spikeinterface/widgets/template_metrics.py b/src/spikeinterface/widgets/template_metrics.py index 748babb57d..1658efe737 100644 --- a/src/spikeinterface/widgets/template_metrics.py +++ b/src/spikeinterface/widgets/template_metrics.py @@ -10,16 +10,16 @@ class TemplateMetricsWidget(MetricsBaseWidget): ---------- waveform_extractor : WaveformExtractor The object to compute/get template metrics from - unit_ids : list - List of unit ids, default None - include_metrics : list - If given list of quality metrics to include, default None - skip_metrics : list or None - If given, a list of quality metrics to skip, default None - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - hide_unit_selector : bool - For sortingview backend, if True the unit selector is not displayed, default False + unit_ids : list or None, default: None + List of unit ids + include_metrics : list or None, default: None + If given list of quality metrics to include + skip_metrics : list or None, default: None + If given, a list of quality metrics to skip + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values + hide_unit_selector : bool, default: False + For sortingview backend, if True the unit 
selector is not displayed """ def __init__( diff --git a/src/spikeinterface/widgets/template_similarity.py b/src/spikeinterface/widgets/template_similarity.py index 63ac177835..2469ced807 100644 --- a/src/spikeinterface/widgets/template_similarity.py +++ b/src/spikeinterface/widgets/template_similarity.py @@ -12,17 +12,17 @@ class TemplateSimilarityWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The object to compute/get template similarity from - unit_ids : list + unit_ids : list or None, default: None List of unit ids default None - display_diagonal_values : bool + display_diagonal_values : bool, default: False If False, the diagonal is displayed as zeros. - If True, the similarity values (all 1s) are displayed, default False - cmap : str - The matplotlib colormap. Default 'viridis'. - show_unit_ticks : bool - If True, ticks display unit ids, default False. - show_colorbar : bool - If True, color bar is displayed, default True. + If True, the similarity values (all 1s) are displayed + cmap : str, default: 'viridis' + The matplotlib colormap + show_unit_ticks : bool, default: False + If True, ticks display unit ids + show_colorbar : bool, default: True + If True, color bar is displayed """ def __init__( diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index fc8b30eb05..e4893060f8 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -16,41 +16,40 @@ class TracesWidget(BaseWidget): recording: RecordingExtractor, dict, or list The recording extractor object. 
If dict (or list) then it is a multi-layer display to compare, for example, different processing steps - segment_index: None or int - The segment index (required for multi-segment recordings), default None - channel_ids: list - The channel ids to display, default None - order_channel_by_depth: bool - Reorder channel by depth, default False - time_range: list - List with start time and end time, default None - mode: str + segment_index: None or int, default: None + The segment index (required for multi-segment recordings) + channel_ids: list or None, default: None + The channel ids to display + order_channel_by_depth: bool, default: False + Reorder channel by depth + time_range: list, tuple or None, default: None + List with start time and end time + mode: str, default: 'auto' Three possible modes, default 'auto': * 'line': classical for low channel count * 'map': for high channel count use color heat map * 'auto': auto switch depending on the channel count ('line' if less than 64 channels, 'map' otherwise) - return_scaled: bool - If True and the recording has scaled traces, it plots the scaled traces, default False - cmap: str - matplotlib colormap used in mode 'map', default 'RdBu' - show_channel_ids: bool - Set yticks with channel ids, default False - color_groups: bool - If True groups are plotted with different colors, default False - color: str - The color used to draw the traces, default None - clim: None, tuple or dict + return_scaled: bool, default: False + If True and the recording has scaled traces, it plots the scaled traces + cmap: str, default: 'RdBu_r' + matplotlib colormap used in mode 'map' + show_channel_ids: bool, default: False + Set yticks with channel ids + color_groups: bool, default: False + If True groups are plotted with different colors + color: str or None, default: None + The color used to draw the traces + clim: None, tuple or dict, default: None When mode is 'map', this argument controls color limits. 
If dict, keys should be the same as recording keys - Default None - with_colorbar: bool - When mode is 'map', a colorbar is added, by default True - tile_size: int - For sortingview backend, the size of each tile in the rendered image, default 1500 - seconds_per_row: float - For 'map' mode and sortingview backend, seconds to render in each row, default 0.2 - add_legend : bool + with_colorbar: bool, default: True + When mode is 'map', a colorbar is added + tile_size: int, default: 1500 + For sortingview backend, the size of each tile in the rendered image + seconds_per_row: float, default: 0.2 + For 'map' mode and sortingview backend, seconds to render in each row + add_legend : bool, default: True If True adds legend to figures, default True """ diff --git a/src/spikeinterface/widgets/unit_depths.py b/src/spikeinterface/widgets/unit_depths.py index 1cc7c909a1..f507b70fd1 100644 --- a/src/spikeinterface/widgets/unit_depths.py +++ b/src/spikeinterface/widgets/unit_depths.py @@ -16,12 +16,12 @@ class UnitDepthsWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The input waveform extractor - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - depth_axis : int - The dimension of unit_locations that is depth, default 1 - peak_sign: str (neg/pos/both) - Sign of peak for amplitudes, default 'neg' + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values + depth_axis : int, default: 1 + The dimension of unit_locations that is depth + peak_sign: 'neg' or 'pos' or 'both', default: 'neg' + Sign of peak for amplitudes """ def __init__( diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py index b41ee3508b..d5f26f6dfd 100644 --- a/src/spikeinterface/widgets/unit_locations.py +++ b/src/spikeinterface/widgets/unit_locations.py @@ -16,21 +16,21 @@ class UnitLocationsWidget(BaseWidget): ---------- 
waveform_extractor : WaveformExtractor The object to compute/get unit locations from - unit_ids : list - List of unit ids default None - with_channel_ids : bool - Add channel ids text on the probe, default False - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - hide_unit_selector : bool - If True, the unit selector is not displayed, default False (sortingview backend) - plot_all_units : bool + unit_ids : list or None, default: None + List of unit ids + with_channel_ids : bool, default: False + Add channel ids text on the probe + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values + hide_unit_selector : bool, default: False + If True, the unit selector is not displayed (sortingview backend) + plot_all_units : bool, default: True If True, all units are plotted. The unselected ones (not in unit_ids), - are plotted in grey, default True (matplotlib backend) - plot_legend : bool - If True, the legend is plotted, default False (matplotlib backend) - hide_axis : bool - If True, the axis is set to off, default False (matplotlib backend) + are plotted in grey (matplotlib backend) + plot_legend : bool, default: False + If True, the legend is plotted (matplotlib backend) + hide_axis : bool, default: False + If True, the axis is set to off (matplotlib backend) """ def __init__( diff --git a/src/spikeinterface/widgets/unit_presence.py b/src/spikeinterface/widgets/unit_presence.py index 3d605936a2..089c278af3 100644 --- a/src/spikeinterface/widgets/unit_presence.py +++ b/src/spikeinterface/widgets/unit_presence.py @@ -13,12 +13,12 @@ class UnitPresenceWidget(BaseWidget): The sorting extractor object segment_index: None or int The segment index. - time_range: list + time_range: list or None, default: None List with start time and end time - bin_duration_s: float, default 0.5 - Bin size (in seconds) for the heat map time axis. 
- smooth_sigma: float or None - + bin_duration_s: float, default: 0.5 + Bin size (in seconds) for the heat map time axis + smooth_sigma: float, default: 4.5 + Sigma for the Gaussian kernel (in number of bins) """ def __init__( diff --git a/src/spikeinterface/widgets/unit_probe_map.py b/src/spikeinterface/widgets/unit_probe_map.py index 4068c1c530..b8eea80ef4 100644 --- a/src/spikeinterface/widgets/unit_probe_map.py +++ b/src/spikeinterface/widgets/unit_probe_map.py @@ -22,9 +22,9 @@ class UnitProbeMapWidget(BaseWidget): List of unit ids. channel_ids: list The channel ids to display - animated: True/False - animation for amplitude on time - with_channel_ids: bool False default + animated: bool, default: False + Animation for amplitude on time + with_channel_ids: bool, default: False add channel ids text on the probe """ diff --git a/src/spikeinterface/widgets/unit_summary.py b/src/spikeinterface/widgets/unit_summary.py index 964b5813e6..35fde07326 100644 --- a/src/spikeinterface/widgets/unit_summary.py +++ b/src/spikeinterface/widgets/unit_summary.py @@ -23,10 +23,10 @@ class UnitSummaryWidget(BaseWidget): The waveform extractor object unit_id : int or str The unit id to plot the summary of - unit_colors : dict or None - If given, a dictionary with unit ids as keys and colors as values, default None - sparsity : ChannelSparsity or None - Optional ChannelSparsity to apply, default None + unit_colors : dict or None, default: None + If given, a dictionary with unit ids as keys and colors as values, + sparsity : ChannelSparsity or None, default: None + Optional ChannelSparsity to apply. 
If WaveformExtractor is already sparse, the argument is ignored """ diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py index 8ffc931bf2..87d929fc06 100644 --- a/src/spikeinterface/widgets/unit_waveforms.py +++ b/src/spikeinterface/widgets/unit_waveforms.py @@ -16,47 +16,47 @@ class UnitWaveformsWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The input waveform extractor - channel_ids: list - The channel ids to display, default None - unit_ids : list - List of unit ids, default None - plot_templates : bool - If True, templates are plotted over the waveforms, default True - sparsity : ChannelSparsity or None - Optional ChannelSparsity to apply, default None + channel_ids: list or None, default: None + The channel ids to display + unit_ids : list or None, default: None + List of unit ids + plot_templates : bool, default: True + If True, templates are plotted over the waveforms + sparsity : ChannelSparsity or None, default: None + Optional ChannelSparsity to apply. 
If WaveformExtractor is already sparse, the argument is ignored - set_title : bool - Create a plot title with the unit number if True, default True - plot_channels : bool - Plot channel locations below traces, default False - unit_selected_waveforms : None or dict + set_title : bool, default: True + Create a plot title with the unit number if True + plot_channels : bool, default: False + Plot channel locations below traces + unit_selected_waveforms : None or dict, default: None A dict key is unit_id and value is the subset of waveforms indices that should be - be displayed (matplotlib backend), default None - max_spikes_per_unit : int or None + be displayed (matplotlib backend) + max_spikes_per_unit : int or None, default: 50 If given and unit_selected_waveforms is None, only max_spikes_per_unit random units are - displayed per waveform, default 50 (matplotlib backend) - axis_equal : bool - Equal aspect ratio for x and y axis, to visualize the array geometry to scale, default False - lw_waveforms : float - Line width for the waveforms, default 1 (matplotlib backend) - lw_templates : float - Line width for the templates, default 2 (matplotlib backend) - unit_colors : None or dict - A dict key is unit_id and value is any color format handled by matplotlib, default None + displayed per waveform (matplotlib backend) + axis_equal : bool, default: False + Equal aspect ratio for x and y axis, to visualize the array geometry to scale + lw_waveforms : float, default: 1 + Line width for the waveforms (matplotlib backend) + lw_templates : float, default: 2 + Line width for the templates (matplotlib backend) + unit_colors : None or dict, default: None + A dict key is unit_id and value is any color format handled by matplotlib. If None, then the get_unit_colors() is internally used. 
(matplotlib backend) - alpha_waveforms : float - Alpha value for waveforms, default 0.5 (matplotlib backend) - alpha_templates : float - Alpha value for templates, default 1 (matplotlib backend) - hide_unit_selector : bool - For sortingview backend, if True the unit selector is not displayed, default False - same_axis : bool - If True, waveforms and templates are displayed on the same axis, default False (matplotlib backend) - x_offset_units : bool + alpha_waveforms : float, default: 0.5 + Alpha value for waveforms (matplotlib backend) + alpha_templates : float, default: 1 + Alpha value for templates (matplotlib backend) + hide_unit_selector : bool, default: False + For sortingview backend, if True the unit selector is not displayed + same_axis : bool, default: False + If True, waveforms and templates are displayed on the same axis (matplotlib backend) + x_offset_units : bool, default: False In case same_axis is True, this parameter allow to x-offset the waveforms for different units - (recommended for a few units), default False (matlotlib backend) - plot_legend : bool - Display legend, default True + (recommended for a few units) (matplotlib backend) + plot_legend : bool, default: True + Display legend (matplotlib backend) """ def __init__( diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index b3391c0712..b7abf79943 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -14,23 +14,23 @@ class UnitWaveformDensityMapWidget(BaseWidget): ---------- waveform_extractor : WaveformExtractor The waveformextractor for calculating waveforms - channel_ids : list - The channel ids to display, default None - unit_ids : list - List of unit ids, default None - sparsity : ChannelSparsity or None - Optional ChannelSparsity to apply, default None + channel_ids : list or None, default: None + The channel ids to display + 
unit_ids : list or None, default: None + List of unit ids + sparsity : ChannelSparsity or None, default: None + Optional ChannelSparsity to apply If WaveformExtractor is already sparse, the argument is ignored - use_max_channel : bool - Use only the max channel, default False - peak_sign : str (neg/pos/both) - Used to detect max channel only when use_max_channel=True, default 'neg' - unit_colors : None or dict + use_max_channel : bool, default: False + Use only the max channel + peak_sign : 'pos' or 'neg' or 'both', default: 'neg' + Used to detect max channel only when use_max_channel=True + unit_colors : None or dict, default: None A dict key is unit_id and value is any color format handled by matplotlib. - If None, then the get_unit_colors() is internally used, default None - same_axis : bool + If None, then the get_unit_colors() is internally used + same_axis : bool, default: False If True then all density are plot on the same axis and then channels is the union - all channel per units, default False + all channel per units """ def __init__( diff --git a/src/spikeinterface/widgets/utils.py b/src/spikeinterface/widgets/utils.py index 03d5be0c53..5d56709921 100644 --- a/src/spikeinterface/widgets/utils.py +++ b/src/spikeinterface/widgets/utils.py @@ -28,15 +28,15 @@ def get_some_colors(keys, color_engine="auto", map_name="gist_ncar", format="RGB The engine to generate colors map_name : str Used for matplotlib - format: str - The output formats, default 'RGBA' - shuffle : bool or None - Shuffle or not, default None + format: str, default: 'RGBA' + The output formats + shuffle : bool or None, default: None + Shuffle or not the colors. If None then: * set to True for matplotlib and colorsys * set to False for distinctipy - seed: int or None - Set the seed, default None + seed: int or None, default: None + Set the seed Returns -------