From d6e1e0d6a00641377921176c02aa9edc75877b96 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 27 Oct 2023 14:15:46 +0200 Subject: [PATCH] Final round of cleaning --- .../comparison/basecomparison.py | 2 +- .../comparison/comparisontools.py | 12 ++--- .../comparison/multicomparisons.py | 10 ++-- .../comparison/paircomparisons.py | 20 ++++---- src/spikeinterface/core/baserecording.py | 9 ++-- src/spikeinterface/core/basesnippets.py | 4 +- .../core/binaryrecordingextractor.py | 4 +- src/spikeinterface/core/core_tools.py | 5 +- src/spikeinterface/core/generate.py | 2 +- src/spikeinterface/core/numpyextractors.py | 12 ++--- src/spikeinterface/core/waveform_extractor.py | 13 ++--- src/spikeinterface/curation/auto_merge.py | 6 +-- .../curation/curationsorting.py | 8 ++-- .../curation/mergeunitssorting.py | 7 ++- .../curation/remove_duplicated_spikes.py | 2 +- .../curation/remove_redundant.py | 6 +-- src/spikeinterface/exporters/to_phy.py | 12 ++--- .../extractors/combinatoextractors.py | 6 +-- .../extractors/hdsortextractors.py | 8 ++-- .../extractors/iblstreamingrecording.py | 14 +++--- .../extractors/mclustextractors.py | 4 +- .../extractors/mdaextractors.py | 24 +++++----- .../extractors/neoextractors/alphaomega.py | 2 +- .../extractors/neoextractors/neuroscope.py | 13 ++--- .../extractors/neoextractors/openephys.py | 6 +-- .../extractors/neoextractors/spikeglx.py | 8 ++-- .../extractors/nwbextractors.py | 14 +++--- .../extractors/shybridextractors.py | 10 ++-- .../extractors/waveclussnippetstextractors.py | 4 +- .../postprocessing/amplitude_scalings.py | 4 +- .../postprocessing/principal_component.py | 12 ++--- .../postprocessing/spike_amplitudes.py | 4 +- .../postprocessing/template_metrics.py | 4 +- .../postprocessing/template_similarity.py | 4 +- .../preprocessing/common_reference.py | 18 +++---- .../preprocessing/depth_order.py | 4 +- .../preprocessing/detect_bad_channels.py | 8 +--- .../preprocessing/directional_derivative.py | 4 +- src/spikeinterface/preprocessing/filter.py | 10 ++-- .../preprocessing/filter_opencl.py | 6 +-- src/spikeinterface/preprocessing/motion.py | 12 ++--- .../preprocessing/normalize_scale.py | 8 ++-- .../preprocessing/preprocessing_tools.py | 3 +- .../preprocessing/remove_artifacts.py | 32 ++++++------- .../preprocessing/silence_periods.py | 6 +-- src/spikeinterface/preprocessing/whiten.py | 14 +++--- .../preprocessing/zero_channel_pad.py | 12 ++--- .../qualitymetrics/misc_metrics.py | 16 +++---- .../qualitymetrics/pca_metrics.py | 4 +- src/spikeinterface/sorters/launcher.py | 16 +++---- .../benchmark/benchmark_matching.py | 48 +++++++++---------- .../sortingcomponents/clustering/main.py | 2 +- .../sortingcomponents/clustering/split.py | 18 +++---- .../sortingcomponents/features_from_peaks.py | 2 +- .../sortingcomponents/matching/main.py | 8 ++-- .../sortingcomponents/matching/wobble.py | 28 +++++------ .../sortingcomponents/peak_localization.py | 8 ++-- .../sortingcomponents/peak_selection.py | 20 ++++---- .../waveforms/waveform_thresholder.py | 4 +- src/spikeinterface/widgets/base.py | 26 +++++----- src/spikeinterface/widgets/collision.py | 16 +++---- src/spikeinterface/widgets/gtstudy.py | 2 +- src/spikeinterface/widgets/motion.py | 2 +- src/spikeinterface/widgets/multicomparison.py | 16 +++---- src/spikeinterface/widgets/spike_locations.py | 6 +-- .../widgets/spikes_on_traces.py | 19 ++++---- .../widgets/template_similarity.py | 2 +- src/spikeinterface/widgets/unit_depths.py | 2 +- .../widgets/unit_waveforms_density_map.py | 2 +- 
src/spikeinterface/widgets/utils.py | 4 +- 70 files changed, 336 insertions(+), 347 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 5af20d79b5..7a231f3cb4 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -223,7 +223,7 @@ class BasePairComparison(BaseComparison): It handles the matching procedures. Agreement scores must be computed in inherited classes by overriding the - '_do_agreement(self)' function + "_do_agreement(self)" function """ def __init__(self, object1, object2, name1, name2, match_score=0.5, chance_score=0.1, verbose=False): diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index 20ee7910b4..cd89f042cf 100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -570,7 +570,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun def do_count_score(event_counts1, event_counts2, match_12, match_event_count): """ For each ground truth unit, count how many: - 'tp', 'fn', 'cl', 'fp', 'num_gt', 'num_tested', 'tested_id' + "tp", "fn", "cl", "fp", "num_gt", "num_tested", "tested_id" Parameters ---------- @@ -634,8 +634,8 @@ def compute_performance(count_score): Note : * we don't have TN because it does not make sense here. - * 'accuracy' = 'tp_rate' because TN=0 - * 'recall' = 'sensitivity' + * "accuracy" = "tp_rate" because TN=0 + * "recall" = "sensitivity" """ import pandas as pd @@ -674,7 +674,7 @@ def make_matching_events(times1, times2, delta): Returns ------- - matching_event: numpy array dtype = ['index1', 'index2', 'delta'] + matching_event: numpy array dtype = ["index1", "index2", "delta"] 1d of collision """ times_concat = np.concatenate((times1, times2)) @@ -731,8 +731,8 @@ def make_collision_events(sorting, delta): ------- collision_events: numpy array dtype = [('index1', 'int64'), ('unit_id1', 'int64'), - ('index2', 'int64'), ('unit_id2', 'int64'), - ('delta', 'int64')] + ('index2', 'int64'), ('unit_id2', 'int64'), + ('delta', 'int64')] 1d of all collision """ unit_ids = np.array(sorting.get_unit_ids()) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 2ff98f58be..bc7d76ea5a 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -26,7 +26,7 @@ class MultiSortingComparison(BaseMultiComparison, MixinSpikeTrainComparison): sorting_list: list List of sorting extractor objects to be compared name_list: list, default: None - List of spike sorter names. If not given, sorters are named as 'sorter0', 'sorter1', 'sorter2', etc. + List of spike sorter names. If not given, sorters are named as "sorter0", "sorter1", "sorter2", etc. delta_time: float, default: 0.4 Number of ms to consider coincident spikes match_score: float, default: 0.5 @@ -156,15 +156,15 @@ def _do_agreement_matrix(self, minimum_agreement=1): def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_count_only=False): """ - Returns AgreementSortingExtractor with units with a 'minimum_matching' agreement. + Returns AgreementSortingExtractor with units with a "minimum_matching" agreement. Parameters ---------- minimum_agreement_count: int Minimum number of matches among sorters to include a unit.
minimum_agreement_count_only: bool - If True, only units with agreement == 'minimum_matching' are included. - If False, units with an agreement >= 'minimum_matching' are included + If True, only units with agreement == "minimum_matching" are included. + If False, units with an agreement >= "minimum_matching" are included Returns ------- @@ -310,7 +310,7 @@ class MultiTemplateComparison(BaseMultiComparison, MixinTemplateComparison): waveform_list: list List of waveform extractor objects to be compared name_list: list, default: None - List of session names. If not given, sorters are named as 'sess0', 'sess1', 'sess2', etc. + List of session names. If not given, sorters are named as "sess0", "sess1", "sess2", etc. match_score: float, default: 0.8 Minimum agreement score to match units chance_score: float, default: 0.3 diff --git a/src/spikeinterface/comparison/paircomparisons.py b/src/spikeinterface/comparison/paircomparisons.py index 42ab48be8e..e2dc30493d 100644 --- a/src/spikeinterface/comparison/paircomparisons.py +++ b/src/spikeinterface/comparison/paircomparisons.py @@ -233,7 +233,7 @@ class GroundTruthComparison(BasePairSorterComparison): Tell if the ground truth is "exhaustive" or not. In other words, if the GT has all possible units. It allows more performance measurements. For instance, MEArec simulated datasets have exhaustive_gt=True - match_mode: "hungarian" | "best", default: 'hungarian + match_mode: "hungarian" | "best", default: "hungarian" The method to match units n_jobs: int, default: -1 Number of cores to use in parallel. Uses all available if -1 @@ -379,21 +379,21 @@ def _do_score_labels(self): def get_performance(self, method="by_unit", output="pandas"): """ Get performance rate with several methods: - * 'raw_count' : just render the raw count table - * 'by_unit' : render perf as rate unit by unit of the GT - * 'pooled_with_average' : compute rate unit by unit and average + * "raw_count" : just render the raw count table + * "by_unit" : render perf as rate unit by unit of the GT + * "pooled_with_average" : compute rate unit by unit and average Parameters ---------- - method: str - 'by_unit', or 'pooled_with_average' - output: str - 'pandas' or 'dict' + method: "by_unit" | "pooled_with_average", default: "by_unit" + The method to compute performance + output: "pandas" | "dict", default: "pandas" + The output format Returns ------- perf: pandas dataframe/series (or dict) - dataframe/series (based on 'output') with performance entries + dataframe/series (based on "output") with performance entries """ import pandas as pd @@ -583,7 +583,7 @@ def get_overmerged_units(self, overmerged_score=None): Parameters ---------- overmerged_score: float, default: None - Tested units with 2 or more agreement scores above 'overmerged_score' + Tested units with 2 or more agreement scores above "overmerged_score" are counted as "overmerged". """ assert self.exhaustive_gt, "overmerged_units list is valid only if exhaustive_gt=True" diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index e3188dc8ed..6dfe038558 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -771,15 +771,14 @@ def get_traces( Parameters ---------- start_frame: Union[int, None], default: None - start sample index, or zero if None. Defaults to None. + start sample index, or zero if None end_frame: Union[int, None], default: None - end_sample, or number of samples if None. Defaults to None.
+ end_sample, or number of samples if None channel_indices: Union[List, None], default: None - Indices of channels to return, or all channels if None. Defaults to None. + Indices of channels to return, or all channels if None order: list or None, default: None The memory order of the returned array. - Use Order.C for C order, Order.F for Fortran order, or Order.K to keep the order of the underlying data. - Defaults to Order.K. + Use Order.C for C order, Order.F for Fortran order, or Order.K to keep the order of the underlying data Returns ------- diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index f74a88610f..02262fd88e 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -265,9 +265,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- start_frame: Union[int, None], default: None - start sample index, or zero if None. Defaults to None. + start sample index, or zero if None end_frame: Union[int, None], default: None - end_sample, or number of samples if None. Defaults to None. + end_sample, or number of samples if None Returns ------- diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index 5755effafe..d8c6512a38 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -140,8 +140,8 @@ def write_recording(recording, file_paths, dtype=None, **job_kwargs): The recording extractor object to be saved in .dat format file_paths: str The path to the file. - dtype: dtype - Type of the saved data. Default float32. + dtype: dtype, default: None + Type of the saved data {} """ write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs) diff --git a/src/spikeinterface/core/core_tools.py b/src/spikeinterface/core/core_tools.py index d0df5ea10e..ab330a75e0 100644 --- a/src/spikeinterface/core/core_tools.py +++ b/src/spikeinterface/core/core_tools.py @@ -263,9 +263,9 @@ def write_binary_recording( The recording extractor object to be saved in .dat format file_path: str or list[str] The path to the file. - dtype: dtype, default: None - Type of the saved data. Default float32. - add_file_extension: bool, default: True + dtype: dtype or None, default: None + Type of the saved data + add_file_extension: bool, default: True If True, the ".raw" file extension is added if the file name does not end in "raw", "bin", or "dat" byte_offset: int, default: 0 Offset in bytes for the binary file (e.g. to write a header) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 435e949e9f..3fb01ea02f 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -52,7 +52,7 @@ def generate_recording( The number of dimensions of the probe, default: 2. Set to 3 to make 3 dimensional probes. seed : Optional[int] A seed for the np.random.default_rng function - mode: str ["lazy", "legacy"] Default "lazy". + mode: str ["lazy", "legacy"], default: "lazy". "legacy": generate a NumpyRecording with white noise. This mode is kept for backward compatibility and will be deprecated in version 0.100.0. "lazy": return a NoiseGeneratorRecording instance.
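For a quick check of the two documented `generate_recording` modes, a minimal doctest-style sketch (the parameter values below are illustrative and not part of this patch):

>>> from spikeinterface.core import generate_recording
>>> rec = generate_recording(num_channels=4, durations=[10.0], mode="lazy")  # lazy NoiseGeneratorRecording
>>> traces = rec.get_traces(start_frame=0, end_frame=100)  # noise is generated on the fly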
diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 380d461d8e..82075e638c 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -171,10 +171,11 @@ def from_times_labels(times_list, labels_list, sampling_frequency, unit_ids=None Parameters ---------- times_list: list of array (or array) - An array of spike times (in frames). + An array of spike times (in frames) labels_list: list of array (or array) - An array of spike labels corresponding to the given times. - unit_ids: (None by default) the explicit list of unit_ids that should be extracted from labels_list + An array of spike labels corresponding to the given times + unit_ids: list or None, default: None + The explicit list of unit_ids that should be extracted from labels_list If None, then it will be np.unique(labels_list) """ @@ -580,10 +581,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- start_frame: Union[int, None], default: None - start sample index, or zero if None. Defaults to None. + start sample index, or zero if None end_frame: Union[int, None], default: None - end_sample, or number of samples if None. Defaults to None. - + end_sample, or number of samples if None Returns ------- snippets: slice diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 71b2cf70b5..c75e6a2082 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -41,9 +41,8 @@ class WaveformExtractor: rec_attributes: None or dict When recording is None then a minimal dict with some attributes is needed. - allow_unfiltered: bool + allow_unfiltered: bool, default: False If True, will accept an unfiltered recording. - False by default. Returns ------- we: WaveformExtractor @@ -650,9 +649,8 @@ def set_recording( rec_attributes: None or dict When recording is None then a minimal dict with some attributes is needed. - allow_unfiltered: bool + allow_unfiltered: bool, default: False If True, will accept an unfiltered recording. - False by default. """ if recording is None: # Recordless mode. @@ -1531,7 +1529,7 @@ def extract_waveforms( When True, you must provide some kwargs to handle `precompute_sparsity()` to control the kind of sparsity you want to apply (by radius, by best channels, ...) sparsity: ChannelSparsity or None, default: None - The sparsity used to compute waveforms. If this is given, `sparse` is ignored. Default None. + The sparsity used to compute waveforms. If this is given, `sparse` is ignored num_spikes_for_sparsity: int, default: 100 The number of spikes to use to estimate sparsity (if sparse=True). allow_unfiltered: bool, default: False @@ -1697,9 +1695,8 @@ def precompute_sparsity( Time in ms to cut before spike peak ms_after: float Time in ms to cut after spike peak - allow_unfiltered: bool - If true, will accept an allow_unfiltered recording. - False by default.
+ allow_unfiltered: bool, default: False + If True, will accept an unfiltered recording kwargs for sparsity strategy: diff --git a/src/spikeinterface/curation/auto_merge.py b/src/spikeinterface/curation/auto_merge.py index 2f217fd102..6db8d856cb 100644 --- a/src/spikeinterface/curation/auto_merge.py +++ b/src/spikeinterface/curation/auto_merge.py @@ -98,8 +98,8 @@ def get_potential_auto_merge( steps: None or list of str, default: None which steps to run (gives flexibility to run just some steps) If None all steps are done. - Pontential steps: 'min_spikes', 'remove_contaminated', 'unit_positions', 'correlogram', 'template_similarity', - 'check_increase_score'. Please check steps explanations above! + Potential steps: "min_spikes", "remove_contaminated", "unit_positions", "correlogram", "template_similarity", + "check_increase_score". Please check steps explanations above! Returns ------- @@ -311,7 +311,7 @@ def smooth_correlogram(correlograms, bins, sigma_smooth_ms=0.6): import scipy.signal # OLD implementation : smooth correlogram by low pass filter - # b, a = scipy.signal.butter(N=2, Wn = correlogram_low_pass / (1e3 / bin_ms /2), btype='low') + # b, a = scipy.signal.butter(N=2, Wn = correlogram_low_pass / (1e3 / bin_ms /2), btype="low") # correlograms_smoothed = scipy.signal.filtfilt(b, a, correlograms, axis=2) # new implementation smooth by convolution with a Gaussian kernel diff --git a/src/spikeinterface/curation/curationsorting.py b/src/spikeinterface/curation/curationsorting.py index bdb33e9eb1..c50c6914e1 100644 --- a/src/spikeinterface/curation/curationsorting.py +++ b/src/spikeinterface/curation/curationsorting.py @@ -14,10 +14,10 @@ class CurationSorting: ---------- parent_sorting: Sorting The sorting object - properties_policy: str - Policy used to propagate properties after split and merge operation. If 'keep' the properties will be - passed to the new units (if the original units have the same value). If 'remove' the new units will have - an empty value for all the properties. Default: 'keep' + properties_policy: "keep" | "remove", default: "keep" + Policy used to propagate properties after split and merge operation. If "keep" the properties will be + passed to the new units (if the original units have the same value). If "remove" the new units will have + an empty value for all the properties make_graph: bool True to keep a Networkx graph instance with the curation history Returns diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 5295cc76d8..ae033d5531 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -17,11 +17,10 @@ class MergeUnitsSorting(BaseSorting): but it can also have more (merge multiple units at once). new_unit_ids: None or list A new unit_ids for merged units. If given, it needs to have the same length as `units_to_merge` - properties_policy: str ('keep', 'remove') - Policy used to propagate properties. If 'keep' the properties will be passed to the new units - (if the units_to_merge have the same value). If 'remove' the new units will have an empty + properties_policy: "keep" | "remove", default: "keep" + Policy used to propagate properties. If "keep" the properties will be passed to the new units + (if the units_to_merge have the same value). If "remove" the new units will have an empty value for all the properties of the new unit. - Default: 'keep' delta_time_ms: float or None Number of ms to consider for duplicated spikes.
None won't check for duplications diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 04af69b37a..e29e88377e 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -17,7 +17,7 @@ class RemoveDuplicatedSpikesSorting(BaseSorting): The parent sorting. censored_period_ms: float The censored period to consider 2 spikes to be duplicated (in ms). - method: str in ("keep_first", "keep_last", "keep_first_iterative', 'keep_last_iterative", random") + method: "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default: "keep_first" Method used to remove the duplicated spikes. If method = "random", will randomly choose to remove the first or last spike. If method = "keep_first", for each ISI violation, will remove the second spike. diff --git a/src/spikeinterface/curation/remove_redundant.py b/src/spikeinterface/curation/remove_redundant.py index 9940af69b9..88868c8730 100644 --- a/src/spikeinterface/curation/remove_redundant.py +++ b/src/spikeinterface/curation/remove_redundant.py @@ -26,9 +26,9 @@ def remove_redundant_units( When a redundant pair is found, there are several strategies to choose which unit is the best: - * 'minimum_shift' - * 'highest_amplitude' - * 'max_spikes' + * "minimum_shift" + * "highest_amplitude" + * "max_spikes" Parameters diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 7347d6c0e6..59771331bc 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -54,20 +54,20 @@ def export_to_phy( sparsity: ChannelSparsity or None, default: None The sparsity object copy_binary: bool, default: True - If True, the recording is copied and saved in the phy 'output_folder' + If True, the recording is copied and saved in the phy "output_folder" remove_if_exists: bool, default: False - If True and 'output_folder' exists, it is removed and overwritten + If True and "output_folder" exists, it is removed and overwritten peak_sign: "neg" | "pos" | "both", default: "neg" Used by compute_spike_amplitudes - template_mode: str, default: 'median' - Parameter 'mode' to be given to WaveformExtractor.get_template() + template_mode: str, default: "median" + Parameter "mode" to be given to WaveformExtractor.get_template() dtype: dtype or None, default: None Dtype to save binary data verbose: bool, default: True If True, output is verbose use_relative_path : bool, default: False - If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r'recording.dat'`). If `copy_binary=False`, then uses a path relative to the `output_folder` - If False, uses an absolute path in the `params.py` (ie `dat_path=r'path/to/the/recording.dat'`) + If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r"recording.dat"`). 
If `copy_binary=False`, then uses a path relative to the `output_folder` + If False, uses an absolute path in the `params.py` (ie `dat_path=r"path/to/the/recording.dat"`) {} """ diff --git a/src/spikeinterface/extractors/combinatoextractors.py b/src/spikeinterface/extractors/combinatoextractors.py index fa2bdde450..737bdfe7a4 100644 --- a/src/spikeinterface/extractors/combinatoextractors.py +++ b/src/spikeinterface/extractors/combinatoextractors.py @@ -22,9 +22,9 @@ class CombinatoSortingExtractor(BaseSorting): Path to the Combinato folder. sampling_frequency : int, default: 30000 The sampling frequency. - user : str - The username that ran the sorting. Defaults to 'simple'. - det_sign : {'both', 'pos', 'neg'} + user : str, default: "simple" + The username that ran the sorting + det_sign : "both", "pos", "neg", default: "both" Which sign was used for detection. keep_good_only : bool, default: True Whether to only keep good units. diff --git a/src/spikeinterface/extractors/hdsortextractors.py b/src/spikeinterface/extractors/hdsortextractors.py index 178596d052..074a3fbd40 100644 --- a/src/spikeinterface/extractors/hdsortextractors.py +++ b/src/spikeinterface/extractors/hdsortextractors.py @@ -191,13 +191,13 @@ def write_sorting(sorting, save_path, locations=None, noise_std_by_channel=None, if noise_std_by_channel is None: noise_std_by_channel = np.ones((1, n_channels)) - dict_to_save = {'Units': units, - 'MultiElectrode': multi_electrode, - 'noiseStd': noise_std_by_channel, + dict_to_save = {"Units": units, + "MultiElectrode": multi_electrode, + "noiseStd": noise_std_by_channel, "samplingRate": sorting._sampling_frequency} # Save Units and MultiElectrode to .mat file: - MATSortingExtractor.write_dict_to_mat(save_path, dict_to_save, version='7.3') + MATSortingExtractor.write_dict_to_mat(save_path, dict_to_save, version="7.3") """ diff --git a/src/spikeinterface/extractors/iblstreamingrecording.py b/src/spikeinterface/extractors/iblstreamingrecording.py index 69626f3bd9..ca0f0a0335 100644 --- a/src/spikeinterface/extractors/iblstreamingrecording.py +++ b/src/spikeinterface/extractors/iblstreamingrecording.py @@ -18,14 +18,14 @@ class IblStreamingRecordingExtractor(BaseRecording): ---------- session : str The session ID to extract recordings for. - In ONE, this is sometimes referred to as the 'eid'. + In ONE, this is sometimes referred to as the "eid". When doing a session lookup such as >>> from one.api import ONE >>> one = ONE(base_url="https://openalyx.internationalbrainlab.org", password="international", silent=True) - >>> sessions = one.alyx.rest('sessions', 'list', tag='2022_Q2_IBL_et_al_RepeatedSite') + >>> sessions = one.alyx.rest("sessions", "list", tag="2022_Q2_IBL_et_al_RepeatedSite") - each returned value in `sessions` refers to it as the 'id'. + each returned value in `sessions` refers to it as the "id". stream_name : str The name of the stream to load for the session. These can be retrieved from calling `StreamingIblExtractor.get_stream_names(session="")`. @@ -35,7 +35,7 @@ class IblStreamingRecordingExtractor(BaseRecording): cache_folder : str or None, default: None The location to temporarily store chunks of data during streaming. The default uses the folder designated by ONE.alyx._par.CACHE_DIR / "cache", which is typically the designated - 'Downloads' folder on your operating system. As long as `remove_cached` is set to True, the only files that will + "Downloads" folder on your operating system. 
As long as `remove_cached` is set to True, the only files that will persist in this folder are the metadata header files and the chunk of data being actively streamed and used in RAM. remove_cached : bool, default: True Whether or not to remove streamed data from the cache immediately after it is read. @@ -61,14 +61,14 @@ def get_stream_names(cls, session: str, cache_folder: Optional[Union[Path, str]] ---------- session : str The session ID to extract recordings for. - In ONE, this is sometimes referred to as the 'eid'. + In ONE, this is sometimes referred to as the "eid". When doing a session lookup such as >>> from one.api import ONE >>> one = ONE(base_url="https://openalyx.internationalbrainlab.org", password="international", silent=True) - >>> sessions = one.alyx.rest('sessions', 'list', tag='2022_Q2_IBL_et_al_RepeatedSite') + >>> sessions = one.alyx.rest("sessions", "list", tag="2022_Q2_IBL_et_al_RepeatedSite") - each returned value in `sessions` refers to it as the 'id'. + each returned value in `sessions` refers to it as the "id". Returns ------- diff --git a/src/spikeinterface/extractors/mclustextractors.py b/src/spikeinterface/extractors/mclustextractors.py index 9ca802c58d..dfe5bcda26 100644 --- a/src/spikeinterface/extractors/mclustextractors.py +++ b/src/spikeinterface/extractors/mclustextractors.py @@ -15,9 +15,9 @@ class MClustSortingExtractor(BaseSorting): Path to folder with t files. sampling_frequency : float sampling frequency in Hz. - sampling_frequency_raw: float or None + sampling_frequency_raw: float or None, default: None Required to read files with raw formats. In that case, the samples are saved in the same - unit as the input data. Default None + unit as the input data Examples: - If raw time is in tens of ms sampling_frequency_raw=10000 - If raw time is in samples sampling_frequency_raw=sampling_frequency diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index 81c4515d59..229e3ef0d0 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -21,12 +21,12 @@ class MdaRecordingExtractor(BaseRecording): ---------- folder_path : str or Path Path to the MDA folder. - raw_fname: str - File name of raw file. Defaults to 'raw.mda'. - params_fname: str - File name of params file. Defaults to 'params.json'. - geom_fname: str - File name of geom file. Defaults to 'geom.csv'. + raw_fname: str, default: "raw.mda" + File name of raw file + params_fname: str, default: "params.json" + File name of params file + geom_fname: str, default: "geom.csv" + File name of geom file Returns ------- @@ -87,12 +87,12 @@ def write_recording( params: dictionary Dictionary with optional parameters to save metadata. Sampling frequency is appended to this dictionary. - raw_fname: str - File name of raw file. Defaults to 'raw.mda'. - params_fname: str, default: 'params.json' - File name of params file. Defaults to 'params.json'. - geom_fname: str, default: 'geom.csv' - File name of geom file. Defaults to 'geom.csv'. + raw_fname: str, default: "raw.mda" + File name of raw file + params_fname: str, default: "params.json" + File name of params file + geom_fname: str, default: "geom.csv" + File name of geom file dtype: dtype or None, default: None Data type to be used. If None, dtype is the same as the recording traces.
**job_kwargs: diff --git a/src/spikeinterface/extractors/neoextractors/alphaomega.py b/src/spikeinterface/extractors/neoextractors/alphaomega.py index 57bc1dfbd6..8d9eee0924 100644 --- a/src/spikeinterface/extractors/neoextractors/alphaomega.py +++ b/src/spikeinterface/extractors/neoextractors/alphaomega.py @@ -17,7 +17,7 @@ class AlphaOmegaRecordingExtractor(NeoBaseRecordingExtractor): The folder path to the AlphaOmega recordings. lsx_files: list of strings or None, default: None A list of listings files that refers to mpx files to load. - stream_id: {'RAW', 'LFP', 'SPK', 'ACC', 'AI', 'UD'}, default: 'RAW' + stream_id: {"RAW", "LFP", "SPK", "ACC", "AI", "UD"}, default: "RAW" If there are several streams, specify the stream id you want to load. stream_name: str, default: None If there are several streams, specify the stream name you want to load. diff --git a/src/spikeinterface/extractors/neoextractors/neuroscope.py b/src/spikeinterface/extractors/neoextractors/neuroscope.py index 89a21897af..2df95d4af5 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroscope.py +++ b/src/spikeinterface/extractors/neoextractors/neuroscope.py @@ -67,11 +67,11 @@ class NeuroScopeSortingExtractor(BaseSorting): """ Extracts spiking information from an arbitrary number of .res.%i and .clu.%i files in the general folder path. - The .res is a text file with a sorted list of spiketimes from all units displayed in sample (integer '%i') units. + The .res is a text file with a sorted list of spiketimes from all units displayed in sample (integer "%i") units. The .clu file is a file with one more row than the .res with the first row corresponding to the total number of unique ids in the file (and may exclude 0 & 1 from this count) with the rest of the rows indicating which unit id the corresponding entry in the .res file refers to. - The group id is loaded as unit property 'group'. + The group id is loaded as unit property "group". In the original Neuroscope format: Unit ID 0 is the cluster of unsorted spikes (noise). @@ -92,8 +92,8 @@ class NeuroScopeSortingExtractor(BaseSorting): clufile_path : PathType Optional. Path to a particular .clu text file. If given, only the single .clu file (and the respective .res file) are loaded - keep_mua_units : bool - Optional. Whether or not to return sorted spikes from multi-unit activity. Defaults to True. + keep_mua_units : bool, default: True + Optional. Whether or not to return sorted spikes from multi-unit activity exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res.%i and .clu.%i pairs. @@ -303,8 +303,9 @@ def read_neuroscope( file_path: str The xml file. stream_id: str or None - keep_mua_units: bool - Optional. Whether or not to return sorted spikes from multi-unit activity. Defaults to True. + The stream id to load. If None, the first stream is loaded + keep_mua_units: bool, default: False + Optional. Whether or not to return sorted spikes from multi-unit activity exclude_shanks: list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res. % i and .clu. % i pairs. 
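To cross-check the `read_neuroscope` parameters documented above, a doctest-style sketch (the file path is hypothetical; the keyword names are taken from the docstring in this hunk):

>>> import spikeinterface.extractors as se
>>> data = se.read_neuroscope("session.xml", keep_mua_units=False, exclude_shanks=[0])  # hypothetical .xml path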
diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index 4e89385e42..9879eb89d0 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -118,7 +118,7 @@ class OpenEphysBinaryRecordingExtractor(NeoBaseRecordingExtractor): experiment_names: str, list, or None, default: None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiments are loaded as blocks. - E.g. 'experiment_names="experiment2"', 'experiment_names=["experiment1", "experiment2"]' + E.g. `experiment_names="experiment2"`, `experiment_names=["experiment1", "experiment2"]` stream_id: str, default: None If there are several streams, specify the stream id you want to load stream_name: str, default: None @@ -281,7 +281,7 @@ def map_to_neo_kwargs(cls, folder_path): def read_openephys(folder_path, **kwargs): """ - Read 'legacy' or 'binary' Open Ephys formats + Read "legacy" or "binary" Open Ephys formats Parameters ---------- @@ -307,7 +307,7 @@ def read_openephys(folder_path, **kwargs): experiment_names: str, list, or None, default: None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiments are loaded as blocks. - E.g. 'experiment_names="experiment2"', 'experiment_names=["experiment1", "experiment2"]' + E.g. `experiment_names="experiment2"`, `experiment_names=["experiment1", "experiment2"]` For Open Ephys binary format only ignore_timestamps_errors: bool, default: False Ignore the discontinuous timestamps errors in neo diff --git a/src/spikeinterface/extractors/neoextractors/spikeglx.py b/src/spikeinterface/extractors/neoextractors/spikeglx.py index cd35330d53..db6ee9bd48 100644 --- a/src/spikeinterface/extractors/neoextractors/spikeglx.py +++ b/src/spikeinterface/extractors/neoextractors/spikeglx.py @@ -22,9 +22,9 @@ class SpikeGLXRecordingExtractor(NeoBaseRecordingExtractor): Based on :py:class:`neo.rawio.SpikeGLXRawIO` - Contrary to older verion this reader is folder based. - So if the folder contain several streams ('imec0.ap' 'nidq' 'imec0.lf') - then it has to be specified with 'stream_id'. + Contrary to older versions, this reader is folder-based. + If the folder contains several streams (e.g., "imec0.ap", "nidq", "imec0.lf"), + then the stream has to be specified with "stream_id" or "stream_name". Parameters ---------- If True, then the probe is not loaded. stream_id: str or None, default: None If there are several streams, specify the stream id you want to load. - For example, 'imec0.ap' 'nidq' or 'imec0.lf'. + For example, "imec0.ap", "nidq", or "imec0.lf". stream_name: str or None, default: None If there are several streams, specify the stream name you want to load. all_annotations: bool, default: False diff --git a/src/spikeinterface/extractors/nwbextractors.py b/src/spikeinterface/extractors/nwbextractors.py index ceb99909a6..f7b445cdb9 100644 --- a/src/spikeinterface/extractors/nwbextractors.py +++ b/src/spikeinterface/extractors/nwbextractors.py @@ -81,9 +81,9 @@ def read_nwbfile( file_path : Path, str The path to the NWB file. stream_mode : "fsspec" or "ros3" or None, default: None - The streaming mode to use. Default assumes the file is on the local disk. + The streaming mode to use. If None, it assumes the file is on the local disk.
stream_cache_path : str or None, default: None - The path to the cache storage. Default is None. + The path to the cache storage Returns ------- @@ -150,11 +150,11 @@ class NwbRecordingExtractor(BaseRecording): If True, the time vector is loaded to the recording object. samples_for_rate_estimation: int, default: 100000 The number of timestamp samples to use to estimate the rate. - Used if 'rate' is not specified in the ElectricalSeries. + Used if "rate" is not specified in the ElectricalSeries. stream_mode: str or None, default: None Specify the stream mode: "fsspec" or "ros3". stream_cache_path: str or Path or None, default: None - Local path for caching. Default: cwd/cache. + Local path for caching. If None, it uses cwd Returns ------- @@ -430,11 +430,11 @@ class NwbSortingExtractor(BaseSorting): The sampling frequency in Hz (required if no ElectricalSeries is available). samples_for_rate_estimation: int, default: 100000 The number of timestamp samples to use to estimate the rate. - Used if 'rate' is not specified in the ElectricalSeries. + Used if "rate" is not specified in the ElectricalSeries. stream_mode: str or None, default: None Specify the stream mode: "fsspec" or "ros3". stream_cache_path: str or Path or None, default: None - Local path for caching. Default: cwd/cache. + Local path for caching. If None, it uses cwd Returns ------- @@ -597,7 +597,7 @@ def read_nwb(file_path, load_recording=True, load_sorting=False, electrical_seri ------- extractors: extractor or tuple Single RecordingExtractor/SortingExtractor or tuple with both - (depending on 'load_recording'/'load_sorting') arguments. + (depending on "load_recording"/"load_sorting") arguments. """ outputs = () if load_recording: diff --git a/src/spikeinterface/extractors/shybridextractors.py b/src/spikeinterface/extractors/shybridextractors.py index 130c0ce47e..d7ae09144f 100644 --- a/src/spikeinterface/extractors/shybridextractors.py +++ b/src/spikeinterface/extractors/shybridextractors.py @@ -81,14 +81,14 @@ def write_recording(recording, save_path, initial_sorting_fn, dtype="float32", * Parameters ---------- recording: RecordingExtractor - The recording extractor to be converted and saved. + The recording extractor to be converted and saved save_path: str - Full path to desired target folder. + Full path to desired target folder initial_sorting_fn: str Full path to the initial sorting csv file (can also be generated - using write_sorting static method from the SHYBRIDSortingExtractor). - dtype: dtype - Type of the saved data. Default float32. + using write_sorting static method from the SHYBRIDSortingExtractor) + dtype: dtype, default: float32 + Type of the saved data **write_binary_kwargs: keyword arguments for write_to_binary_dat_format() function """ try: diff --git a/src/spikeinterface/extractors/waveclussnippetstextractors.py b/src/spikeinterface/extractors/waveclussnippetstextractors.py index d4bdc1aede..3bcda1ea70 100644 --- a/src/spikeinterface/extractors/waveclussnippetstextractors.py +++ b/src/spikeinterface/extractors/waveclussnippetstextractors.py @@ -121,9 +121,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- start_frame: Union[int, None], default: None - start sample index, or zero if None. Defaults to None. + start sample index, or zero if None end_frame: Union[int, None], default: None - end_sample, or number of samples if None. Defaults to None.
+ end_sample, or number of samples if None Returns ------- diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index e2ba4c2a18..2aaf4d20b9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -235,8 +235,8 @@ def compute_amplitude_scalings( ------- amplitude_scalings: np.array or list of dict The amplitude scalings. - - If 'concatenated' all amplitudes for all spikes and all units are concatenated - - If 'by_unit', amplitudes are returned as a list (for segments) of dictionaries (for units) + - If "concatenated" all amplitudes for all spikes and all units are concatenated + - If "by_unit", amplitudes are returned as a list (for segments) of dictionaries (for units) """ if load_if_exists and waveform_extractor.is_extension(AmplitudeScalingsCalculator.extension_name): sac = waveform_extractor.load_extension(AmplitudeScalingsCalculator.extension_name) diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 4fbabcfe31..cf32e79b25 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -127,13 +127,13 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"): unit_ids : list, default: None List of unit ids to return projections for outputs: str - * 'id': 'all_labels' contain unit ids - * 'index': 'all_labels' contain unit indices + * "id": "all_labels" contain unit ids + * "index": "all_labels" contain unit indices Returns ------- all_labels: np.array - Array with labels (ids or indices based on 'outputs') of returned PCA projections + Array with labels (ids or indices based on "outputs") of returned PCA projections all_projections: np.array The PCA projections (num_all_waveforms, num_components, num_channels) """ @@ -169,7 +169,7 @@ def project_new(self, new_waveforms, unit_id=None, sparse=False): new_waveforms: np.array Array with new waveforms to project with shape (num_waveforms, num_samples, num_channels) unit_id: int or str - In case PCA is sparse and mode is by_channel_local, the unit_id of 'new_waveforms' + In case PCA is sparse and mode is by_channel_local, the unit_id of "new_waveforms" sparse: bool, default: False If True, and sparsity is not None, only projections on sparse channels are returned. 
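Because the `outputs` flag above switches `all_labels` between unit ids and unit indices, a short doctest-style sketch of the documented return pair (the waveform extractor `we` is hypothetical):

>>> from spikeinterface.postprocessing import compute_principal_components
>>> pc = compute_principal_components(we, n_components=5)  # `we`: an existing WaveformExtractor
>>> all_labels, all_projections = pc.get_all_projections(outputs="index")  # labels as unit indices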
@@ -186,7 +186,7 @@ def project_new(self, new_waveforms, unit_id=None, sparse=False): wfs0 = self.waveform_extractor.get_waveforms(unit_id=self.waveform_extractor.sorting.unit_ids[0]) assert ( wfs0.shape[1] == new_waveforms.shape[1] - ), "Mismatch in number of samples between waveforms used to fit the pca model and 'new_waveforms" + ), "Mismatch in number of samples between waveforms used to fit the pca model and 'new_waveforms'" num_channels = len(self.waveform_extractor.channel_ids) # check waveform shapes @@ -200,7 +200,7 @@ def project_new(self, new_waveforms, unit_id=None, sparse=False): else: assert ( wfs0.shape[2] == new_waveforms.shape[2] - ), "Mismatch in number of channels between waveforms used to fit the pca model and 'new_waveforms" + ), "Mismatch in number of channels between waveforms used to fit the pca model and 'new_waveforms'" channel_inds = np.arange(num_channels, dtype=int) # get channel ids and pca models diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 30c622ac46..50dac50ad3 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -160,8 +160,8 @@ def compute_spike_amplitudes( ------- amplitudes: np.array or list of dict The spike amplitudes. - - If 'concatenated' all amplitudes for all spikes and all units are concatenated - - If 'by_unit', amplitudes are returned as a list (for segments) of dictionaries (for units) + - If "concatenated" all amplitudes for all spikes and all units are concatenated + - If "by_unit", amplitudes are returned as a list (for segments) of dictionaries (for units) """ if load_if_exists and waveform_extractor.is_extension(SpikeAmplitudesCalculator.extension_name): sac = waveform_extractor.load_extension(SpikeAmplitudesCalculator.extension_name) diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index fa911a7f09..858af3ee08 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -278,8 +278,8 @@ def compute_template_metrics( ------- template_metrics : pd.DataFrame Dataframe with the computed template metrics. - If 'sparsity' is None, the index is the unit_id. - If 'sparsity' is given, the index is a multi-index (unit_id, channel_id) + If "sparsity" is None, the index is the unit_id. + If "sparsity" is given, the index is a multi-index (unit_id, channel_id) Notes ----- diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py index 0c0c8a516d..5febdf83f7 100644 --- a/src/spikeinterface/postprocessing/template_similarity.py +++ b/src/spikeinterface/postprocessing/template_similarity.py @@ -90,8 +90,8 @@ def compute_template_similarity( A waveform extractor object load_if_exists : bool, default: False Whether to load precomputed similarity, if it already exists.
- method: str - Method name ('cosine_similarity') + method: str, default: "cosine_similarity" + The method to compute the similarity waveform_extractor_other: WaveformExtractor, default: None A second waveform extractor object diff --git a/src/spikeinterface/preprocessing/common_reference.py b/src/spikeinterface/preprocessing/common_reference.py index 6d6ce256de..219854f340 100644 --- a/src/spikeinterface/preprocessing/common_reference.py +++ b/src/spikeinterface/preprocessing/common_reference.py @@ -16,22 +16,22 @@ class CommonReferenceRecording(BasePreprocessor): ---------- recording: RecordingExtractor The recording extractor to be re-referenced - reference: str 'global', 'single' or 'local' - If 'global' then CMR/CAR is used either by groups or all channel way. - If 'single', the selected channel(s) is remove from all channels. operator is no used in that case. - If 'local', an average CMR/CAR is implemented with only k channels selected the nearest outside of a radius around each channel - operator: str 'median' or 'average' - If 'median', common median reference (CMR) is implemented (the median of + reference: "global" | "single" | "local", default: "global" + If "global", the CMR/CAR is applied either group-wise or to all channels. + If "single", the selected channel(s) is removed from all channels. The operator is not used in that case. + If "local", an average CMR/CAR is implemented with only the k nearest channels outside of a radius around each channel + operator: "median" | "average", default: "median" + If "median", common median reference (CMR) is implemented (the median of the selected channels is removed for each timestamp). - If 'average', common average reference (CAR) is implemented (the mean of the selected channels is removed + If "average", common average reference (CAR) is implemented (the mean of the selected channels is removed for each timestamp). groups: list List of lists containing the channel ids for splitting the reference. The CMR, CAR, or referencing with respect to single channels are applied group-wise. However, this is not applied for the local CAR. It is useful when dealing with different channel groups, e.g. multiple tetrodes. ref_channel_ids: list or int - If no 'groups' are specified, all channels are referenced to 'ref_channel_ids'. If 'groups' is provided, then a - list of channels to be applied to each group is expected. If 'single' reference, a list of one channel or an + If no "groups" are specified, all channels are referenced to "ref_channel_ids". If "groups" is provided, then a + list of channels to be applied to each group is expected. If "single" reference, a list of one channel or an int is expected. local_radius: tuple(int, int) Used in the local CAR implementation as the selecting annulus (exclude radius, include radius) diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 260dc44926..7c4259d9af 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -14,8 +14,8 @@ class DepthOrderRecording(ChannelSliceRecording): The recording to re-order. channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str or tuple, list, default: ('x', 'y') - If str, it needs to be 'x', 'y', 'z'. + dimensions : str or tuple, list, default: ("x", "y") + If str, it needs to be "x", "y", "z". If tuple or list, it sorts the locations in two dimensions using lexsort.
This approach is recommended since there is less ambiguity flip: bool, default: False diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 365636893b..a162cfe636 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -50,12 +50,8 @@ def detect_bad_channels( ---------- recording : BaseRecording The recording for which bad channels are detected - method : str, default: 'coherence+psd' - The method to be used: - - * coeherence+psd (developed by IBL) - * mad - * std + method : "coherence+psd" | "std" | "mad" | "neighborhood_r2", default: "coherence+psd" + The method to be used for bad channel detection std_mad_threshold : float, default: 5 The standard deviation/mad multiplier threshold psd_hf_threshold (coherence+psd) : float, default: 0.02 diff --git a/src/spikeinterface/preprocessing/directional_derivative.py b/src/spikeinterface/preprocessing/directional_derivative.py index 618769b424..48bcf77d7f 100644 --- a/src/spikeinterface/preprocessing/directional_derivative.py +++ b/src/spikeinterface/preprocessing/directional_derivative.py @@ -18,8 +18,8 @@ def __init__( ): """Take derivative of any `order` along `direction` - np.gradient is applied independently along each colum (direction='y') - or row (direction='x'). Accounts for channel spacings and boundary + np.gradient is applied independently along each column (direction="y") + or row (direction="x"). Accounts for channel spacings and boundary issues using np.gradient -- see that function's documentation for more information about `edge_order`. diff --git a/src/spikeinterface/preprocessing/filter.py b/src/spikeinterface/preprocessing/filter.py index 698b7ee65a..1d6947be79 100644 --- a/src/spikeinterface/preprocessing/filter.py +++ b/src/spikeinterface/preprocessing/filter.py @@ -10,10 +10,10 @@ * filter_order: order The order of the filter - * filter_mode: 'sos or 'ba' - 'sos' is bi quadratic and more stable than ab so thery are prefered. + * filter_mode: "sos" or "ba" + "sos" (biquadratic) is more stable than "ba" and is therefore preferred. * ftype: str - Filter type for iirdesign ('butter' / 'cheby1' / ... all possible of scipy.signal.iirdesign) + Filter type for iirdesign ("butter" / "cheby1" / ... all possible of scipy.signal.iirdesign) """ @@ -31,8 +31,8 @@ class FilterRecording(BasePreprocessor): recording: Recording The recording extractor to be filtered band: float or list, default: [300.0, 6000.0] - If float, cutoff frequency in Hz for 'highpass' filter type - If list. band (low, high) in Hz for 'bandpass' filter type + If float, cutoff frequency in Hz for "highpass" filter type + If list, band (low, high) in Hz for "bandpass" filter type btype: "bandpass" | "highpass", default: "bandpass" Type of the filter margin_ms: float, default: 5.0 diff --git a/src/spikeinterface/preprocessing/filter_opencl.py b/src/spikeinterface/preprocessing/filter_opencl.py index d3a08297c6..e16e2cfd08 100644 --- a/src/spikeinterface/preprocessing/filter_opencl.py +++ b/src/spikeinterface/preprocessing/filter_opencl.py @@ -18,7 +18,7 @@ class FilterOpenCLRecording(BasePreprocessor): """ Simple implementation of FilterRecording in OpenCL. - Only filter_mode='sos' is supported. + Only filter_mode="sos" is supported.
Author : Samuel Garcia This kernel is ported from "tridesclous" Parameters ---------- recording: Recording The recording extractor to be filtered N: order filter_mode: "sos" only ftypestr: "butter" / "cheby1" / ... all possible of scipy.signal.iirdesign margin: margin in seconds on the border to avoid border effects diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index 6ab1a9afce..8672b48340 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -200,14 +200,14 @@ def correct_motion( ---------- recording: RecordingExtractor The recording extractor to be transformed - preset: str - The preset name. Default "nonrigid_accurate". - folder: Path str or None - If not None then intermediate motion info are saved into a folder. Default None - output_motion_info: bool + preset: str, default: "nonrigid_accurate" + The preset name + folder: Path str or None, default: None + If not None, then intermediate motion info is saved into a folder + output_motion_info: bool, default: False If True, then the function returns a `motion_info` dictionary that contains variables to check intermediate steps (motion_histogram, non_rigid_windows, pairwise_displacement) - This dictionary is the same when reloaded from the folder. Default False + This dictionary is the same when reloaded from the folder detect_kwargs: dict Optional parameters to overwrite the ones in the preset for "detect" step. select_kwargs: dict diff --git a/src/spikeinterface/preprocessing/normalize_scale.py b/src/spikeinterface/preprocessing/normalize_scale.py index 4668e5344a..03afada380 100644 --- a/src/spikeinterface/preprocessing/normalize_scale.py +++ b/src/spikeinterface/preprocessing/normalize_scale.py @@ -43,8 +43,8 @@ class NormalizeByQuantileRecording(BasePreprocessor): Lower quantile used for measuring the scale q1: float, default: 0.99 Upper quantile used for measuring the scale - mode: 'by_channel' or 'pool_channel', default: 'by_channel' - If 'by_channel' each channel is rescaled independently. + mode: "by_channel" | "pool_channel", default: "by_channel" + If "by_channel" each channel is rescaled independently. dtype: str or np.dtype, default: "float32" The dtype of the output traces **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function @@ -123,8 +123,8 @@ class ScaleRecording(BasePreprocessor): Scalar for the traces of the recording extractor or array with scalars for each channel offset: float or array Offset for the traces of the recording extractor or array with offsets for each channel - dtype: str or np.dtype - The dtype of the output traces. Default "float32" + dtype: str or np.dtype, default: "float32" + The dtype of the output traces Returns ------- diff --git a/src/spikeinterface/preprocessing/preprocessing_tools.py b/src/spikeinterface/preprocessing/preprocessing_tools.py index 7e67a1ee9a..039d054b64 100644 --- a/src/spikeinterface/preprocessing/preprocessing_tools.py +++ b/src/spikeinterface/preprocessing/preprocessing_tools.py @@ -125,8 +125,7 @@ def get_kriging_kernel_distance(locations_1, locations_2, sigma_um, p, distance_ In case sigma_um is list then this mimics the Kilosort2.5 behavior, which uses two separate sigmas for each dimension. In the latter case the metric is always a "cityblock" p : float - Weight parameter on the exponential function.
Default - in IBL kriging interpolation is 1.3. + Weight parameter on the exponential function Returns ------- diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 12fc7a6e10..534a3fb5a4 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -9,10 +9,10 @@ class RemoveArtifactsRecording(BasePreprocessor): """ Removes stimulation artifacts from recording extractor traces. By default, - artifact periods are zeroed-out (mode = 'zeros'). This is only recommended + artifact periods are zeroed-out (mode = "zeros"). This is only recommended for traces that are centered around zero (e.g. through a prior highpass filter); if this is not the case, linear and cubic interpolation modes are - also available, controlled by the 'mode' input argument. + also available, controlled by the "mode" input argument. Note that several artifacts can be removed at once (potentially with distinct duration each), if labels are specified @@ -31,29 +31,29 @@ class RemoveArtifactsRecording(BasePreprocessor): list_labels: list of lists/arrays or None One list per segment of labels with the stimulation labels for the given artifacts. Labels should be strings, for JSON serialization. - Required for 'median' and 'average' modes. + Required for "median" and "average" modes. mode: "zeros", "linear", "cubic", "average", "median", default: "zeros" Determines what artifacts are replaced by. Can be one of the following: - - 'zeros': Artifacts are replaced by zeros. + - "zeros": Artifacts are replaced by zeros. - - 'median': The median over all artifacts is computed and subtracted for + - "median": The median over all artifacts is computed and subtracted for each occurrence of an artifact - - 'average': The mean over all artifacts is computed and subtracted for each + - "average": The mean over all artifacts is computed and subtracted for each occurrence of an artifact - - 'linear': Replacement are obtained through Linear interpolation between + - "linear": Replacements are obtained through linear interpolation between the trace before and after the artifact. If the trace starts or ends with an artifact period, the gap is filled with the closest available value before or after the artifact. - - 'cubic': Cubic spline interpolation between the trace before and after + - "cubic": Cubic spline interpolation between the trace before and after the artifact, referenced to evenly spaced fit points before and after the artifact. This is an option that can be helpful if there are significant LFP effects around the time of the artifact, but visual inspection of fit behaviour with your chosen settings is recommended. - The spacing of fit points is controlled by 'fit_sample_spacing', with + The spacing of fit points is controlled by "fit_sample_spacing", with greater spacing between points leading to a fit that is less sensitive to high frequency fluctuations but at the cost of a less smooth continuation of the trace. the closest available value before or after the artifact. fit_sample_spacing: float, default: 1.0 Determines the spacing (in ms) of reference points for the cubic spline - fit if mode = 'cubic'. Default = 1ms. Note: The actual fit samples are + fit if mode = "cubic".
Note: The actual fit samples are the median of the 5 data points around the time of each sample point to avoid excessive influence from hyper-local fluctuations. artifacts: dict or None, default: None - If provided (when mode is 'median' or 'average') then it must be a dict with + If provided (when mode is "median" or "average") then it must be a dict with keys that are the labels of the artifacts, and values the artifacts themselves, on all channels (and thus bypassing ms_before and ms_after) sparsity: dict or None, default: None - If provided (when mode is 'median' or 'average') then it must be a dict with + If provided (when mode is "median" or "average") then it must be a dict with keys that are the labels of the artifacts, and values that are boolean mask of the channels where the artifacts should be considered (for subtraction/scaling) scale_amplitude: bool, default: False - If true, then for mode 'median' or 'average' the amplitude of the template + If True, then for mode "median" or "average" the amplitude of the template will be scaled at each time occurrence to minimize residuals time_jitter: float, default: 0 - If non 0, then for mode 'median' or 'average', a time jitter in ms + If non-zero, then for mode "median" or "average", a time jitter in ms can be allowed to minimize the residuals waveforms_kwargs: dict or None, default: None The arguments passed to the WaveformExtractor object when extracting the - artifacts, for mode 'median' or 'average'. - By default, the global job kwargs are used, in addition to {'allow_unfiltered' : True, 'mode':'memory'}. + artifacts, for mode "median" or "average". + By default, the global job kwargs are used, in addition to {"allow_unfiltered" : True, "mode":"memory"}. To estimate sparse artifacts Returns diff --git a/src/spikeinterface/preprocessing/silence_periods.py b/src/spikeinterface/preprocessing/silence_periods.py index 5a8774558b..4299d199ed 100644 --- a/src/spikeinterface/preprocessing/silence_periods.py +++ b/src/spikeinterface/preprocessing/silence_periods.py @@ -9,7 +9,7 @@ class SilencedPeriodsRecording(BasePreprocessor): """ Silence user-defined periods from recording extractor traces. By default, - periods are zeroed-out (mode = 'zeros'). You can also fill the periods with noise. + periods are zeroed-out (mode = "zeros"). You can also fill the periods with noise. Note that both methods assume that traces are centered around zero. If this is not the case, make sure you apply a filter or center function prior to silencing periods. @@ -24,9 +24,9 @@ class SilencedPeriodsRecording(BasePreprocessor): mode: "zeros" | "noise", default: "zeros" Determines what periods are replaced by. Can be one of the following: - - 'zeros': Artifacts are replaced by zeros. + - "zeros": Periods are replaced by zeros. - - 'noise': The periods are filled with a gaussion noise that has the + - "noise": The periods are filled with a Gaussian noise that has the same variance as the one in the recordings, on a per channel basis **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function diff --git a/src/spikeinterface/preprocessing/whiten.py b/src/spikeinterface/preprocessing/whiten.py index 7b3689899c..3bea9b91bb 100644 --- a/src/spikeinterface/preprocessing/whiten.py +++ b/src/spikeinterface/preprocessing/whiten.py @@ -18,11 +18,11 @@ class WhitenRecording(BasePreprocessor): dtype: None or dtype, default: None If None the parent dtype is kept. For integer dtype an int_scale must also be given. 
- mode: 'global' / 'local', default: 'global' - 'global' use the entire covariance matrix to compute the W matrix - 'local' use local covariance (by radius) to compute the W matrix + mode: "global" | "local", default: "global" + "global" uses the entire covariance matrix to compute the W matrix + "local" uses local covariance (by radius) to compute the W matrix radius_um: None or float, default: None - Used for mode = 'local' to get the neighborhood + Used for mode = "local" to get the neighborhood apply_mean: bool, default: False Whether or not to subtract the mean matrix M before the dot product with W. int_scale : None or float, default: None @@ -138,15 +138,15 @@ def compute_whitening_matrix(recording, mode, random_chunk_kwargs, apply_mean, r mode : str The mode to compute the whitening matrix. - * 'global': compute SVD using all channels - * 'local': compute SVD on local neighborhood (controlled by `radius_um`) + * "global": compute SVD using all channels + * "local": compute SVD on local neighborhood (controlled by `radius_um`) random_chunk_kwargs : dict Keyword arguments for get_random_data_chunks() apply_mean : bool If True, the mean is removed prior to computing the covariance radius_um : float or None, default: None - Used for mode = 'local' to get the neighborhood + Used for mode = "local" to get the neighborhood eps : float or None, default: None Small epsilon to regularize SVD. If None, the default is set to 1e-8, but if the data is float type and scaled down to very small values, eps is automatically set to a small fraction (1e-3) of the median of the squared data. diff --git a/src/spikeinterface/preprocessing/zero_channel_pad.py b/src/spikeinterface/preprocessing/zero_channel_pad.py index 5d8da8fa3f..ee6eb014aa 100644 --- a/src/spikeinterface/preprocessing/zero_channel_pad.py +++ b/src/spikeinterface/preprocessing/zero_channel_pad.py @@ -17,12 +17,12 @@ class TracePaddedRecording(BasePreprocessor): ---------- parent_recording_segment : BaseRecording The parent recording segment from which the traces are to be retrieved. - padding_start : int - The amount of padding to add to the left of the traces. Default is 0. It has to be non-negative - padding_end : int - The amount of padding to add to the right of the traces. Default is 0. It has to be non-negative - fill_value: float - The value to pad with. Default is 0. + padding_start : int, default: 0 + The amount of padding to add to the left of the traces. It has to be non-negative + padding_end : int, default: 0 + The amount of padding to add to the right of the traces. It has to be non-negative + fill_value: float, default: 0 + The value to pad with """ def __init__( diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index b791d7b247..5c734b9100 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -184,9 +184,9 @@ def compute_snrs( ---------- waveform_extractor : WaveformExtractor The waveform extractor object. - peak_sign : {'neg', 'pos', 'both'} + peak_sign : "neg" | "pos" | "both", default: "neg" The sign of the template to compute best channels. - peak_mode: {'extremum', 'at_index'} + peak_mode: "extremum" | "at_index", default: "extremum" How to compute the amplitude. Extremum takes the maxima/minima At_index takes the value at t=waveform_extractor.nbefore @@ -659,8 +659,8 @@ def compute_amplitude_cv_metrics( min_num_bins : int, default: 10 The minimum number of bins to compute the median and range. 
If the number of bins is less than this, then the median and range are set to NaN. - amplitude_extension : str, default: 'spike_amplitudes' - The name of the extension to load the amplitudes from. 'spike_amplitudes' or 'amplitude_scalings'. + amplitude_extension : str, default: "spike_amplitudes" + The name of the extension to load the amplitudes from. "spike_amplitudes" or "amplitude_scalings". unit_ids : list or None List of unit ids to compute the amplitude spread. If None, all units are used. @@ -760,7 +760,7 @@ def compute_amplitude_cutoffs( ---------- waveform_extractor : WaveformExtractor The waveform extractor object. - peak_sign : {'neg', 'pos', 'both'} + peak_sign : "neg" | "pos" | "both", default: "neg" The sign of the peaks. num_histogram_bins : int, default: 100 The number of bins to use to compute the amplitude histogram. @@ -856,7 +856,7 @@ def compute_amplitude_medians(waveform_extractor, peak_sign="neg", unit_ids=None ---------- waveform_extractor : WaveformExtractor The waveform extractor object. - peak_sign : {'neg', 'pos', 'both'} + peak_sign : "neg" | "pos" | "both", default: "neg" The sign of the peaks. unit_ids : list or None List of unit ids to compute the amplitude medians. If None, all units are used. @@ -929,7 +929,7 @@ def compute_drift_metrics( * drift_std: standard deviation of the drift signal * drift_mad: median absolute deviation of the drift signal - Requires 'spike_locations' extension. If this is not present, metrics are set to NaN. + Requires "spike_locations" extension. If this is not present, metrics are set to NaN. Parameters ---------- @@ -1179,7 +1179,7 @@ def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_val ---------- amplitudes : ndarray_like The amplitudes (in uV) of the spikes for one unit. - peak_sign : {'neg', 'pos', 'both'} + peak_sign : "neg" | "pos" | "both", default: "neg" The sign of the template to compute best channels. num_histogram_bins : int, default: 500 The number of bins to use to compute the amplitude histogram. diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index b4e60f5937..6f1c1c0d69 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -407,7 +407,7 @@ def nearest_neighbors_isolation( The number of PC components to use to project the snippets to. radius_um : float, default: 100 The radius, in um, within which channels need to be relative to the peak channel to be included. - peak_sign: str, default: 'neg' + peak_sign: "neg" | "pos" | "both", default: "neg" The peak_sign used to compute sparsity and neighbor units. Used if waveform_extractor is not sparse already. min_spatial_overlap : float, default: 100 @@ -611,7 +611,7 @@ def nearest_neighbors_noise_overlap( The number of PC components to use to project the snippets to. radius_um : float, default: 100 The radius, in um, within which channels need to be relative to the peak channel to be included. - peak_sign: str, default: 'neg' + peak_sign: "neg" | "pos" | "both", default: "neg" The peak_sign used to compute sparsity and neighbor units. Used if waveform_extractor is not sparse already. seed : int, default: 0 diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 6455d720c6..94b56754e8 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -334,18 +334,18 @@ def run_sorters( The working directory. 
sorter_params: dict of dict with sorter_name as key This allows overwriting the default params for the sorter. - mode_if_folder_exists: {'raise', 'overwrite', 'keep'} + mode_if_folder_exists: "raise" | "overwrite" | "keep", default: "raise" The mode when the subfolder of recording/sorter already exists. - * 'raise' : raise error if subfolder exists - * 'overwrite' : delete and force recompute - * 'keep' : do not compute again if f=subfolder exists and log is OK - engine: {'loop', 'joblib', 'dask'} + * "raise" : raise error if subfolder exists + * "overwrite" : delete and force recompute + * "keep" : do not compute again if subfolder exists and log is OK + engine: "loop" | "joblib" | "dask", default: "loop" Which engine to use to run the sorter. engine_kwargs: dict This contains kwargs specific to the launcher engine: - * 'loop' : no kwargs - * 'joblib' : {'n_jobs' : } number of processes - * 'dask' : {'client':} the dask client for submitting task + * "loop" : no kwargs + * "joblib" : {"n_jobs" : } number of processes + * "dask" : {"client":} the dask client for submitting tasks verbose: bool Controls sorter verbosity. with_output: bool diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 2e170fd83e..9c862f4278 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -124,11 +124,11 @@ def run_matching_num_spikes(self, spike_num, seed=0, we_kwargs=None, template_mo Parameters ---------- spike_num: int - The maximum number of spikes per unit. - seed: int - Random seed. (Default: 0) + The maximum number of spikes per unit + seed: int, default: 0 + Random seed we_kwargs: dict - A dictionary of keyword arguments for the WaveformExtractor. + A dictionary of keyword arguments for the WaveformExtractor template_mode: "mean" | "median" | "std", default: "median" The mode to use to extract templates from the WaveformExtractor @@ -188,14 +188,14 @@ def run_matching_misclassed( ---------- fraction_misclassed: float The fraction of misclassified spikes. - min_similarity: float - The minimum cosine similarity between templates to be considered similar. (Default: -1) - seed: int - Random seed. (Default: 0) + min_similarity: float, default: -1 + The minimum cosine similarity between templates to be considered similar + seed: int, default: 0 + Random seed we_kwargs: dict - A dictionary of keyword arguments for the WaveformExtractor. + A dictionary of keyword arguments for the WaveformExtractor template_mode: "mean" | "median" | "std", default: "median" - The mode to use to extract templates from the WaveformExtractor. (Default: 'median') + The mode to use to extract templates from the WaveformExtractor Returns ------- @@ -261,14 +261,14 @@ def run_matching_missing_units( ---------- fraction_missing: float The fraction of missing units. - snr_threshold: float - The SNR threshold below which units are considered missing. (Default: 0) - seed: int - Random seed. (Default: 0) + snr_threshold: float, default: 0 + The SNR threshold below which units are considered missing + seed: int, default: 0 + Random seed we_kwargs: dict A dictionary of keyword arguments for the WaveformExtractor. template_mode: "mean" | "median" | "std", default: "median" - The mode to use to extract templates from the WaveformExtractor. 
(Default: 'median') + The mode to use to extract templates from the WaveformExtractor Returns ------- @@ -335,16 +335,16 @@ def run_matching_vary_parameter( ---------- parameters: array-like The values of the parameter to vary. - parameter_name: {'num_spikes', 'fraction_misclassed', 'fraction_missing} + parameter_name: "num_spikes" | "fraction_misclassed" | "fraction_missing" The name of the parameter to vary. - num_replicates: int - The number of replicates to run for each parameter value. (Default: 1) + num_replicates: int, default: 1 + The number of replicates to run for each parameter value we_kwargs: dict - A dictionary of keyword arguments for the WaveformExtractor. + A dictionary of keyword arguments for the WaveformExtractor template_mode: "mean" | "median" | "std", default: "median" - The mode to use to extract templates from the WaveformExtractor. (Default: 'median') + The mode to use to extract templates from the WaveformExtractor **kwargs - Keyword arguments for the run_matching method. + Keyword arguments for the run_matching method Returns ------- @@ -438,15 +438,15 @@ def compare_all_sortings(self, matching_df, collision=False, ground_truth="from_ A dataframe of NumpySortings for each method/parameter_value/iteration combination. collision: bool If True, use the CollisionGTComparison class. If False, use the compare_sorter_to_ground_truth function. - ground_truth: {'from_self' | 'from_df'} - If 'from_self', use the ground-truth sorting stored in the BenchmarkMatching object. If 'from_df', use the + ground_truth: "from_self" | "from_df", default: "from_self" + If "from_self", use the ground-truth sorting stored in the BenchmarkMatching object. If "from_df", use the ground-truth sorting stored in the matching_df. **kwargs Keyword arguments for the comparison function. Notes ----- - This function adds a new column to the matching_df called 'comparison' that contains the GroundTruthComparison + This function adds a new column to the matching_df called "comparison" that contains the GroundTruthComparison object for each row. """ if ground_truth == "from_self": diff --git a/src/spikeinterface/sortingcomponents/clustering/main.py b/src/spikeinterface/sortingcomponents/clustering/main.py index 69637c1c64..fa1c860814 100644 --- a/src/spikeinterface/sortingcomponents/clustering/main.py +++ b/src/spikeinterface/sortingcomponents/clustering/main.py @@ -15,7 +15,7 @@ def find_cluster_from_peaks(recording, peaks, method="stupid", method_kwargs={}, peaks: array Peaks array, as returned by detect_peaks() in "compact_numpy" way method: str - Which method to use ('stupid' | 'XXXX') + Which method to use ("stupid" | "XXXX") method_kwargs: dict, default: dict() Keyword arguments for the chosen method extra_outputs: bool, default: False diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index 48ec26679e..9418e8efbe 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -37,17 +37,17 @@ def split_clusters( recording: Recording Recording object features_dict_or_folder: dict or folder - A dictionary of features precomputed with peak_pipeline or a folder containing npz file for features. 
- method: str + A dictionary of features precomputed with peak_pipeline or a folder containing .npz files for features + method: str, default: "hdbscan_on_local_pca" The method name - method_kwargs: dict + method_kwargs: dict, default: dict() The method options - recursive: bool Default False - Reccursive or not. - recursive_depth: None or int - If recursive=True, then this is the max split per spikes. - returns_split_count: bool - Optionally return the split count vector. Same size as labels. + recursive: bool, default: False + Recursive or not + recursive_depth: None or int, default: None + If recursive=True, then this is the maximum number of splits per spike + returns_split_count: bool, default: False + Optionally return the split count vector. Same size as labels Returns ------- diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index a35aaac05f..f7f020d153 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -31,7 +31,7 @@ def compute_features_from_peaks( The recording extractor object peaks: array Peaks array, as returned by detect_peaks() in "compact_numpy" way - feature_list: list, default: ['ptp'] + feature_list: list, default: ["ptp"] List of features to be computed. Possible features are: - amplitude - ptp diff --git a/src/spikeinterface/sortingcomponents/matching/main.py b/src/spikeinterface/sortingcomponents/matching/main.py index 260c6a89f3..eec9052e7c 100644 --- a/src/spikeinterface/sortingcomponents/matching/main.py +++ b/src/spikeinterface/sortingcomponents/matching/main.py @@ -12,8 +12,8 @@ def find_spikes_from_templates(recording, method="naive", method_kwargs={}, extr ---------- recording: RecordingExtractor The recording extractor object - method: str - Which method to use ('naive' | 'tridesclous' | 'circus' | 'circus-omp' | 'wobble') + method: "naive" | "tridesclous" | "circus" | "circus-omp" | "wobble" + Which method to use for template matching method_kwargs: dict, optional Keyword arguments for the chosen method extra_outputs: bool @@ -30,8 +30,8 @@ def find_spikes_from_templates(recording, method="naive", method_kwargs={}, extr Notes ----- - For all methods except 'wobble', templates are represented as a WaveformExtractor in method_kwargs - so statistics can be extracted. For 'wobble' templates are represented as a numpy.ndarray. + For all methods except "wobble", templates are represented as a WaveformExtractor in method_kwargs + so statistics can be extracted. For "wobble" templates are represented as a numpy.ndarray. """ from .method_list import matching_methods diff --git a/src/spikeinterface/sortingcomponents/matching/wobble.py b/src/spikeinterface/sortingcomponents/matching/wobble.py index 5327e28916..07b9f8baa4 100644 --- a/src/spikeinterface/sortingcomponents/matching/wobble.py +++ b/src/spikeinterface/sortingcomponents/matching/wobble.py @@ -42,8 +42,8 @@ class WobbleParameters: Notes ----- - 'Peaks' refer to relative maxima in the convolution of the templates with the voltage trace - (or residual) and 'spikes' refer to putative extracellular action potentials (EAPs). Peaks are considered spikes + "Peaks" refer to relative maxima in the convolution of the templates with the voltage trace + (or residual) and "spikes" refer to putative extracellular action potentials (EAPs). Peaks are considered spikes if their amplitude clears the threshold parameter. 
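For orientation, a minimal usage sketch of the find_spikes_from_templates entry point documented above (illustrative only, not part of the patch; `recording` and `templates` are assumed to exist beforehand, and the exact method_kwargs keys for "wobble" — "templates", "nbefore", "nafter" — are an assumption to check against the engine's default_params):

    import numpy as np
    from spikeinterface.sortingcomponents.matching import find_spikes_from_templates

    # For "wobble", templates are a plain numpy.ndarray (see Notes above), typically
    # shaped (num_templates, num_samples, num_channels); nbefore/nafter locate the
    # peak sample within each template (values here are placeholders).
    spikes = find_spikes_from_templates(
        recording,
        method="wobble",
        method_kwargs={"templates": templates, "nbefore": 30, "nafter": 30},
    )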
""" @@ -107,8 +107,8 @@ class TemplateMetadata: Notes ----- - A 'unit' refers to a putative neuron which may have one or more 'templates' of its spike waveform. - Each 'template' may have many upsampled 'jittered_templates' depending on the 'jitter_factor'. + A "unit" refers to a putative neuron which may have one or more "templates" of its spike waveform. + Each "template" may have many upsampled "jittered_templates" depending on the "jitter_factor". """ num_samples: int @@ -275,21 +275,21 @@ def __post_init__(self): class WobbleMatch(BaseTemplateMatchingEngine): """Template matching method from the Paninski lab. - Templates are jittered or 'wobbled' in time and amplitude to capture variability in spike amplitude and + Templates are jittered or "wobbled" in time and amplitude to capture variability in spike amplitude and super-resolution jitter in spike timing. Algorithm --------- At initialization: - 1. Compute channel sparsity to determine which units are 'visible' to each other + 1. Compute channel sparsity to determine which units are "visible" to each other 2. Compress Templates using Singular Value Decomposition into rank approx_rank 3. Upsample the temporal component of compressed templates and re-index to obtain many super-resolution-jittered temporal components for each template 3. Convolve each pair of jittered compressed templates together (subject to channel sparsity) For each chunk of traces: - 1. Compute the 'objective function' to be minimized by convolving each true template with the traces + 1. Compute the "objective function" to be minimized by convolving each true template with the traces 2. Normalize the objective relative to the magnitude of each true template - 3. Detect spikes by indexing peaks in the objective corresponding to 'matches' between the spike and a template + 3. Detect spikes by indexing peaks in the objective corresponding to "matches" between the spike and a template 4. Determine which super-resolution-jittered template best matches each spike and scale the amplitude to match 5. Subtract scaled pairwise convolved jittered templates from the objective(s) to account for the effect of removing detected spikes from the traces @@ -299,11 +299,11 @@ class WobbleMatch(BaseTemplateMatchingEngine): Notes ----- For consistency, throughout this module - - a 'unit' refers to a putative neuron which may have one or more 'templates' of its spike waveform - - Each 'template' may have many upsampled 'jittered_templates' depending on the 'jitter_factor' - - 'peaks' refer to relative maxima in the convolution of the templates with the voltage trace - - 'spikes' refer to putative extracellular action potentials (EAPs) - - 'peaks' are considered spikes if their amplitude clears the threshold parameter + - a "unit" refers to a putative neuron which may have one or more "templates" of its spike waveform + - Each "template" may have many upsampled "jittered_templates" depending on the "jitter_factor" + - "peaks" refer to relative maxima in the convolution of the templates with the voltage trace + - "spikes" refer to putative extracellular action potentials (EAPs) + - "peaks" are considered spikes if their amplitude clears the threshold parameter """ default_params = { @@ -512,7 +512,7 @@ def find_peaks(cls, objective, objective_normalized, spike_trains, params, templ scalings : ndarray (num_spikes,) Amplitude scaling used for each spike. 
distance_metric : ndarray (num_spikes) - A metric that describes how good of a 'fit' each spike is to its corresponding template + A metric that describes how good a "fit" each spike is to its corresponding template Notes ----- diff --git a/src/spikeinterface/sortingcomponents/peak_localization.py b/src/spikeinterface/sortingcomponents/peak_localization.py index abd28ef2f5..75c8f7f03f 100644 --- a/src/spikeinterface/sortingcomponents/peak_localization.py +++ b/src/spikeinterface/sortingcomponents/peak_localization.py @@ -102,7 +102,7 @@ def localize_peaks(recording, peaks, method="center_of_mass", ms_before=0.5, ms_ ------- peak_locations: ndarray Array with estimated location for each spike. - The dtype depends on the method. ('x', 'y') or ('x', 'y', 'z', 'alpha'). + The dtype depends on the method. ("x", "y") or ("x", "y", "z", "alpha"). """ peak_retriever = PeakRetriever(recording, peaks) peak_locations = _run_localization_from_peak_source( @@ -165,8 +165,8 @@ class LocalizeCenterOfMass(LocalizeBase): params_doc = """ radius_um: float Radius in um for channel sparsity. - feature: str ['ptp', 'mean', 'energy', 'peak_voltage'] - Feature to consider for computation. Default is 'ptp' + feature: "ptp" | "mean" | "energy" | "peak_voltage", default: "ptp" + Feature to consider for computation """ def __init__(self, recording, return_output=True, parents=["extract_waveforms"], radius_um=75.0, feature="ptp"): @@ -232,7 +232,7 @@ class LocalizeMonopolarTriangulation(PipelineNode): feature: "ptp", "energy", "peak_voltage", default: "ptp" The available features to consider for estimating the position via monopolar triangulation are peak-to-peak amplitudes (ptp, default), - energy ('energy', as L2 norm) or voltages at the center of the waveform + energy ("energy", as L2 norm) or voltages at the center of the waveform ("peak_voltage") diff --git a/src/spikeinterface/sortingcomponents/peak_selection.py b/src/spikeinterface/sortingcomponents/peak_selection.py index e36c2f0c3d..823da2d928 100644 --- a/src/spikeinterface/sortingcomponents/peak_selection.py +++ b/src/spikeinterface/sortingcomponents/peak_selection.py @@ -11,15 +11,15 @@ def select_peaks(peaks, method="uniform", seed=None, return_indices=False, **met Parameters ---------- peaks: the peaks that have been found - method: 'uniform', 'uniform_locations', 'smart_sampling_amplitudes', 'smart_sampling_locations', - 'smart_sampling_locations_and_time' + method: "uniform", "uniform_locations", "smart_sampling_amplitudes", "smart_sampling_locations", + "smart_sampling_locations_and_time" Method to use. 
Options: - * 'uniform': a random subset is selected from all the peaks, on a per channel basis by default - * 'smart_sampling_amplitudes': peaks are selected via monte-carlo rejection probabilities + * "uniform": a random subset is selected from all the peaks, on a per channel basis by default + * "smart_sampling_amplitudes": peaks are selected via Monte Carlo rejection probabilities based on peak amplitudes, on a per channel basis - * 'smart_sampling_locations': peaks are selection via monte-carlo rejections probabilities + * "smart_sampling_locations": peaks are selected via Monte Carlo rejection probabilities based on peak locations, on a per area region basis - * 'smart_sampling_locations_and_time': peaks are selection via monte-carlo rejections probabilities + * "smart_sampling_locations_and_time": peaks are selected via Monte Carlo rejection probabilities based on peak locations and time positions, assuming everything is independent seed: int method_kwargs: dict of kwargs method Keyword arguments for the chosen method: - 'uniform': + "uniform": * select_per_channel: bool, default: False If True, the selection is done on a per channel basis * n_peaks: int If select_per_channel is True, this is the number of peaks per channels, otherwise this is the total number of peaks - 'smart_sampling_amplitudes': + "smart_sampling_amplitudes": * noise_levels : array The noise levels used while detecting the peaks * n_peaks: int If select_per_channel is True, this is the number of peaks per channels, otherwise this is the total number of peaks * select_per_channel: bool, default: False If True, the selection is done on a per channel basis - 'smart_sampling_locations': + "smart_sampling_locations": * n_peaks: int Total number of peaks to select * peaks_locations: array The locations of all the peaks, computed via localize_peaks - 'smart_sampling_locations_and_time': + "smart_sampling_locations_and_time": * n_peaks: int Total number of peaks to select * peaks_locations: array diff --git a/src/spikeinterface/sortingcomponents/waveforms/waveform_thresholder.py b/src/spikeinterface/sortingcomponents/waveforms/waveform_thresholder.py index 0e97e7f283..a1e532eeb7 100644 --- a/src/spikeinterface/sortingcomponents/waveforms/waveform_thresholder.py +++ b/src/spikeinterface/sortingcomponents/waveforms/waveform_thresholder.py @@ -16,8 +16,8 @@ class WaveformThresholder(WaveformsNode): This node allows you to perform adaptive masking by setting channels to 0 that have a given feature below a certain threshold. The available features - to consider are peak-to-peak amplitude ('ptp'), mean amplitude ('mean'), - energy ('energy'), and peak voltage ('peak_voltage'). + to consider are peak-to-peak amplitude ("ptp"), mean amplitude ("mean"), + energy ("energy"), and peak voltage ("peak_voltage"). Parameters ---------- diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py index 9fc7b73707..2057ff28dd 100644 --- a/src/spikeinterface/widgets/base.py +++ b/src/spikeinterface/widgets/base.py @@ -21,23 +21,23 @@ def set_default_plotter_backend(backend): backend_kwargs_desc = { "matplotlib": { - "figure": "Matplotlib figure. When None, it is created. Default None", - "ax": "Single matplotlib axis. When None, it is created. Default None", - "axes": "Multiple matplotlib axes. When None, they is created. 
Default None", - "ncols": "Number of columns to create in subplots. Default 5", - "figsize": "Size of matplotlib figure. Default None", - "figtitle": "The figure title. Default None", + "figure": "Matplotlib figure. When None, it is created, default: None", + "ax": "Single matplotlib axis. When None, it is created, default: None", + "axes": "Multiple matplotlib axes. When None, they is created, default: None", + "ncols": "Number of columns to create in subplots, default: 5", + "figsize": "Size of matplotlib figure, default: None", + "figtitle": "The figure title, default: None", }, "sortingview": { - "generate_url": "If True, the figurl URL is generated and printed. Default True", - "display": "If True and in jupyter notebook/lab, the widget is displayed in the cell. Default True.", - "figlabel": "The figurl figure label. Default None", - "height": "The height of the sortingview View in jupyter. Default None", + "generate_url": "If True, the figurl URL is generated and printed, default: True", + "display": "If True and in jupyter notebook/lab, the widget is displayed in the cell, default: True.", + "figlabel": "The figurl figure label, default: None", + "height": "The height of the sortingview View in jupyter, default: None", }, "ipywidgets": { - "width_cm": "Width of the figure in cm (default 10)", - "height_cm": "Height of the figure in cm (default 6)", - "display": "If True, widgets are immediately displayed", + "width_cm": "Width of the figure in cm, default: 10", + "height_cm": "Height of the figure in cm, default 6", + "display": "If True, widgets are immediately displayed, default: True", # "controllers": "" }, "ephyviewer": {}, diff --git a/src/spikeinterface/widgets/collision.py b/src/spikeinterface/widgets/collision.py index 2b86a2af2d..046146635c 100644 --- a/src/spikeinterface/widgets/collision.py +++ b/src/spikeinterface/widgets/collision.py @@ -13,13 +13,13 @@ class ComparisonCollisionBySimilarityWidget(BaseWidget): The collision ground truth comparison object templates: array template of units - mode: 'heatmap' or 'lines' - to see collision curves for every pairs ('heatmap') or as lines averaged over pairs. + mode: "heatmap" or "lines" + to see collision curves for every pairs ("heatmap") or as lines averaged over pairs. similarity_bins: array - if mode is 'lines', the bins used to average the pairs + if mode is "lines", the bins used to average the pairs cmap: string - colormap used to show averages if mode is 'lines' - metric: 'cosine_similarity' + colormap used to show averages if mode is "lines" + metric: "cosine_similarity" metric for ordering good_only: True keep only the pairs with a non zero accuracy (found templates) @@ -182,12 +182,12 @@ class StudyComparisonCollisionBySimilarityWidget(BaseWidget): The collision study object. case_keys: list or None A selection of cases to plot, if None, then all. 
- metric: 'cosine_similarity' + metric: "cosine_similarity" metric for ordering similarity_bins: array - if mode is 'lines', the bins used to average the pairs + if mode is "lines", the bins used to average the pairs cmap: string - colormap used to show averages if mode is 'lines' + colormap used to show averages if mode is "lines" good_only: False keep only the pairs with a non-zero accuracy (found templates) min_accuracy: float diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 559aabce02..5e934f9702 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -139,7 +139,7 @@ class StudyPerformances(BaseWidget): * "ordered": plot performance metrics vs unit indices ordered by decreasing accuracy * "snr": plot performance metrics vs snr * "swarm": plot performance metrics as a swarm plot (see seaborn.swarmplot for details) - performance_names: list or tuple, default: ('accuracy', 'precision', 'recall') + performance_names: list or tuple, default: ("accuracy", "precision", "recall") Which performances to plot ("accuracy", "precision", "recall") case_keys: list or None A selection of cases to plot, if None, then all. diff --git a/src/spikeinterface/widgets/motion.py b/src/spikeinterface/widgets/motion.py index fbe2c8fa8b..b097dca1f0 100644 --- a/src/spikeinterface/widgets/motion.py +++ b/src/spikeinterface/widgets/motion.py @@ -23,7 +23,7 @@ class MotionWidget(BaseWidget): If True, the color of the scatter points is the amplitude of the peaks scatter_decimate : int, default: None If > 1, the scatter points are decimated - amplitude_cmap : str, default: 'inferno' + amplitude_cmap : str, default: "inferno" The colormap to use for the amplitude amplitude_clim : tuple or None, default: None The min and max amplitude to display, if None (min and max of the amplitudes) diff --git a/src/spikeinterface/widgets/multicomparison.py b/src/spikeinterface/widgets/multicomparison.py index f1df62a7da..fb34156fef 100644 --- a/src/spikeinterface/widgets/multicomparison.py +++ b/src/spikeinterface/widgets/multicomparison.py @@ -15,9 +15,9 @@ class MultiCompGraphWidget(BaseWidget): The multi comparison object draw_labels: bool, default: False If True unit labels are shown - node_cmap: matplotlib colormap, default: 'viridis' + node_cmap: matplotlib colormap, default: "viridis" The colormap to be used for the nodes - edge_cmap: matplotlib colormap, default: 'hot' + edge_cmap: matplotlib colormap, default: "hot" The colormap to be used for the edges alpha_edges: float, default: 0.5 Alpha value for edges @@ -119,9 +119,9 @@ class MultiCompGlobalAgreementWidget(BaseWidget): ---------- multi_comparison: BaseMultiComparison The multi comparison object - plot_type: str - 'pie' or 'bar' - cmap: matplotlib colormap, default: 'YlOrRd' + plot_type: "pie" | "bar", default: "pie" + The plot type + cmap: matplotlib colormap, default: "YlOrRd" The colormap to be used for the nodes fontsize: int, default: 9 The text fontsize @@ -197,9 +197,9 @@ class MultiCompAgreementBySorterWidget(BaseWidget): ---------- multi_comparison: BaseMultiComparison The multi comparison object - plot_type: str - 'pie' or 'bar' - cmap: matplotlib colormap, default: 'Reds' + plot_type: "pie" | "bar", default: "pie" + The plot type + cmap: matplotlib colormap, default: "Reds" The colormap to be used for the nodes fontsize: int, default: 9 The text fontsize diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py index 
3946932d53..6ab0962f99 100644 --- a/src/spikeinterface/widgets/spike_locations.py +++ b/src/spikeinterface/widgets/spike_locations.py @@ -27,11 +27,11 @@ class SpikeLocationsWidget(BaseWidget): For sortingview backend, if True the unit selector is not displayed plot_all_units : bool, default: True If True, all units are plotted. The unselected ones (not in unit_ids), - are plotted in grey. Default True (matplotlib backend) + are plotted in grey (matplotlib backend) plot_legend : bool, default: False - If True, the legend is plotted. Default False (matplotlib backend) + If True, the legend is plotted (matplotlib backend) hide_axis : bool, default: False - If True, the axis is set to off. Default False (matplotlib backend) + If True, the axis is set to off (matplotlib backend) """ # possible_backends = {} diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index bf53d7c926..b6946542b7 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -33,14 +33,14 @@ class SpikesOnTracesWidget(BaseWidget): unit_colors : dict or None, default: None If given, a dictionary with unit ids as keys and colors as values If None, then get_unit_colors() is used internally. (matplotlib backend) - mode : str in ('line', 'map', 'auto') default: 'auto' - * 'line': classical for low channel count - * 'map': for high channel count use color heat map - * 'auto': auto switch depending on the channel count ('line' if less than 64 channels, 'map' otherwise) + mode : "line" | "map" | "auto", default: "auto" + * "line": classical for low channel count + * "map": for high channel count use color heat map + * "auto": auto switch depending on the channel count ("line" if less than 64 channels, "map" otherwise) return_scaled : bool, default: False If True and the recording has scaled traces, it plots the scaled traces - cmap : str, default: 'RdBu' - matplotlib colormap used in mode 'map' + cmap : str, default: "RdBu" + matplotlib colormap used in mode "map" show_channel_ids : bool, default: False Set yticks with channel ids color_groups : bool, default: False color : str or None, default: None The color used to draw the traces clim : None, tuple or dict, default: None - When mode is 'map', this argument controls color limits. + When mode is "map", this argument controls color limits. If dict, keys should be the same as recording keys - Default None with_colorbar : bool, default: True - When mode is 'map', a colorbar is added + When mode is "map", a colorbar is added tile_size : int, default: 512 For sortingview backend, the size of each tile in the rendered image seconds_per_row : float, default: 0.2 - For 'map' mode and sortingview backend, seconds to render in each row + For "map" mode and sortingview backend, seconds to render in each row """ def __init__( diff --git a/src/spikeinterface/widgets/template_similarity.py b/src/spikeinterface/widgets/template_similarity.py index 3d7216c4dc..4ab469f456 100644 --- a/src/spikeinterface/widgets/template_similarity.py +++ b/src/spikeinterface/widgets/template_similarity.py @@ -17,7 +17,7 @@ class TemplateSimilarityWidget(BaseWidget): display_diagonal_values : bool, default: False If False, the diagonal is displayed as zeros. 
If True, the similarity values (all 1s) are displayed - cmap : matplotlib colormap, default: 'viridis' + cmap : matplotlib colormap, default: "viridis" The matplotlib colormap show_unit_ticks : bool, default: False If True, ticks display unit ids diff --git a/src/spikeinterface/widgets/unit_depths.py b/src/spikeinterface/widgets/unit_depths.py index f507b70fd1..1e40a7940e 100644 --- a/src/spikeinterface/widgets/unit_depths.py +++ b/src/spikeinterface/widgets/unit_depths.py @@ -20,7 +20,7 @@ class UnitDepthsWidget(BaseWidget): If given, a dictionary with unit ids as keys and colors as values depth_axis : int, default: 1 The dimension of unit_locations that is depth - peak_sign: 'neg' or 'pos' or 'both', default: 'neg' + peak_sign: "neg" | "pos" | "both", default: "neg" Sign of peak for amplitudes diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index b7abf79943..c49d866139 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -23,7 +23,7 @@ class UnitWaveformDensityMapWidget(BaseWidget): If WaveformExtractor is already sparse, the argument is ignored use_max_channel : bool, default: False Use only the max channel - peak_sign : 'pos' or 'neg' or 'both', default: 'neg' + peak_sign : "neg" | "pos" | "both", default: "neg" Used to detect max channel only when use_max_channel=True unit_colors : None or dict, default: None A dict whose keys are unit ids and whose values are any color format handled by matplotlib. diff --git a/src/spikeinterface/widgets/utils.py b/src/spikeinterface/widgets/utils.py index 5d56709921..e31ef7679e 100644 --- a/src/spikeinterface/widgets/utils.py +++ b/src/spikeinterface/widgets/utils.py @@ -24,11 +24,11 @@ def get_some_colors(keys, color_engine="auto", map_name="gist_ncar", format="RGB Parameters ---------- - color_engine : str 'auto' / 'matplotlib' / 'colorsys' / 'distinctipy' + color_engine : "auto" | "matplotlib" | "colorsys" | "distinctipy", default: "auto" The engine to generate colors map_name : str Used for matplotlib - format: str, default: 'RGBA' + format: str, default: "RGBA" The output format shuffle : bool or None, default: None Whether or not to shuffle the colors.
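For orientation, a minimal sketch of calling the color utility documented above (illustrative only; it assumes get_some_colors is importable from spikeinterface.widgets.utils, as the diff header indicates, and that the keyword arguments match the signature shown there):

    from spikeinterface.widgets.utils import get_some_colors

    # Any sequence of hashable keys works; unit ids are the typical input.
    unit_ids = ["unit0", "unit1", "unit2"]
    colors = get_some_colors(unit_ids, color_engine="auto", format="RGBA")
    # colors maps each key to a color, e.g. colors["unit0"] -> (r, g, b, a)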