From c15d6d3aa2084e77ff60156bef2f3c06ddc1b1ba Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Fri, 27 Oct 2023 14:18:04 +0200
Subject: [PATCH] Add unsaved files

---
 src/spikeinterface/core/base.py                    |  4 ++--
 .../extractors/neoextractors/openephys.py          |  2 +-
 src/spikeinterface/extractors/neuropixels_utils.py |  2 +-
 src/spikeinterface/sorters/launcher.py             |  2 +-
 .../sortingcomponents/motion_estimation.py         | 10 +++++-----
 .../sortingcomponents/peak_detection.py            | 12 ++++++------
 src/spikeinterface/widgets/base.py                 |  4 ++--
 7 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py
index 795a6703f9..f188ce7aa6 100644
--- a/src/spikeinterface/core/base.py
+++ b/src/spikeinterface/core/base.py
@@ -269,8 +269,8 @@ def copy_metadata(
             If True, only the main annotations/properties are copied.
         ids: list
             List of ids to copy the metadata to. If None, all ids are copied.
-        skip_properties: list
-            List of properties to skip. Default is None.
+        skip_properties: list, default: None
+            List of properties to skip
         """
         if ids is None:
diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py
index 9879eb89d0..1d17cd728c 100644
--- a/src/spikeinterface/extractors/neoextractors/openephys.py
+++ b/src/spikeinterface/extractors/neoextractors/openephys.py
@@ -331,7 +331,7 @@ def read_openephys(folder_path, **kwargs):

 def read_openephys_event(folder_path, block_index=None):
     """
-    Read Open Ephys events from 'binary' format.
+    Read Open Ephys events from "binary" format.

     Parameters
     ----------
diff --git a/src/spikeinterface/extractors/neuropixels_utils.py b/src/spikeinterface/extractors/neuropixels_utils.py
index 0db2394dc0..6bef869eb8 100644
--- a/src/spikeinterface/extractors/neuropixels_utils.py
+++ b/src/spikeinterface/extractors/neuropixels_utils.py
@@ -111,7 +111,7 @@ def synchronize_neuropixel_streams(recording_ref, recording_other):

     Method used:
       1. detect pulse times on both streams.
-      2. make a linear regression from 'other' to 'ref'.
+      2. make a linear regression from "other" to "ref".
         The slope is close to 1 and corresponds to the sample rate correction.
         The intercept is close to 0 and corresponds to the delta time start.
diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py
index 94b56754e8..15098c8430 100644
--- a/src/spikeinterface/sorters/launcher.py
+++ b/src/spikeinterface/sorters/launcher.py
@@ -339,7 +339,7 @@ def run_sorters(
         * "raise" : raise error if subfolder exists
         * "overwrite" : delete and force recompute
         * "keep" : do not compute again if subfolder exists and log is OK
-    engine: "loop" | "joblib" | "dask', default: "loop"
+    engine: "loop" | "joblib" | "dask", default: "loop"
        Which engine to use to run the sorters.
     engine_kwargs: dict
        This contains kwargs specific to the launcher engine:
diff --git a/src/spikeinterface/sortingcomponents/motion_estimation.py b/src/spikeinterface/sortingcomponents/motion_estimation.py
index 139af9c9ff..df73575a01 100644
--- a/src/spikeinterface/sortingcomponents/motion_estimation.py
+++ b/src/spikeinterface/sortingcomponents/motion_estimation.py
@@ -226,10 +226,10 @@ class DecentralizedRegistration:
     error_sigma: float, default: 0.2
         In case weight_scale="exp" this controls the sigma of the exponential.
    conv_engine: "numpy" or "torch" or None, default: None
-        In case of pairwise_displacement_method="conv', what library to use to compute
+        In case of pairwise_displacement_method="conv", what library to use to compute
         the underlying correlation
     torch_device=None
-        In case of conv_engine='torch', you can control which device (cpu or gpu)
+        In case of conv_engine="torch", you can control which device (cpu or gpu)
     batch_size: int
         Size of batch for the convolution. Increasing this will speed things up dramatically
         on GPUs and sometimes on CPU as well.
@@ -241,10 +241,10 @@ class DecentralizedRegistration:
         When not None, the pairwise displacement matrix is computed within a small time horizon:
         only pairs of bins close in time are compared, so the pairwise matrix is very sparse
         and has values only around the diagonal.
-    convergence_method: 'lsmr', 'lsqr_robust', 'gradient_descent'
+    convergence_method: "lsmr" | "lsqr_robust" | "gradient_descent", default: "lsqr_robust"
         Which method to use to compute the global displacement vector from the pairwise matrix.
     robust_regression_sigma: float
-        Use for convergence_method='lsqr_robust' for iterative selection of the regression.
+        Used for convergence_method="lsqr_robust" for iterative selection of the regression.
     temporal_prior : bool, default: True
         Ensures continuity across time, unless there is evidence in the recording for jumps.
     spatial_prior : bool, default: False
@@ -259,7 +259,7 @@ class DecentralizedRegistration:
         - "time" : the displacement at a given time (in seconds) is subtracted
         - "mode_search" : an attempt is made to guess the mode. Needs work.
     lsqr_robust_n_iter: int
-        Number of iteration for convergence_method='lsqr_robust'.
+        Number of iterations for convergence_method="lsqr_robust".
     """

     @classmethod
diff --git a/src/spikeinterface/sortingcomponents/peak_detection.py b/src/spikeinterface/sortingcomponents/peak_detection.py
index 8552f15fb2..ec790e614a 100644
--- a/src/spikeinterface/sortingcomponents/peak_detection.py
+++ b/src/spikeinterface/sortingcomponents/peak_detection.py
@@ -53,8 +53,8 @@ def detect_peaks(
 ):
     """Peak detection based on threshold crossing in terms of k x MAD.
-    In 'by_channel' : peak are detected in each channel independently
-    In 'locally_exclusive' : a single best peak is taken from a set of neighboring channels
+    In "by_channel" : peaks are detected in each channel independently
+    In "locally_exclusive" : a single best peak is taken from a set of neighboring channels

     Parameters
     ----------
@@ -357,7 +357,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin):

 class DetectPeakByChannel(PeakDetectorWrapper):
-    """Detect peaks using the 'by channel' method."""
+    """Detect peaks using the "by channel" method."""

     name = "by_channel"
     engine = "numpy"
@@ -439,7 +439,7 @@ def detect_peaks(cls, traces, peak_sign, abs_threholds, exclude_sweep_size):

 class DetectPeakByChannelTorch(PeakDetectorWrapper):
-    """Detect peaks using the 'by channel' method with pytorch."""
+    """Detect peaks using the "by channel" method with pytorch."""

     name = "by_channel_torch"
     engine = "torch"
@@ -505,7 +505,7 @@ def detect_peaks(cls, traces, peak_sign, abs_threholds, exclude_sweep_size, devi

 class DetectPeakLocallyExclusive(PeakDetectorWrapper):
-    """Detect peaks using the 'locally exclusive' method."""
+    """Detect peaks using the "locally exclusive" method."""

     name = "locally_exclusive"
     engine = "numba"
@@ -581,7 +581,7 @@ def detect_peaks(cls, traces, peak_sign, abs_threholds, exclude_sweep_size, neig

 class DetectPeakLocallyExclusiveTorch(PeakDetectorWrapper):
-    """Detect peaks using the 'locally exclusive' method with pytorch."""
+    """Detect peaks using the "locally exclusive" method with pytorch."""

     name = "locally_exclusive_torch"
     engine = "torch"
diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py
index 2057ff28dd..a5d3cb2429 100644
--- a/src/spikeinterface/widgets/base.py
+++ b/src/spikeinterface/widgets/base.py
@@ -6,7 +6,7 @@ def get_default_plotter_backend():
     """Return the default backend for spikeinterface widgets.

-    The default backend is 'matplotlib' at init.
+    The default backend is "matplotlib" at init.
     It can be globally set with `set_default_plotter_backend(backend)`
     """
@@ -123,7 +123,7 @@ def __init__(self, d):
     Helper function that transforms a dict into an object
     where attributes are the keys of the dict

-    d = {'a': 1, 'b': 'yep'}
+    d = {"a": 1, "b": "yep"}
     o = to_attr(d)
     print(o.a, o.b)
     """
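
The `synchronize_neuropixel_streams` docstring touched above describes a two-step method: detect pulse times on both streams, then regress "other" onto "ref". A minimal NumPy sketch of the regression step, assuming the pulse times are already detected (the hard-coded arrays are illustrative only, not the library's API):

import numpy as np

# Illustrative pulse times (seconds) assumed already detected on each stream.
pulse_times_ref = np.array([1.0, 2.0, 3.0, 4.0])
pulse_times_other = np.array([1.0001, 2.0003, 3.0004, 4.0006])

# Linear regression from "other" to "ref": the slope (close to 1) is the
# sample-rate correction, the intercept (close to 0) the start-time delta.
slope, intercept = np.polyfit(pulse_times_other, pulse_times_ref, deg=1)

def other_to_ref(t):
    # Map a time on the "other" stream onto the "ref" clock.
    return slope * t + intercept

print(f"slope={slope:.8f}, intercept={intercept:.6f}")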
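
Likewise, the `detect_peaks` docstring defines thresholds as k x MAD. A minimal sketch of the "by_channel" case on synthetic data, assuming negative-going peaks and omitting the exclude-sweep logic and channel neighborhoods that the real detectors apply:

import numpy as np

rng = np.random.default_rng(0)
traces = rng.normal(0.0, 1.0, size=(10_000, 4))  # synthetic (samples, channels)

k = 5.0
# Robust per-channel noise estimate: MAD scaled to match the Gaussian std.
mad = np.median(np.abs(traces - np.median(traces, axis=0)), axis=0) / 0.6745
abs_thresholds = k * mad

# "by_channel": each channel is thresholded independently (negative peaks).
sample_inds, chan_inds = np.nonzero(traces < -abs_thresholds[None, :])
print(f"{sample_inds.size} threshold crossings")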
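
Finally, the `to_attr` docstring in the last hunk fully specifies the helper's behavior. One implementation consistent with that example, a sketch assuming plain attribute storage (the widget module's actual code may differ):

class to_attr:
    def __init__(self, d):
        # Expose the dict's keys directly as instance attributes.
        self.__dict__.update(d)

d = {"a": 1, "b": "yep"}
o = to_attr(d)
print(o.a, o.b)  # -> 1 yep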