From 092c13c9683194819bfdf78e4c79ebb04b005344 Mon Sep 17 00:00:00 2001
From: mhhennig
Date: Sat, 27 Jul 2024 16:08:54 +0100
Subject: [PATCH 1/7] added lowpass parameter, fixed verbose option

---
 src/spikeinterface/sorters/external/herdingspikes.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/spikeinterface/sorters/external/herdingspikes.py b/src/spikeinterface/sorters/external/herdingspikes.py
index a84c05c240..f3bbb530ef 100644
--- a/src/spikeinterface/sorters/external/herdingspikes.py
+++ b/src/spikeinterface/sorters/external/herdingspikes.py
@@ -19,6 +19,7 @@ class HerdingspikesSorter(BaseSorter):
         "chunk_size": None,
         "rescale": True,
         "rescale_value": -1280.0,
+        "lowpass": True,
         "common_reference": "median",
         "spike_duration": 1.0,
         "amp_avg_duration": 0.4,
@@ -53,6 +54,7 @@ class HerdingspikesSorter(BaseSorter):
         "out_file": "Path and filename to store detection and clustering results. (`str`, `HS2_detected`)",
         "verbose": "Print progress information. (`bool`, `True`)",
         "chunk_size": " Number of samples per chunk during detection. If `None`, a suitable value will be estimated. (`int`, `None`)",
+        "lowpass": "Enable internal low-pass filtering (simple two-step average). (`bool`, `True`)",
         "common_reference": "Method for common reference filtering, can be `average` or `median` (`str`, `median`)",
         "rescale": "Automatically re-scale the data. (`bool`, `True`)",
         "rescale_value": "Factor by which data is re-scaled. (`float`, `-1280.0`)",
@@ -122,20 +124,21 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose):
 
         hs_version = version.parse(hs.__version__)
 
-        if hs_version >= version.parse("0.4.001"):
+        if hs_version >= version.parse("0.4.1"):
             lightning_api = True
         else:
             lightning_api = False
 
         assert (
             lightning_api
-        ), "HerdingSpikes version <0.4.001 is no longer supported. run:\n>>> pip install --upgrade herdingspikes"
+        ), "HerdingSpikes version <0.4.001 is no longer supported. To upgrade, run:\n>>> pip install --upgrade herdingspikes"
 
         recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False)
 
         sorted_file = str(sorter_output_folder / "HS2_sorted.hdf5")
         params["out_file"] = str(sorter_output_folder / "HS2_detected")
 
         p = params
+        p.update({"verbose": verbose})
 
         det = hs.HSDetectionLightning(recording, p)
         det.DetectFromRaw()
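A usage sketch for context (not part of the patch): the new `lowpass` flag travels through the standard `run_sorter` front end like any other sorter parameter, and with this commit `verbose` is now actually forwarded into the HS2 params dict. The `recording` object and the output folder name below are assumptions for illustration.

```python
# Minimal sketch, assuming an existing preprocessed BaseRecording `recording`;
# the folder name is illustrative.
from spikeinterface.sorters import run_sorter

sorting = run_sorter(
    sorter_name="herdingspikes",
    recording=recording,
    folder="hs2_output",
    verbose=True,    # now forwarded to HerdingSpikes via p.update(...)
    lowpass=False,   # disable the internal two-step average filter
)
```
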
From 0b7dc336d28dca614ea98c5abd975b8c61346fd6 Mon Sep 17 00:00:00 2001
From: "Matthias H. Hennig"
Date: Mon, 29 Jul 2024 11:12:34 +0100
Subject: [PATCH 2/7] Update src/spikeinterface/sorters/external/herdingspikes.py

Co-authored-by: Alessio Buccino
---
 src/spikeinterface/sorters/external/herdingspikes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/sorters/external/herdingspikes.py b/src/spikeinterface/sorters/external/herdingspikes.py
index f3bbb530ef..94d66e7f86 100644
--- a/src/spikeinterface/sorters/external/herdingspikes.py
+++ b/src/spikeinterface/sorters/external/herdingspikes.py
@@ -131,7 +131,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose):
 
         assert (
             lightning_api
-        ), "HerdingSpikes version <0.4.001 is no longer supported. To upgrade, run:\n>>> pip install --upgrade herdingspikes"
+        ), "HerdingSpikes version <0.4.1 is no longer supported. To upgrade, run:\n>>> pip install --upgrade herdingspikes"
 
         recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False)
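Worth noting why only the message needed changing: under PEP 440 the two spellings parse to the same version, so the `0.4.001` gate was never wrong, just confusingly worded. A quick check:

```python
# "0.4.001" and "0.4.1" normalize to the same release segment (0, 4, 1),
# so the version comparison behaves identically; only the user-facing
# error message changed in this commit.
from packaging import version

assert version.parse("0.4.001") == version.parse("0.4.1")
assert version.parse("0.4.0") < version.parse("0.4.1")
```
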
From 36eec5718de05ffcd609aea93d0d8603d7e43298 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Mon, 29 Jul 2024 18:39:41 +0200
Subject: [PATCH 3/7] Fix postprocessing docs

---
 doc/modules/postprocessing.rst | 58 ++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/doc/modules/postprocessing.rst b/doc/modules/postprocessing.rst
index cc2b064ed0..3c30f248c8 100644
--- a/doc/modules/postprocessing.rst
+++ b/doc/modules/postprocessing.rst
@@ -208,9 +208,11 @@ For dense waveforms, sparsity can also be passed as an argument.
 
 .. code-block:: python
 
-    pc = sorting_analyzer.compute(input="principal_components",
-                                  n_components=3,
-                                  mode="by_channel_local")
+    pc = sorting_analyzer.compute(
+        input="principal_components",
+        n_components=3,
+        mode="by_channel_local"
+    )
 
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_principal_components`
 
@@ -243,9 +245,7 @@ each spike.
 
 .. code-block:: python
 
-    amplitudes = sorting_analyzer.compute(input="spike_amplitudes",
-                                          peak_sign="neg",
-                                          outputs="concatenated")
+    amplitudes = sorting_analyzer.compute(input="spike_amplitudes", peak_sign="neg")
 
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes`
 
@@ -263,15 +263,17 @@ with center of mass (:code:`method="center_of_mass"` - fast, but less accurate),
 
 .. code-block:: python
 
-    spike_locations = sorting_analyzer.compute(input="spike_locations",
-                                               ms_before=0.5,
-                                               ms_after=0.5,
-                                               spike_retriever_kwargs=dict(
-                                                   channel_from_template=True,
-                                                   radius_um=50,
-                                                   peak_sign="neg"
-                                               ),
-                                               method="center_of_mass")
+    spike_locations = sorting_analyzer.compute(
+        input="spike_locations",
+        ms_before=0.5,
+        ms_after=0.5,
+        spike_retriever_kwargs=dict(
+            channel_from_template=True,
+            radius_um=50,
+            peak_sign="neg"
+        ),
+        method="center_of_mass"
+    )
 
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_spike_locations`
 
@@ -329,6 +331,12 @@ Optionally, the following multi-channel metrics can be computed by setting:
     Visualization of template metrics.
     Image from `ecephys_spike_sorting `_ from the Allen Institute.
 
+
+.. code-block:: python
+
+    tm = sorting_analyzer.compute(input="template_metrics", include_multi_channel_metrics=True)
+
+
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_template_metrics`
 
@@ -340,10 +348,12 @@ with shape (num_units, num_units, num_bins) with all correlograms for each pair
 
 .. code-block:: python
 
-    ccg = sorting_analyzer.compute(input="correlograms",
-                                   window_ms=50.0,
-                                   bin_ms=1.0,
-                                   method="auto")
+    ccg = sorting_analyzer.compute(
+        input="correlograms",
+        window_ms=50.0,
+        bin_ms=1.0,
+        method="auto"
+    )
 
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_correlograms`
 
@@ -357,10 +367,12 @@ This extension computes the histograms of inter-spike-intervals. The computed ou
 
 .. code-block:: python
 
-    isi = sorting_analyer.compute(input="isi_histograms"
-                                  window_ms=50.0,
-                                  bin_ms=1.0,
-                                  method="auto")
+    isi = sorting_analyzer.compute(
+        input="isi_histograms",
+        window_ms=50.0,
+        bin_ms=1.0,
+        method="auto"
+    )
 
 For more information, see :py:func:`~spikeinterface.postprocessing.compute_isi_histograms`
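For readers trying the corrected snippets: they all assume a `SortingAnalyzer` whose parent extensions have already been computed. A sketch of that setup (assuming existing `recording` and `sorting` objects; not part of the patch):

```python
# Setup assumed by the doc examples above.
import spikeinterface.full as si

sorting_analyzer = si.create_sorting_analyzer(sorting=sorting, recording=recording)

# most extensions depend on these parents being computed first
sorting_analyzer.compute(["random_spikes", "waveforms", "templates"])

# after which the calls shown in the updated docs work as written, e.g.
ccg = sorting_analyzer.compute(input="correlograms", window_ms=50.0, bin_ms=1.0)
```
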
From 4b83e8b52a906826570df49c7bd29a5c26050235 Mon Sep 17 00:00:00 2001
From: Zach McKenzie <92116279+zm711@users.noreply.github.com>
Date: Mon, 29 Jul 2024 13:41:09 -0400
Subject: [PATCH 4/7] fix docstring and error

---
 src/spikeinterface/postprocessing/spike_amplitudes.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py
index e82a9e61e4..f158952132 100644
--- a/src/spikeinterface/postprocessing/spike_amplitudes.py
+++ b/src/spikeinterface/postprocessing/spike_amplitudes.py
@@ -44,8 +44,8 @@ class ComputeSpikeAmplitudes(AnalyzerExtension):
         The localization method to use
     method_kwargs : dict, default: dict()
         Other kwargs depending on the method.
-    outputs : "concatenated" | "by_unit", default: "concatenated"
-        The output format
+    outputs : "numpy" | "by_unit", default: "numpy"
+        The output format, either concatenated as numpy array or separated on a per unit basis
 
     Returns
     -------
@@ -148,7 +148,7 @@ def _get_data(self, outputs="numpy"):
                     amplitudes_by_units[segment_index][unit_id] = all_amplitudes[inds]
             return amplitudes_by_units
         else:
-            raise ValueError(f"Wrong .get_data(outputs={outputs})")
+            raise ValueError(f"Wrong .get_data(outputs={outputs}); possibilities are `numpy` or `by_unit`")
 
 
 register_result_extension(ComputeSpikeAmplitudes)
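A sketch of the two output layouts the corrected docstring names (assuming the extension has already been computed on `sorting_analyzer`; not part of the patch):

```python
# "numpy" returns one concatenated 1d array over all spikes; "by_unit"
# returns one dict per segment keyed by unit id.
ext = sorting_analyzer.get_extension("spike_amplitudes")

all_amplitudes = ext.get_data(outputs="numpy")        # shape: (num_spikes,)
amplitudes_by_unit = ext.get_data(outputs="by_unit")  # [{unit_id: array}, ...] per segment

unit0 = sorting_analyzer.unit_ids[0]
print(all_amplitudes.shape, amplitudes_by_unit[0][unit0].shape)
```
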
From 983cf753ebc17a85b52e413e565225e1207ed8a4 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Mon, 29 Jul 2024 20:04:38 +0200
Subject: [PATCH 5/7] Protect median against nans in get_prototype_spike

---
 src/spikeinterface/sortingcomponents/tools.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/sortingcomponents/tools.py b/src/spikeinterface/sortingcomponents/tools.py
index facefac4c5..969b20c272 100644
--- a/src/spikeinterface/sortingcomponents/tools.py
+++ b/src/spikeinterface/sortingcomponents/tools.py
@@ -70,18 +70,18 @@ def extract_waveform_at_max_channel(rec, peaks, ms_before=0.5, ms_after=1.5, **j
 
 
 def get_prototype_spike(recording, peaks, ms_before=0.5, ms_after=0.5, nb_peaks=1000, **job_kwargs):
+    from spikeinterface.sortingcomponents.peak_selection import select_peaks
+
     nbefore = int(ms_before * recording.sampling_frequency / 1000.0)
     nafter = int(ms_after * recording.sampling_frequency / 1000.0)
 
-    from spikeinterface.sortingcomponents.peak_selection import select_peaks
-
     few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=nb_peaks, margin=(nbefore, nafter))
     waveforms = extract_waveform_at_max_channel(
         recording, few_peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs
     )
 
     with np.errstate(divide="ignore", invalid="ignore"):
-        prototype = np.median(waveforms[:, :, 0] / (np.abs(waveforms[:, nbefore, 0][:, np.newaxis])), axis=0)
+        prototype = np.nanmedian(waveforms[:, :, 0] / (np.abs(waveforms[:, nbefore, 0][:, np.newaxis])), axis=0)
 
     return prototype
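The failure mode this guards against, in miniature: a spike whose max-channel amplitude is zero produces NaNs after normalization, and a plain median then smears those NaNs across every sample of the prototype. A toy demonstration (not part of the patch):

```python
# np.median propagates NaN; np.nanmedian simply skips the degenerate spike.
import numpy as np

normalized = np.array([[0.5, 1.0, 0.5],
                       [0.4, 1.0, 0.6],
                       [np.nan, np.nan, np.nan]])  # zero-amplitude spike -> all NaN

print(np.median(normalized, axis=0))     # [nan nan nan]
print(np.nanmedian(normalized, axis=0))  # [0.45 1.   0.55]
```
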
From dcd64b2285d6e2cb5d7d541ff0ff3bf9cc714089 Mon Sep 17 00:00:00 2001
From: zm711 <92116279+zm711@users.noreply.github.com>
Date: Wed, 31 Jul 2024 08:09:27 -0400
Subject: [PATCH 6/7] some more numpydoc fixes

---
 .../comparison/basecomparison.py              |   4 +-
 .../comparison/comparisontools.py             | 110 +++++++++---------
 .../curation/curation_format.py               |   4 +-
 src/spikeinterface/curation/curation_tools.py |  10 +-
 .../curation/splitunitsorting.py              |   2 +-
 src/spikeinterface/sorters/container_tools.py |  30 ++---
 .../sorters/external/kilosort.py              |   6 +-
 .../sorters/external/kilosort2.py             |   6 +-
 .../sorters/external/kilosort2_5.py           |   6 +-
 .../sorters/external/kilosort3.py             |   6 +-
 .../sorters/external/kilosortbase.py          |   6 +-
 11 files changed, 95 insertions(+), 95 deletions(-)

diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py
index f1d2130d38..3a39f08a7c 100644
--- a/src/spikeinterface/comparison/basecomparison.py
+++ b/src/spikeinterface/comparison/basecomparison.py
@@ -63,9 +63,9 @@ def compute_subgraphs(self):
         Computes subgraphs of connected components.
 
         Returns
         -------
-        sg_object_names: list
+        sg_object_names : list
            List of sorter names for each node in the connected component subgraph
-        sg_units: list
+        sg_units : list
            List of unit ids for each node in the connected component subgraph
         """
         if self.clean_graph is not None:
diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py
index 87d0bf512b..814cb907ea 100644
--- a/src/spikeinterface/comparison/comparisontools.py
+++ b/src/spikeinterface/comparison/comparisontools.py
@@ -14,16 +14,16 @@ def count_matching_events(times1, times2, delta=10):
 
     Parameters
     ----------
-    times1: list
+    times1 : list
        List of spike train 1 frames
-    times2: list
+    times2 : list
        List of spike train 2 frames
-    delta: int
+    delta : int
        Number of frames for considering matching events
 
     Returns
     -------
-    matching_count: int
+    matching_count : int
        Number of matching events
    """
    times_concat = np.concatenate((times1, times2))
@@ -45,16 +45,16 @@ def compute_agreement_score(num_matches, num1, num2):
 
     Parameters
     ----------
-    num_matches: int
+    num_matches : int
        Number of matches
-    num1: int
+    num1 : int
        Number of events in spike train 1
-    num2: int
+    num2 : int
        Number of events in spike train 2
 
     Returns
     -------
-    score: float
+    score : float
        Agreement score
    """
    denom = num1 + num2 - num_matches
@@ -71,12 +71,12 @@ def do_count_event(sorting):
 
     Parameters
     ----------
-    sorting: SortingExtractor
+    sorting : SortingExtractor
        A sorting extractor
 
     Returns
     -------
-    event_count: pd.Series
+    event_count : pd.Series
        Nb of spike by units.
    """
    import pandas as pd
@@ -90,14 +90,14 @@ def count_match_spikes(times1, all_times2, delta_frames):  # , event_counts1, ev
 
     Parameters
     ----------
-    times1: array
+    times1 : array
        Spike train 1 frames
-    all_times2: list of array
+    all_times2 : list of array
        List of spike trains from sorting 2
 
     Returns
     -------
-    matching_events_count: list
+    matching_events_count : list
        List of counts of matching events
    """
    matching_event_counts = np.zeros(len(all_times2), dtype="int64")
@@ -337,18 +337,18 @@ def make_agreement_scores(sorting1, sorting2, delta_frames, ensure_symmetry=True
 
     Parameters
     ----------
-    sorting1: SortingExtractor
+    sorting1 : SortingExtractor
        The first sorting extractor
-    sorting2: SortingExtractor
+    sorting2 : SortingExtractor
        The second sorting extractor
-    delta_frames: int
+    delta_frames : int
        Number of frames to consider spikes coincident
-    ensure_symmetry: bool, default: True
+    ensure_symmetry : bool, default: True
        If ensure_symmetry is True, then the algo is run two times by switching sorting1 and sorting2.
        And the minimum of the two results is taken.
 
     Returns
     -------
-    agreement_scores: array (float)
+    agreement_scores : array (float)
        The agreement score matrix.
    """
    import pandas as pd
@@ -401,16 +401,16 @@ def make_possible_match(agreement_scores, min_score):
 
     Parameters
     ----------
-    agreement_scores: pd.DataFrame
+    agreement_scores : pd.DataFrame
 
-    min_score: float
+    min_score : float
 
     Returns
     -------
-    best_match_12: pd.Series
+    best_match_12 : pd.Series
 
-    best_match_21: pd.Series
+    best_match_21 : pd.Series
 
    """
    unit1_ids = np.array(agreement_scores.index)
@@ -442,16 +442,16 @@ def make_best_match(agreement_scores, min_score):
 
     Parameters
     ----------
-    agreement_scores: pd.DataFrame
+    agreement_scores : pd.DataFrame
 
-    min_score: float
+    min_score : float
 
     Returns
     -------
-    best_match_12: pd.Series
+    best_match_12 : pd.Series
 
-    best_match_21: pd.Series
+    best_match_21 : pd.Series
 
    """
    import pandas as pd
@@ -490,14 +490,14 @@ def make_hungarian_match(agreement_scores, min_score):
     ----------
     agreement_scores: pd.DataFrame
 
-    min_score: float
+    min_score : float
 
     Returns
     -------
-    hungarian_match_12: pd.Series
+    hungarian_match_12 : pd.Series
 
-    hungarian_match_21: pd.Series
+    hungarian_match_21 : pd.Series
 
    """
    import pandas as pd
@@ -541,22 +541,22 @@ def do_score_labels(sorting1, sorting2, delta_frames, unit_map12, label_misclass
 
     Parameters
     ----------
-    sorting1: SortingExtractor instance
+    sorting1 : SortingExtractor instance
        The ground truth sorting
-    sorting2: SortingExtractor instance
+    sorting2 : SortingExtractor instance
        The tested sorting
-    delta_frames: int
+    delta_frames : int
        Number of frames to consider spikes coincident
-    unit_map12: pd.Series
+    unit_map12 : pd.Series
        Dict of matching from sorting1 to sorting2
-    label_misclassification: bool
+    label_misclassification : bool
        If True, misclassification errors are labelled
 
     Returns
     -------
-    labels_st1: dict of lists of np.array of str
+    labels_st1 : dict of lists of np.array of str
        Contain score labels for units of sorting 1 for each segment
-    labels_st2: dict of lists of np.array of str
+    labels_st2 : dict of lists of np.array of str
        Contain score labels for units of sorting 2 for each segment
    """
    unit1_ids = sorting1.get_unit_ids()
@@ -647,12 +647,12 @@ def compare_spike_trains(spiketrain1, spiketrain2, delta_frames=10):
 
     Parameters
     ----------
-    spiketrain1, spiketrain2: numpy.array
+    spiketrain1, spiketrain2 : numpy.array
        Times of spikes for the 2 spike trains.
 
     Returns
     -------
-    lab_st1, lab_st2: numpy.array
+    lab_st1, lab_st2 : numpy.array
        Label of score for each spike
    """
    lab_st1 = np.array(["UNPAIRED"] * len(spiketrain1))
@@ -684,19 +684,19 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun
 
     Parameters
     ----------
-    event_counts1: pd.Series
+    event_counts1 : pd.Series
        Number of event per units 1
-    event_counts2: pd.Series
+    event_counts2 : pd.Series
        Number of event per units 2
-    match_12: pd.Series
+    match_12 : pd.Series
        Series of matching from sorting1 to sorting2.
        Can be the hungarian or best match.
-    match_event_count: pd.DataFrame
+    match_event_count : pd.DataFrame
        The match count matrix given by make_match_count_matrix
 
     Returns
     -------
-    confusion_matrix: pd.DataFrame
+    confusion_matrix : pd.DataFrame
        The confusion matrix
        index are units1 reordered
        columns are units2 redordered
@@ -746,19 +746,19 @@ def do_count_score(event_counts1, event_counts2, match_12, match_event_count):
 
     Parameters
     ----------
-    event_counts1: pd.Series
+    event_counts1 : pd.Series
        Number of event per units 1
-    event_counts2: pd.Series
+    event_counts2 : pd.Series
        Number of event per units 2
-    match_12: pd.Series
+    match_12 : pd.Series
        Series of matching from sorting1 to sorting2.
        Can be the hungarian or best match.
-    match_event_count: pd.DataFrame
+    match_event_count : pd.DataFrame
        The match count matrix given by make_match_count_matrix
 
     Returns
     -------
-    count_score: pd.DataFrame
+    count_score : pd.DataFrame
        A table with one line per GT units and columns
        are tp/fn/fp/...
    """
@@ -837,16 +837,16 @@ def make_matching_events(times1, times2, delta):
 
     Parameters
     ----------
-    times1: list
+    times1 : list
        List of spike train 1 frames
-    times2: list
+    times2 : list
        List of spike train 2 frames
-    delta: int
+    delta : int
        Number of frames for considering matching events
 
     Returns
     -------
-    matching_event: numpy array dtype = ["index1", "index2", "delta"]
+    matching_event : numpy array dtype = ["index1", "index2", "delta"]
        1d of collision
    """
    times_concat = np.concatenate((times1, times2))
@@ -894,14 +894,14 @@ def make_collision_events(sorting, delta):
 
     Parameters
     ----------
-    sorting: SortingExtractor
+    sorting : SortingExtractor
        The sorting extractor object for counting collision events
-    delta: int
+    delta : int
        Number of frames for considering collision events
 
     Returns
     -------
-    collision_events: numpy array
+    collision_events : numpy array
        dtype =
        [('index1', 'int64'), ('unit_id1', 'int64'),
         ('index2', 'int64'), ('unit_id2', 'int64'),
         ('delta', 'int64')]
diff --git a/src/spikeinterface/curation/curation_format.py b/src/spikeinterface/curation/curation_format.py
index 88190a9bab..babe7aac40 100644
--- a/src/spikeinterface/curation/curation_format.py
+++ b/src/spikeinterface/curation/curation_format.py
@@ -87,7 +87,7 @@ def convert_from_sortingview_curation_format_v0(sortingview_dict, destination_fo
 
     Returns
     -------
-    curation_dict: dict
+    curation_dict : dict
        A curation dictionary
    """
 
@@ -138,7 +138,7 @@ def curation_label_to_vectors(curation_dict):
 
     Returns
     -------
-    labels: dict of numpy vector
+    labels : dict of numpy vector
 
    """
    unit_ids = list(curation_dict["unit_ids"])
diff --git a/src/spikeinterface/curation/curation_tools.py b/src/spikeinterface/curation/curation_tools.py
index 408a666613..3402638a16 100644
--- a/src/spikeinterface/curation/curation_tools.py
+++ b/src/spikeinterface/curation/curation_tools.py
@@ -106,18 +106,18 @@ def find_duplicated_spikes(
 
     Parameters
     ----------
-    spike_train: np.ndarray
+    spike_train : np.ndarray
        The spike train on which to look for duplicated spikes.
-    censored_period: int
+    censored_period : int
        The censored period for duplicates (in sample time).
-    method: "keep_first" |"keep_last" | "keep_first_iterative" | "keep_last_iterative" |random", default: "random"
+    method : "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default: "random"
        Method used to remove the duplicated spikes.
-    seed: int | None
+    seed : int | None
        The seed to use if method="random".
 
     Returns
     -------
-    indices_of_duplicates: np.ndarray
+    indices_of_duplicates : np.ndarray
        The indices of spikes considered to be duplicates.
    """
diff --git a/src/spikeinterface/curation/splitunitsorting.py b/src/spikeinterface/curation/splitunitsorting.py
index 33c14dfe5a..08ab704224 100644
--- a/src/spikeinterface/curation/splitunitsorting.py
+++ b/src/spikeinterface/curation/splitunitsorting.py
@@ -13,7 +13,7 @@ class SplitUnitSorting(BaseSorting):
 
     Parameters
     ----------
-    sorting: BaseSorting
+    sorting : BaseSorting
        The sorting object
     parent_unit_id : int
        Unit id of the unit to split
diff --git a/src/spikeinterface/sorters/container_tools.py b/src/spikeinterface/sorters/container_tools.py
index 8e03090eaf..6406919455 100644
--- a/src/spikeinterface/sorters/container_tools.py
+++ b/src/spikeinterface/sorters/container_tools.py
@@ -55,16 +55,16 @@ def __init__(self, mode, container_image, volumes, py_user_base, extra_kwargs):
        """
        Parameters
        ----------
-        mode: "docker" | "singularity"
+        mode : "docker" | "singularity"
            The container mode
-        container_image: str
+        container_image : str
            container image name and tag
-        volumes: dict
+        volumes : dict
            dict of volumes to bind
-        py_user_base: str
+        py_user_base : str
            Python user base folder to set as PYTHONUSERBASE env var in Singularity mode
            Prevents from overwriting user's packages when running pip install
-        extra_kwargs: dict
+        extra_kwargs : dict
            Extra kwargs to start container
        """
        assert mode in ("docker", "singularity")
@@ -180,28 +180,28 @@ def install_package_in_container(
 
     Parameters
     ----------
-    container_client: ContainerClient
+    container_client : ContainerClient
        The container client
-    package_name: str
+    package_name : str
        The package name
-    installation_mode: str
+    installation_mode : str
        The installation mode
-    extra: str
+    extra : str
        Extra pip install arguments, e.g. [full]
-    version: str
+    version : str
        The package version to install
-    tag: str
+    tag : str
        The github tag to install
-    github_url: str
+    github_url : str
        The github url to install (needed for github mode)
-    container_folder_source: str
+    container_folder_source : str
        The container folder source (needed for folder mode)
-    verbose: bool
+    verbose : bool
        If True, print output of pip install command
 
     Returns
     -------
-    res_output: str
+    res_output : str
        The output of the pip install command
    """
    assert installation_mode in ("pypi", "github", "folder")
diff --git a/src/spikeinterface/sorters/external/kilosort.py b/src/spikeinterface/sorters/external/kilosort.py
index 56adb3b632..102703f912 100644
--- a/src/spikeinterface/sorters/external/kilosort.py
+++ b/src/spikeinterface/sorters/external/kilosort.py
@@ -137,14 +137,14 @@ def _get_specific_options(cls, ops, params):
 
        Parameters
        ----------
-        ops: dict
+        ops : dict
            options data
-        params: dict
+        params : dict
            Custom parameters dictionary for kilosort3
 
        Returns
        ----------
-        ops: dict
+        ops : dict
            Final ops data
        """
diff --git a/src/spikeinterface/sorters/external/kilosort2.py b/src/spikeinterface/sorters/external/kilosort2.py
index 0425ad5e53..cedcfe2a5e 100644
--- a/src/spikeinterface/sorters/external/kilosort2.py
+++ b/src/spikeinterface/sorters/external/kilosort2.py
@@ -148,14 +148,14 @@ def _get_specific_options(cls, ops, params):
 
        Parameters
        ----------
-        ops: dict
+        ops : dict
            options data
-        params: dict
+        params : dict
            Custom parameters dictionary for kilosort3
 
        Returns
        ----------
-        ops: dict
+        ops : dict
            Final ops data
        """
diff --git a/src/spikeinterface/sorters/external/kilosort2_5.py b/src/spikeinterface/sorters/external/kilosort2_5.py
index b3d1718d59..ea93ffde0d 100644
--- a/src/spikeinterface/sorters/external/kilosort2_5.py
+++ b/src/spikeinterface/sorters/external/kilosort2_5.py
@@ -164,14 +164,14 @@ def _get_specific_options(cls, ops, params):
 
        Parameters
        ----------
-        ops: dict
+        ops : dict
            options data
-        params: dict
+        params : dict
            Custom parameters dictionary for kilosort3
 
        Returns
        ----------
-        ops: dict
+        ops : dict
            Final ops data
        """
        # frequency for high pass filtering (300)
diff --git a/src/spikeinterface/sorters/external/kilosort3.py b/src/spikeinterface/sorters/external/kilosort3.py
index f560fd7e1e..4066948e2e 100644
--- a/src/spikeinterface/sorters/external/kilosort3.py
+++ b/src/spikeinterface/sorters/external/kilosort3.py
@@ -160,14 +160,14 @@ def _get_specific_options(cls, ops, params):
 
        Parameters
        ----------
-        ops: dict
+        ops : dict
            options data
-        params: dict
+        params : dict
            Custom parameters dictionary for kilosort3
 
        Returns
        ----------
-        ops: dict
+        ops : dict
            Final ops data
        """
        # frequency for high pass filtering (150)
diff --git a/src/spikeinterface/sorters/external/kilosortbase.py b/src/spikeinterface/sorters/external/kilosortbase.py
index d04128b50c..ba8b88b6bc 100644
--- a/src/spikeinterface/sorters/external/kilosortbase.py
+++ b/src/spikeinterface/sorters/external/kilosortbase.py
@@ -79,11 +79,11 @@ def _generate_ops_file(cls, recording, params, sorter_output_folder, binary_file
 
        Parameters
        ----------
-        recording: BaseRecording
+        recording : BaseRecording
            The recording to generate the channel map file
-        params: dict
+        params : dict
            Custom parameters dictionary for kilosort
-        sorter_output_folder: pathlib.Path
+        sorter_output_folder : pathlib.Path
            Path object to save `ops.mat`
        """
        ops = {}
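All of patch 6 enforces a single numpydoc rule: a parameter header must read `name : type` with spaces around the colon, because the parser splits on that exact token. A small check of the difference (assuming `numpydoc` is installed; parser internals may vary slightly across versions):

```python
# numpydoc splits parameter headers on " : "; without the spaces the type
# is glued into the name and the rendered API docs lose the type field.
from numpydoc.docscrape import NumpyDocString

bad = NumpyDocString("Parameters\n----------\ndelta: int\n    Number of frames\n")
good = NumpyDocString("Parameters\n----------\ndelta : int\n    Number of frames\n")

print(bad["Parameters"][0].type)   # '' -- type not recognized
print(good["Parameters"][0].type)  # 'int'
```
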
From 6f63f76d0089c4458529b3653b45a85bec467374 Mon Sep 17 00:00:00 2001
From: Heberto Mayorquin
Date: Wed, 31 Jul 2024 15:44:16 -0300
Subject: [PATCH 7/7] patch widgets (#3238)

Co-authored-by: Alessio Buccino
---
 src/spikeinterface/widgets/utils_ipywidgets.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py
index e31f0e0444..0aae1777a9 100644
--- a/src/spikeinterface/widgets/utils_ipywidgets.py
+++ b/src/spikeinterface/widgets/utils_ipywidgets.py
@@ -10,7 +10,9 @@ def check_ipywidget_backend():
     import matplotlib
 
     mpl_backend = matplotlib.get_backend()
-    assert "ipympl" in mpl_backend, "To use the 'ipywidgets' backend, you have to set %matplotlib widget"
+    assert (
+        "ipympl" in mpl_backend or "widget" in mpl_backend
+    ), "To use the 'ipywidgets' backend, you have to set %matplotlib widget"
 
 
 class TimeSlider(W.HBox):
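Context for the relaxed assertion: with ipympl active via `%matplotlib widget`, `matplotlib.get_backend()` reports the backend either as the module path or as the short name depending on the matplotlib/ipympl versions, so the check now matches on either substring. A sketch of the two reported strings (the version specifics are an assumption, not stated in the patch):

```python
# Backend strings observed with ipympl active; the old assert accepted only
# the first form, while newer matplotlib installs report the second.
for reported in ("module://ipympl.backend_nbagg", "widget"):
    assert "ipympl" in reported or "widget" in reported, reported
```
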