From d2c06abc620856319e6fec95b4b0552b54d03b7f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 15 Mar 2024 12:13:40 +0000
Subject: [PATCH 1/3] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 doc/how_to/get_started.rst     | 34 +++++++++++++++++-----------------
 examples/how_to/get_started.py | 12 ++++++------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/doc/how_to/get_started.rst b/doc/how_to/get_started.rst
index 97dae29f72..30276a32d4 100644
--- a/doc/how_to/get_started.rst
+++ b/doc/how_to/get_started.rst
@@ -86,7 +86,7 @@ both a “recording” and a “sorting” object.
 
 .. parsed-literal::
 
-    MEArecRecordingExtractor: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s 
+    MEArecRecordingExtractor: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
     float32 dtype - 39.06 MiB
     file_path: /home/nolanlab/spikeinterface_datasets/ephy_testing_data/mearec/mearec_test_10s.h5
     MEArecSortingExtractor: 10 units - 1 segments - 32.0kHz
@@ -126,7 +126,7 @@ This is how you retrieve info from a ``BaseRecording``\ …
     fs = recording.get_sampling_frequency()
     num_chan = recording.get_num_channels()
     num_seg = recording.get_num_segments()
-    
+
     print("Channel ids:", channel_ids)
     print("Sampling frequency:", fs)
     print("Number of channels:", num_chan)
@@ -150,7 +150,7 @@ This is how you retrieve info from a ``BaseRecording``\ …
     num_seg = recording.get_num_segments()
     unit_ids = sorting_true.get_unit_ids()
     spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
-    
+
     print("Number of segments:", num_seg)
     print("Unit ids:", unit_ids)
     print("Spike train of first unit:", spike_train)
@@ -180,9 +180,9 @@ to set it *manually*.
 
     probe = recording.get_probe()
     print(probe)
-    
+
     from probeinterface.plotting import plot_probe
-    
+
     _ = plot_probe(probe)
 
@@ -214,7 +214,7 @@ object to disk.
     print(recording_f)
     recording_cmr = si.common_reference(recording_f, reference="global", operator="median")
     print(recording_cmr)
-    
+
     # this computes and saves the recording after applying the preprocessing chain
     recording_preprocessed = recording_cmr.save(format="binary")
     print(recording_preprocessed)
@@ -222,9 +222,9 @@ object to disk.
 
-    BandpassFilterRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s 
+    BandpassFilterRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
     float32 dtype - 39.06 MiB
-    CommonReferenceRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s 
+    CommonReferenceRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
     float32 dtype - 39.06 MiB
     Use cache_folder=/tmp/spikeinterface_cache/tmp8sr7ylv1/PVPX8CJL
     write_binary_recording with n_jobs = 4 and chunk_size = 32000
 
@@ -238,7 +238,7 @@ object to disk.
 
 .. parsed-literal::
 
-    BinaryFolderRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s 
+    BinaryFolderRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
     float32 dtype - 39.06 MiB
 
@@ -325,7 +325,7 @@ Alternatively we can pass a full dictionary containing the parameters:
 
     other_params = ss.get_default_sorter_params("tridesclous")
     other_params["detect_threshold"] = 6
-    
+
     # parameters set by params dictionary
     sorting_TDC_2 = ss.run_sorter(
         sorter_name="tridesclous", recording=recording_preprocessed, output_folder="tdc_output2", **other_params
@@ -638,11 +638,11 @@ accommodate the duration:
     .dataframe tbody tr th:only-of-type {
         vertical-align: middle;
     }
-    
+
     .dataframe tbody tr th {
         vertical-align: top;
    }
-    
+
     .dataframe thead th {
         text-align: right;
     }
@@ -961,7 +961,7 @@ on the “Save as snapshot (sha://)” and copy the URI:
 .. code:: ipython3
 
     uri = "sha1://68cb54a9aaed2303fb82dedbc302c853e818f1b6"
-    
+
     sorting_curated_sv = scur.apply_sortingview_curation(sorting_TDC, uri_or_json=uri)
     print(sorting_curated_sv)
     print(sorting_curated_sv.get_property("accept"))
@@ -1032,7 +1032,7 @@ above a certain threshold:
 
     keep_mask = (qm["snr"] > 10) & (qm["isi_violations_ratio"] < 0.01)
     print("Mask:", keep_mask.values)
-    
+
     sorting_curated_auto = sorting_TDC.select_units(sorting_TDC.unit_ids[keep_mask])
     print(sorting_curated_auto)
@@ -1076,7 +1076,7 @@ performance and plot a confusion matrix
 .. parsed-literal::
 
                 accuracy    recall precision false_discovery_rate miss_rate
-    gt_unit_id                                                             
+    gt_unit_id
     #0               1.0       1.0       1.0                  0.0       0.0
     #1               1.0       1.0       1.0                  0.0       0.0
     #2          0.976744  0.976744       1.0                  0.0  0.023256
@@ -1155,9 +1155,9 @@ graph showing how the units are matched between the sorters.
 .. code:: ipython3
 
     sorting_agreement = comp_multi.get_agreement_sorting(minimum_agreement_count=2)
-    
+
     print("Units in agreement between TDC, SC2, and KS2:", sorting_agreement.get_unit_ids())
-    
+
     w_multi = sw.plot_multicomparison_agreement(comp_multi)
     w_multi = sw.plot_multicomparison_agreement_by_sorter(comp_multi)
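The hunks above are whitespace-only: pre-commit strips trailing blanks from the tutorial's output blocks and code cells without changing what the tutorial does. For orientation while reading them, here is the preprocessing chain they pass through, reassembled as a minimal runnable sketch. The `si.read_mearec` loader matches the MEArec extractors shown in the first hunk, but the file name and the band-pass cutoffs (300-6000 Hz) are illustrative assumptions, not values taken from this patch.

.. code:: ipython3

    import spikeinterface.full as si

    # Read the MEArec test dataset used throughout the tutorial
    # (substitute your local copy of mearec_test_10s.h5).
    recording, sorting_true = si.read_mearec("mearec_test_10s.h5")

    # Preprocessing is lazy: each step returns a new recording view and
    # nothing is computed until traces are requested or saved.
    recording_f = si.bandpass_filter(recording, freq_min=300, freq_max=6000)
    recording_cmr = si.common_reference(recording_f, reference="global", operator="median")

    # Materialize the whole chain to a binary folder on disk.
    recording_preprocessed = recording_cmr.save(format="binary")
    print(recording_preprocessed)

Because the chain is lazy, the `save` call is the only step that reads the full traces, which is why the tutorial saves once after all preprocessing rather than after each step.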
diff --git a/examples/how_to/get_started.py b/examples/how_to/get_started.py
index 0c403d535a..4b843491fc 100644
--- a/examples/how_to/get_started.py
+++ b/examples/how_to/get_started.py
@@ -199,8 +199,8 @@
 print("Units found by tridesclous:", sorting_TDC.get_unit_ids())
 print("Units found by spyking-circus2:", sorting_SC2.get_unit_ids())
 
-# If a sorter is not installed locally, we can also avoid installing it and run it anyways, using a container (Docker or Singularity). 
-# To do this, you will need to install Docker. More information [here](https://spikeinterface.readthedocs.io/en/latest/modules/sorters.html?highlight=docker#running-sorters-in-docker-singularity-containers). 
+# If a sorter is not installed locally, we can also avoid installing it and run it anyways, using a container (Docker or Singularity).
+# To do this, you will need to install Docker. More information [here](https://spikeinterface.readthedocs.io/en/latest/modules/sorters.html?highlight=docker#running-sorters-in-docker-singularity-containers).
 # Let's run `Kilosort2` using Docker:
 
 sorting_KS2 = ss.run_sorter(sorter_name="kilosort2", recording=recording_preprocessed, docker_image=True, verbose=True)
@@ -213,7 +213,7 @@
 
 # This folder is where all the postprocessing data will be saved such as waveforms and templates. Let's calculate
 # some waveforms. When doing this, the function samples some spikes (by default `max_spikes_per_unit=500`)
-# for each unit, extracts their waveforms, and stores them to disk in `extensions/waveforms`. 
+# for each unit, extracts their waveforms, and stores them to disk in `extensions/waveforms`.
 # These waveforms are helpful to compute the average waveform, or "template", for each unit and then to compute, for example, quality metrics.
 # Computations with the `SortingAnalyzer` object are done using the `compute` method:
@@ -258,13 +258,13 @@
 # +
 print(sa_TDC.get_saved_extension_names())
 print(sa_TDC.get_loaded_extension_names())
-# - 
+# -
 
 # ...or delete an extension...
 
 # +
 sa_TDC.delete_extension("spike_amplitudes")
-# - 
+# -
 
 # This deletes the extension's data in the `SortingAnalyzer` folder.
 #
@@ -282,7 +282,7 @@
 sa_TDC.compute("spike_amplitudes")
 # -
 
-# Once we have computed all of the postprocessing information, we can compute quality 
+# Once we have computed all of the postprocessing information, we can compute quality
 # metrics (some quality metrics require certain extensions - e.g., drift metrics require `spike_locations`):
 
 qm_params = sqm.get_default_qm_params()
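The @@ -1076 hunk in the .rst diff above shows the ground-truth performance table but not the code that produces it. A condensed sketch of that comparison step, assuming `sorting_true` (the MEArec ground truth) and `sorting_TDC` (the tridesclous output) from earlier in the tutorial, plus the tutorial's usual `sc`/`sw` submodule aliases:

.. code:: ipython3

    import spikeinterface.comparison as sc
    import spikeinterface.widgets as sw

    # Match sorted units against the known ground-truth spike trains.
    comp_gt = sc.compare_sorter_to_ground_truth(sorting_true, sorting_TDC)

    # Per-unit accuracy/recall/precision table, as shown in the
    # parsed-literal block of the @@ -1076 hunk.
    print(comp_gt.get_performance())

    # Confusion matrix between ground-truth and sorted units.
    w_conf = sw.plot_confusion_matrix(comp_gt)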
From f3330c66587f8ebb69d48b72d7085a4470921dd2 Mon Sep 17 00:00:00 2001
From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com>
Date: Fri, 15 Mar 2024 15:30:00 +0000
Subject: [PATCH 2/3] Update examples/how_to/get_started.py

Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com>
---
 examples/how_to/get_started.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/examples/how_to/get_started.py b/examples/how_to/get_started.py
index 4b843491fc..a156836424 100644
--- a/examples/how_to/get_started.py
+++ b/examples/how_to/get_started.py
@@ -59,9 +59,10 @@
 import spikeinterface.curation as scur
 import spikeinterface.widgets as sw
 
-# Alternatively, we can import all submodules at once which
+# Alternatively, we can import all submodules at once with `import spikeinterface.full as si`, which
 # internally imports core+extractors+preprocessing+sorters+postprocessing+
-# qualitymetrics+comparison+widgets+exporters
+# qualitymetrics+comparison+widgets+exporters. In this case, all of the aliases in this tutorial
+# would simply be `si`.
 #
 # This is useful for notebooks, but it is a heavier import because internally many more dependencies
 # are imported (scipy/sklearn/networkx/matplotlib/h5py...)

From b5eb69aab142792abc02c3aef544a4023efe7636 Mon Sep 17 00:00:00 2001
From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com>
Date: Fri, 15 Mar 2024 15:30:12 +0000
Subject: [PATCH 3/3] Update examples/how_to/get_started.py

Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com>
---
 examples/how_to/get_started.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/how_to/get_started.py b/examples/how_to/get_started.py
index a156836424..7d324378ad 100644
--- a/examples/how_to/get_started.py
+++ b/examples/how_to/get_started.py
@@ -210,7 +210,7 @@
 # For postprocessing SpikeInterface pairs recording and sorting objects into a `SortingAnalyzer` object.
 # The `SortingAnalyzer` can be loaded in memory or saved in a folder. Here, we save it in binary format.
 
-sa_TDC = si.create_sorting_analyzer(sorting_TDC, recording_preprocessed, format='binary_folder', folder='sa_TDC_binary')
+sa_TDC = si.create_sorting_analyzer(sorting=sorting_TDC, recording=recording_preprocessed, format='binary_folder', folder='sa_TDC_binary')
 
 # This folder is where all the postprocessing data will be saved such as waveforms and templates. Let's calculate
 # some waveforms. When doing this, the function samples some spikes (by default `max_spikes_per_unit=500`)
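Taken together, the three patches leave the postprocessing section working roughly as below. This is a hedged sketch rather than the tutorial's exact cell sequence: the keyword arguments to `create_sorting_analyzer` come from patch 3 itself, while the extension list and the `compute_quality_metrics` call follow the `SortingAnalyzer` API referenced in the hunks (the extension names other than `spike_amplitudes` are assumptions about its typical prerequisites).

.. code:: ipython3

    import spikeinterface.full as si
    import spikeinterface.qualitymetrics as sqm

    # Pair the sorting with the preprocessed recording (keyword arguments
    # as introduced by patch 3) and persist it to a binary folder.
    sa_TDC = si.create_sorting_analyzer(
        sorting=sorting_TDC,
        recording=recording_preprocessed,
        format="binary_folder",
        folder="sa_TDC_binary",
    )

    # Extensions are computed on demand; random spikes, waveforms, and
    # templates are computed here as prerequisites for spike amplitudes.
    sa_TDC.compute(["random_spikes", "waveforms", "templates", "spike_amplitudes"])

    # Quality metrics build on the computed extensions.
    qm = sqm.compute_quality_metrics(sa_TDC)
    print(qm)

Units can then be filtered on these metrics exactly as in the `keep_mask` hunk of patch 1.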