Merge branch 'sorting_in_get_started' of https://github.com/chrishalcrow/spikeinterface into sorting_in_get_started
chrishalcrow committed Mar 15, 2024
2 parents 0da56a6 + b5eb69a commit 7d1899d
Showing 2 changed files with 26 additions and 25 deletions.
34 changes: 17 additions & 17 deletions doc/how_to/get_started.rst
@@ -88,7 +88,7 @@ both a “recording” and a “sorting” object.
.. parsed-literal::
MEArecRecordingExtractor: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
float32 dtype - 39.06 MiB
file_path: /home/nolanlab/spikeinterface_datasets/ephy_testing_data/mearec/mearec_test_10s.h5
MEArecSortingExtractor: 10 units - 1 segments - 32.0kHz
@@ -128,7 +128,7 @@ This is how you retrieve info from a ``BaseRecording``\ …
fs = recording.get_sampling_frequency()
num_chan = recording.get_num_channels()
num_seg = recording.get_num_segments()
print("Channel ids:", channel_ids)
print("Sampling frequency:", fs)
print("Number of channels:", num_chan)
@@ -152,7 +152,7 @@ This is how you retrieve info from a ``BaseRecording``\ …
num_seg = recording.get_num_segments()
unit_ids = sorting_true.get_unit_ids()
spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
print("Number of segments:", num_seg)
print("Unit ids:", unit_ids)
print("Spike train of first unit:", spike_train)
@@ -182,9 +182,9 @@ to set it *manually*.
probe = recording.get_probe()
print(probe)
from probeinterface.plotting import plot_probe
_ = plot_probe(probe)
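If a recording does not come with a probe attached, it can be wired up manually with ``probeinterface``. A minimal sketch (the linear geometry and one-to-one channel mapping below are illustrative assumptions, not part of this dataset):

.. code:: ipython3

    import numpy as np
    from probeinterface import generate_linear_probe

    # build a simple 32-contact linear probe and map its contacts to the device channels
    manual_probe = generate_linear_probe(num_elec=32, ypitch=20)
    manual_probe.set_device_channel_indices(np.arange(32))
    recording_with_probe = recording.set_probe(manual_probe)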
@@ -216,17 +216,17 @@ object to disk.
print(recording_f)
recording_cmr = si.common_reference(recording_f, reference="global", operator="median")
print(recording_cmr)
# this computes and saves the recording after applying the preprocessing chain
recording_preprocessed = recording_cmr.save(format="binary")
print(recording_preprocessed)
.. parsed-literal::
BandpassFilterRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
float32 dtype - 39.06 MiB
CommonReferenceRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
float32 dtype - 39.06 MiB
Use cache_folder=/tmp/spikeinterface_cache/tmp8zkscdxr/3IT027JP
write_binary_recording with n_jobs = 4 and chunk_size = 32000
@@ -240,7 +240,7 @@ object to disk.
.. parsed-literal::
BinaryFolderRecording: 32 channels - 32.0kHz - 1 segments - 320,000 samples - 10.00s
float32 dtype - 39.06 MiB
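The saved folder can be reloaded later without re-running the preprocessing chain. A sketch (the explicit folder name is an assumption; above, the recording was written to the temporary cache instead):

.. code:: ipython3

    # save to a named folder, then lazily load it back in a fresh session
    recording_saved = recording_cmr.save(format="binary", folder="preprocessed_recording")
    recording_loaded = si.load_extractor("preprocessed_recording")
    print(recording_loaded)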
@@ -327,7 +327,7 @@ Alternatively we can pass a full dictionary containing the parameters:
other_params = ss.get_default_sorter_params("tridesclous")
other_params["detect_threshold"] = 6
# parameters set by params dictionary
sorting_TDC_2 = ss.run_sorter(
sorter_name="tridesclous", recording=recording_preprocessed, output_folder="tdc_output2", **other_params
@@ -671,11 +671,11 @@ in the same way as earlier
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
@@ -1001,7 +1001,7 @@ on the “Save as snapshot (sha://)” and copy the URI:
.. code:: ipython3
uri = "sha1://68cb54a9aaed2303fb82dedbc302c853e818f1b6"
sorting_curated_sv = scur.apply_sortingview_curation(sorting_TDC, uri_or_json=uri)
print(sorting_curated_sv)
print(sorting_curated_sv.get_property("accept"))
@@ -1073,7 +1073,7 @@ above a certain threshold:
qm_data = analyzer_TDC.get_extension("quality_metrics").get_data()
keep_mask = (qm_data["snr"] > 10) & (qm_data["isi_violations_ratio"] < 0.01)
print("Mask:", keep_mask.values)
sorting_curated_auto = sorting_TDC.select_units(sorting_TDC.unit_ids[keep_mask])
print(sorting_curated_auto)
@@ -1117,7 +1117,7 @@ performance and plot a confusion matrix
.. parsed-literal::
accuracy recall precision false_discovery_rate miss_rate
gt_unit_id
#0 1.0 1.0 1.0 0.0 0.0
#1 1.0 1.0 1.0 0.0 0.0
#2 0.976744 0.976744 1.0 0.0 0.023256
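A minimal sketch of how a table like this and the confusion matrix can be produced (the ``sc`` alias for ``spikeinterface.comparison`` and the exact call are assumptions here):

.. code:: ipython3

    import spikeinterface.comparison as sc

    # compare the sorter output against the ground-truth sorting
    comp_gt = sc.compare_sorter_to_ground_truth(sorting_true, sorting_TDC)
    print(comp_gt.get_performance())
    w_conf = sw.plot_confusion_matrix(comp_gt)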
@@ -1196,9 +1196,9 @@ graph showing how the units are matched between the sorters.
.. code:: ipython3
sorting_agreement = comp_multi.get_agreement_sorting(minimum_agreement_count=2)
print("Units in agreement between TDC, SC2, and KS2:", sorting_agreement.get_unit_ids())
w_multi = sw.plot_multicomparison_agreement(comp_multi)
w_multi = sw.plot_multicomparison_agreement_by_sorter(comp_multi)
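For completeness, a sketch of how the multi-sorter comparison object itself can be built (the sorter list and names are assumptions based on the sorters used above):

.. code:: ipython3

    comp_multi = sc.compare_multiple_sorters(
        sorting_list=[sorting_TDC, sorting_SC2, sorting_KS2],
        name_list=["TDC", "SC2", "KS2"],
    )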
17 changes: 9 additions & 8 deletions examples/how_to/get_started.py
@@ -59,9 +59,10 @@
import spikeinterface.curation as scur
import spikeinterface.widgets as sw

# Alternatively, we can import all submodules at once with `import spikeinterface.full as si` which
# internally imports core+extractors+preprocessing+sorters+postprocessing+
# qualitymetrics+comparison+widgets+exporters. In this case all aliases in the following tutorial
# would be `si`.

# This is useful for notebooks, but it is a heavier import because internally many more dependencies
@@ -200,8 +201,8 @@
print("Units found by tridesclous:", sorting_TDC.get_unit_ids())
print("Units found by spyking-circus2:", sorting_SC2.get_unit_ids())

# If a sorter is not installed locally, we can also avoid installing it and run it anyways, using a container (Docker or Singularity).
# To do this, you will need to install Docker. More information [here](https://spikeinterface.readthedocs.io/en/latest/modules/sorters.html?highlight=docker#running-sorters-in-docker-singularity-containers).
# Let's run `Kilosort2` using Docker:

sorting_KS2 = ss.run_sorter(sorter_name="kilosort2", recording=recording_preprocessed, docker_image=True, verbose=True)
@@ -210,11 +211,11 @@
# For postprocessing SpikeInterface pairs recording and sorting objects into a `SortingAnalyzer` object.
# The `SortingAnalyzer` can be loaded in memory or saved in a folder. Here, we save it in binary format.

analyzer_TDC = si.create_sorting_analyzer(sorting=sorting_TDC, recording=recording_preprocessed, format='binary_folder', folder='analyzer_TDC_binary')

# This folder is where all the postprocessing data will be saved such as waveforms and templates. Let's calculate
# some waveforms. When doing this, the function samples some spikes (by default `max_spikes_per_unit=500`)
# for each unit, extracts their waveforms, and stores them to disk in `extensions/waveforms`.
# These waveforms are helpful to compute the average waveform, or "template", for each unit and then to compute, for example, quality metrics.
# Computations with the `SortingAnalyzer` object are done using the `compute` method:

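# As a minimal sketch (the extension names below are assumptions), a typical chain of `compute`
# calls selects a subset of spikes, extracts their waveforms, and builds the templates:

# +
analyzer_TDC.compute("random_spikes")
analyzer_TDC.compute("waveforms")
analyzer_TDC.compute("templates")
# -
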
@@ -266,13 +267,13 @@
# +
print(analyzer_TDC.get_saved_extension_names())
print(analyzer_TDC.get_loaded_extension_names())
# -

# ...or delete an extension...

# +
analyzer_TDC.delete_extension("spike_amplitudes")
# -

# This deletes the extension's data in the `SortingAnalyzer` folder.
#
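# Because the analyzer lives in a folder on disk, it can also be reloaded in a later session.
# A sketch (assuming `load_sorting_analyzer` is available in this version, and using the folder
# name from above):

# +
analyzer_reloaded = si.load_sorting_analyzer("analyzer_TDC_binary")
print(analyzer_reloaded)
# -
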
@@ -290,7 +291,7 @@
analyzer_TDC.compute("spike_amplitudes")
# -

# Once we have computed all of the postprocessing information, we can compute quality
# metrics (some quality metrics require certain extensions - e.g., drift metrics require `spike_locations`):

qm_params = sqm.get_default_qm_params()
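
# A sketch of feeding these parameters into the quality-metric computation (the `qm_params`
# keyword for `compute` is an assumption here):

# +
analyzer_TDC.compute("quality_metrics", qm_params=qm_params)
print(analyzer_TDC.get_extension("quality_metrics").get_data())
# -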
