diff --git a/.github/actions/build-test-environment/action.yml b/.github/actions/build-test-environment/action.yml index 53fcd37f45..723e8a702f 100644 --- a/.github/actions/build-test-environment/action.yml +++ b/.github/actions/build-test-environment/action.yml @@ -21,7 +21,7 @@ runs: python -m pip install -U pip # Official recommended way source ${{ github.workspace }}/test_env/bin/activate pip install tabulate # This produces summaries at the end - pip install -e .[test,extractors,streaming_extractors,full] + pip install -e .[test,extractors,streaming_extractors,test_extractors,full] shell: bash - name: Force installation of latest dev from key-packages when running dev (not release) run: | diff --git a/.github/actions/install-wine/action.yml b/.github/actions/install-wine/action.yml index 3ae08ecd34..85e70b471d 100644 --- a/.github/actions/install-wine/action.yml +++ b/.github/actions/install-wine/action.yml @@ -2,20 +2,29 @@ name: Install packages description: This action installs the package and its dependencies for testing inputs: - python-version: - description: 'Python version to set up' - required: false os: description: 'Operating system to set up' - required: false + required: true runs: using: "composite" steps: - - name: Install wine (needed for Plexon2) + - name: Install wine on Linux + if: runner.os == 'Linux' run: | sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list sudo dpkg --add-architecture i386 sudo apt-get update -qq sudo apt-get install -yqq --allow-downgrades libc6:i386 libgcc-s1:i386 libstdc++6:i386 wine shell: bash + - name: Install wine on macOS + if: runner.os == 'macOS' + run: | + brew install --cask xquartz + brew install --cask wine-stable + shell: bash + + - name: Skip installation on Windows + if: ${{ inputs.os == 'Windows' }} + run: echo "Skipping Wine installation on Windows. Not necessary." 
+ shell: bash diff --git a/.github/determine_testing_environment.py b/.github/determine_testing_environment.py index 4945ccc807..95ad0afc49 100644 --- a/.github/determine_testing_environment.py +++ b/.github/determine_testing_environment.py @@ -30,56 +30,59 @@ exporters_changed = False sortingcomponents_changed = False generation_changed = False +stream_extractors_changed = False for changed_file in changed_files_in_the_pull_request_paths: file_is_in_src = changed_file.parts[0] == "src" - if not file_is_in_src: - - if changed_file.name == "pyproject.toml": - pyproject_toml_changed = True - - else: - if changed_file.name == "neobaseextractor.py": - neobaseextractor_changed = True - elif changed_file.name == "plexon2.py": - extractors_changed = True - elif "core" in changed_file.parts: - conditions_changed = True - elif "extractors" in changed_file.parts: - extractors_changed = True - elif "preprocessing" in changed_file.parts: - preprocessing_changed = True - elif "postprocessing" in changed_file.parts: - postprocessing_changed = True - elif "qualitymetrics" in changed_file.parts: - qualitymetrics_changed = True - elif "comparison" in changed_file.parts: - comparison_changed = True - elif "curation" in changed_file.parts: - curation_changed = True - elif "widgets" in changed_file.parts: - widgets_changed = True - elif "exporters" in changed_file.parts: - exporters_changed = True - elif "sortingcomponents" in changed_file.parts: - sortingcomponents_changed = True - elif "generation" in changed_file.parts: - generation_changed = True - elif "sorters" in changed_file.parts: - if "external" in changed_file.parts: - sorters_external_changed = True - elif "internal" in changed_file.parts: - sorters_internal_changed = True - else: - sorters_changed = True + if changed_file.name == "pyproject.toml": + pyproject_toml_changed = True + elif changed_file.name == "neobaseextractor.py": + neobaseextractor_changed = True + extractors_changed = True + elif changed_file.name == "plexon2.py": + plexon2_changed = True + elif changed_file.name == "nwbextractors.py": + extractors_changed = True # There are NWB tests that are not streaming + stream_extractors_changed = True + elif changed_file.name == "iblextractors.py": + stream_extractors_changed = True + elif "core" in changed_file.parts: + core_changed = True + elif "extractors" in changed_file.parts: + extractors_changed = True + elif "preprocessing" in changed_file.parts: + preprocessing_changed = True + elif "postprocessing" in changed_file.parts: + postprocessing_changed = True + elif "qualitymetrics" in changed_file.parts: + qualitymetrics_changed = True + elif "comparison" in changed_file.parts: + comparison_changed = True + elif "curation" in changed_file.parts: + curation_changed = True + elif "widgets" in changed_file.parts: + widgets_changed = True + elif "exporters" in changed_file.parts: + exporters_changed = True + elif "sortingcomponents" in changed_file.parts: + sortingcomponents_changed = True + elif "generation" in changed_file.parts: + generation_changed = True + elif "sorters" in changed_file.parts: + if "external" in changed_file.parts: + sorters_external_changed = True + elif "internal" in changed_file.parts: + sorters_internal_changed = True + else: + sorters_changed = True run_everything = core_changed or pyproject_toml_changed or neobaseextractor_changed run_generation_tests = run_everything or generation_changed -run_extractor_tests = run_everything or extractors_changed +run_extractor_tests = run_everything or extractors_changed or 
plexon2_changed run_preprocessing_tests = run_everything or preprocessing_changed run_postprocessing_tests = run_everything or postprocessing_changed run_qualitymetrics_tests = run_everything or qualitymetrics_changed @@ -93,8 +96,11 @@ run_sorters_test = run_everything or sorters_changed run_internal_sorters_test = run_everything or run_sortingcomponents_tests or sorters_internal_changed +run_streaming_extractors_test = stream_extractors_changed + install_plexon_dependencies = plexon2_changed + environment_varaiables_to_add = { "RUN_EXTRACTORS_TESTS": run_extractor_tests, "RUN_PREPROCESSING_TESTS": run_preprocessing_tests, @@ -109,6 +115,7 @@ "RUN_SORTERS_TESTS": run_sorters_test, "RUN_INTERNAL_SORTERS_TESTS": run_internal_sorters_test, "INSTALL_PLEXON_DEPENDENCIES": install_plexon_dependencies, + "RUN_STREAMING_EXTRACTORS_TESTS": run_streaming_extractors_test, } # Write the conditions to the GITHUB_ENV file diff --git a/.github/workflows/all-tests.yml b/.github/workflows/all-tests.yml index cce73a9008..855fd072de 100644 --- a/.github/workflows/all-tests.yml +++ b/.github/workflows/all-tests.yml @@ -68,6 +68,7 @@ jobs: echo "RUN_SORTERS_TESTS=${RUN_SORTERS_TESTS}" echo "RUN_INTERNAL_SORTERS_TESTS=${RUN_INTERNAL_SORTERS_TESTS}" echo "INSTALL_PLEXON_DEPENDENCIES=${INSTALL_PLEXON_DEPENDENCIES}" + echo "RUN_STREAMING_EXTRACTORS_TESTS=${RUN_STREAMING_EXTRACTORS_TESTS}" - name: Install packages run: | @@ -78,9 +79,8 @@ jobs: run: pytest -m "core" shell: bash - - name: Install Other Testing Dependencies + - name: Install Dependencies for Timing Display run: | - pip install -e .[test] pip install tabulate pip install pandas shell: bash @@ -91,6 +91,7 @@ jobs: run: echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT - name: Cache datasets + if: env.RUN_EXTRACTORS_TESTS == 'true' id: cache-datasets uses: actions/cache/restore@v4 with: @@ -119,6 +120,9 @@ fi git config --global filter.annex.process "git-annex filter-process" # recommended for efficiency + - name: Install Plexon dependencies + if: env.INSTALL_PLEXON_DEPENDENCIES == 'true' + uses: ./.github/actions/install-wine - name: Set execute permissions on run_tests.sh shell: bash @@ -130,16 +134,28 @@ env: HDF5_PLUGIN_PATH: ${{ github.workspace }}/hdf5_plugin_path_maxwell if: env.RUN_EXTRACTORS_TESTS == 'true' run: | - pip install -e .[extractors,streaming_extractors] + pip install -e .[extractors,streaming_extractors,test_extractors] ./.github/run_tests.sh "extractors and not streaming_extractors" --no-virtual-env + - name: Test streaming extractors + shell: bash + if: env.RUN_STREAMING_EXTRACTORS_TESTS == 'true' + run: | + pip install -e .[streaming_extractors,test_extractors] + ./.github/run_tests.sh "streaming_extractors" --no-virtual-env + - name: Test preprocessing shell: bash if: env.RUN_PREPROCESSING_TESTS == 'true' run: | - pip install -e .[preprocessing] + pip install -e .[preprocessing,test_preprocessing] ./.github/run_tests.sh "preprocessing and not deepinterpolation" --no-virtual-env + - name: Install remaining testing dependencies # TODO: Remove this step once we have better modularization + shell: bash + run: | + pip install -e .[test] + - name: Test postprocessing shell: bash if: env.RUN_POSTPROCESSING_TESTS == 'true' diff --git a/.github/workflows/full-test.yml b/.github/workflows/full-test.yml deleted file mode 100644 index ed2f28dc23..0000000000 --- a/.github/workflows/full-test.yml +++ /dev/null @@ -1,167 +0,0 @@ -name: Full spikeinterface tests -
-on: - pull_request: - types: [synchronize, opened, reopened] - branches: - - main - -concurrency: # Cancel previous workflows on the same pull request - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: # For the sortingview backend - KACHERY_CLOUD_CLIENT_ID: ${{ secrets.KACHERY_CLOUD_CLIENT_ID }} - KACHERY_CLOUD_PRIVATE_KEY: ${{ secrets.KACHERY_CLOUD_PRIVATE_KEY }} - -jobs: - full-tests-depending-on-changed-files: - name: Test on (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - # "macos-latest", "windows-latest" - os: ["ubuntu-latest", ] - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: '3.10' - - name: Get current year-month - id: date - run: echo "date=$(date +'%Y-%m')" >> $GITHUB_OUTPUT - - name: Get ephy_testing_data current head hash - # the key depends on the last comit repo https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git - id: vars - run: | - echo "HASH_EPHY_DATASET=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT - - name: Restore cached gin data for extractors tests - uses: actions/cache/restore@v4 - id: cache-datasets - env: - # the key depends on the last comit repo https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git - HASH_EPHY_DATASET: git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1 - with: - path: ~/spikeinterface_datasets - key: ${{ runner.os }}-datasets-${{ steps.vars.outputs.HASH_EPHY_DATASET }} - restore-keys: ${{ runner.os }}-datasets - - name: Install packages - uses: ./.github/actions/build-test-environment - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v41 - - name: Module changes - id: modules-changed - run: | - for file in ${{ steps.changed-files.outputs.all_changed_files }}; do - if [[ $file == *"pyproject.toml" ]]; then - echo "pyproject.toml changed" - echo "CORE_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/core/"* || $file == *"/extractors/neoextractors/neobaseextractor.py" ]]; then - echo "Core changed" - echo "CORE_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/extractors/"* ]]; then - echo "Extractors changed" - echo "EXTRACTORS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"plexon2"* ]]; then - echo "Plexon2 changed" - echo "PLEXON2_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/preprocessing/"* ]]; then - echo "Preprocessing changed" - echo "PREPROCESSING_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/postprocessing/"* ]]; then - echo "Postprocessing changed" - echo "POSTPROCESSING_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/qualitymetrics/"* ]]; then - echo "Quality metrics changed" - echo "QUALITYMETRICS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/sorters/"* && $file != *"/sorters/internal/"* && $file != *"/sorters/external/"* ]]; then - echo "Sorters changed" - echo "SORTERS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/sorters/external"* ]]; then - echo "External sorters changed" - echo "SORTERS_EXTERNAL_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/sorters/internal"* ]]; then - echo "Internal sorters changed" - echo "SORTERS_INTERNAL_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/comparison/"* ]]; then - echo "Comparison changed" - echo "COMPARISON_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/curation/"* ]]; then - echo "Curation changed" - echo 
"CURATION_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/widgets/"* ]]; then - echo "Widgets changed" - echo "WIDGETS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/exporters/"* ]]; then - echo "Exporters changed" - echo "EXPORTERS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/sortingcomponents/"* ]]; then - echo "Sortingcomponents changed" - echo "SORTINGCOMPONENTS_CHANGED=true" >> $GITHUB_OUTPUT - fi - if [[ $file == *"/generation/"* ]]; then - echo "Generation changed" - echo "GENERATION_CHANGED=true" >> $GITHUB_OUTPUT - fi - done - - name: Set execute permissions on run_tests.sh - run: chmod +x .github/run_tests.sh - - name: Install Wine (Plexon2) - if: ${{ steps.modules-changed.outputs.PLEXON2_CHANGED == 'true' }} - uses: ./.github/actions/install-wine - - name: Test core - run: ./.github/run_tests.sh core - - name: Test extractors - env: - HDF5_PLUGIN_PATH: ${{ github.workspace }}/hdf5_plugin_path_maxwell - if: ${{ steps.modules-changed.outputs.EXTRACTORS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh "extractors and not streaming_extractors" - - name: Test preprocessing - if: ${{ steps.modules-changed.outputs.PREPROCESSING_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh "preprocessing and not deepinterpolation" - - name: Test postprocessing - if: ${{ steps.modules-changed.outputs.POSTPROCESSING_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh postprocessing - - name: Test quality metrics - if: ${{ steps.modules-changed.outputs.QUALITYMETRICS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh qualitymetrics - - name: Test core sorters - if: ${{ steps.modules-changed.outputs.SORTERS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh sorters - - name: Test comparison - if: ${{ steps.modules-changed.outputs.COMPARISON_CHANGED == 'true' || steps.modules-changed.outputs.GENERATION_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh comparison - - name: Test curation - if: ${{ steps.modules-changed.outputs.CURATION_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh curation - - name: Test widgets - if: ${{ steps.modules-changed.outputs.WIDGETS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' || steps.modules-changed.outputs.QUALITYMETRICS_CHANGED == 'true' || steps.modules-changed.outputs.PREPROCESSING_CHANGED == 'true'}} - run: ./.github/run_tests.sh widgets - - name: Test exporters - if: ${{ steps.modules-changed.outputs.EXPORTERS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' || steps.modules-changed.outputs.WIDGETS_CHANGED == 'true' }} - run: ./.github/run_tests.sh exporters - - name: Test sortingcomponents - if: ${{ steps.modules-changed.outputs.SORTINGCOMPONENTS_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh sortingcomponents - - name: Test internal sorters - if: ${{ steps.modules-changed.outputs.SORTERS_INTERNAL_CHANGED == 'true' || steps.modules-changed.outputs.SORTINGCOMPONENTS_CHANGED || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh sorters_internal - - name: Test generation - if: ${{ 
steps.modules-changed.outputs.GENERATION_CHANGED == 'true' || steps.modules-changed.outputs.CORE_CHANGED == 'true' }} - run: ./.github/run_tests.sh generation diff --git a/.github/workflows/streaming-extractor-test.yml b/.github/workflows/streaming-extractor-test.yml deleted file mode 100644 index 0cb88c3077..0000000000 --- a/.github/workflows/streaming-extractor-test.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Test streaming extractors - -on: - pull_request: - types: [synchronize, opened, reopened] - branches: - - main - -concurrency: # Cancel previous workflows on the same pull request - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test-streaming-extractors: - name: Testing using ${{ matrix.os }} with ${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - python-version: ["3.10"] - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - uses: s-weigand/setup-conda@v1 - with: - update-conda: true - python-version: ${{ matrix.python-version }} - conda-channels: conda-forge - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v41 - - name: Module changes - id: modules-changed - run: | - for file in ${{ steps.changed-files.outputs.all_changed_files }}; do - if [[ $file == *"/nwbextractors.py" || $file == *"/iblstreamingrecording.py"* ]]; then - echo "Streaming files changed changed" - echo "STREAMING_CHANGED=true" >> $GITHUB_OUTPUT - fi - done - - name: Install package and streaming extractor dependencies - if: ${{ steps.modules-changed.outputs.STREAMING_CHANGED == 'true' }} - run: pip install -e .[test_core,streaming_extractors] - - name: run tests - if: steps.modules-changed.outputs.STREAMING_CHANGED == 'true' - run: pytest -m "streaming_extractors" -vv -ra diff --git a/.gitignore b/.gitignore index 6c9fa6869f..9c4dda937c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,52 +1,8 @@ **/cache_folder/** -spikeinterface/core/tests/*.raw -spikeinterface/core/tests/*.json -spikeinterface/core/tests/*.pkl -spikeinterface/core/tests/*.npz - -spikeinterface/core/tests/*/*.json -spikeinterface/core/tests/*/*.raw -spikeinterface/core/tests/*/*.npy - -spikeinterface/core/tests/*/*/*.json -spikeinterface/core/tests/*/*/*.raw -spikeinterface/core/tests/*/*/*.npy - - -spikeinterface/extractors/tests/*/*/*.json -spikeinterface/extractors/tests/*/*/*.raw -spikeinterface/extractors/tests/*.npz -spikeinterface/extractors/tests/*.npy -spikeinterface/extractors/tests/extractor_testing_files/* - -spikeinterface/toolkit/*/tests/*/*.json -spikeinterface/toolkit/*/tests/*/*.raw -spikeinterface/toolkit/*/tests/*/*.npy -spikeinterface/toolkit/*/tests/*/*.npz -spikeinterface/toolkit/*/tests/*/*/*.json -spikeinterface/toolkit/*/tests/*/*/*.raw -spikeinterface/toolkit/*/tests/*/*/*.npy -spikeinterface/toolkit/*/tests/*/*/*.npz - -spikeinterface/toolkit/*/tests/*_output/* - -spikeinterface/exporters/tests/*/* - -spikeinterface/sorters/tests/*/* - - spikeinterface/widgets/tests/mearec_test/* -examples/modules_gallery/**/*.raw -examples/modules_gallery/**/*.npy -examples/modules_gallery/**/*.json -examples/modules_gallery/**/*.pkl -examples/modules_gallery/**/*.npz -examples/modules_gallery/**/*.csv -examples/modules_gallery/**/*.zarr - - +# Vscode .vscode/* # MauroToro: My absolute mess of envs @@ -107,21 +63,6 @@ coverage.xml .hypothesis/ .pytest_cache/ -# Translations -*.mo -*.pot - -# Django stuff: -*.log 
-local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy # PyBuilder target/ @@ -135,11 +76,7 @@ target/ # pyenv .python-version -# celery beat schedule file -celerybeat-schedule -# SageMath parsed files -*.sage.py # Environments .env @@ -192,8 +129,14 @@ examples/modules/extractors/firings_true.mda examples/modules/extractors/circular_layout.csv examples/modules/comparison/tmp_* examples/modules/comparison/a_study_folder/* -examples/modules/toolkit/phy/* -examples/modules/toolkit/tmp_* + +examples/modules_gallery/**/*.raw +examples/modules_gallery/**/*.npy +examples/modules_gallery/**/*.json +examples/modules_gallery/**/*.pkl +examples/modules_gallery/**/*.npz +examples/modules_gallery/**/*.csv +examples/modules_gallery/**/*.zarr # Files and folders generated during tests test_folder/ diff --git a/conftest.py b/conftest.py index c4bac6628a..ce5e07b47b 100644 --- a/conftest.py +++ b/conftest.py @@ -1,19 +1,7 @@ import pytest -import shutil -import os from pathlib import Path -ON_GITHUB = bool(os.getenv('GITHUB_ACTIONS')) - - -# define marks -mark_names = ["core", "extractors", "preprocessing", "postprocessing", - "sorters_external", "sorters_internal", "sorters", - "qualitymetrics", "comparison", "curation", - "widgets", "exporters", "sortingcomponents", "generation"] - - @pytest.fixture(scope="module") def create_cache_folder(tmp_path_factory): cache_folder = tmp_path_factory.mktemp("cache_folder") diff --git a/doc/how_to/benchmark_with_hybrid_recordings.rst b/doc/how_to/benchmark_with_hybrid_recordings.rst new file mode 100644 index 0000000000..9e8c6c7d65 --- /dev/null +++ b/doc/how_to/benchmark_with_hybrid_recordings.rst @@ -0,0 +1,2552 @@ +Benchmark spike sorting with hybrid recordings +============================================== + +This example shows how to use the SpikeInterface hybrid recordings +framework to benchmark spike sorting results. + +Hybrid recordings are built from existing recordings by injecting units +with known spiking activity. The templates (i.e., the average waveforms) of the +injected units can come from previously spike-sorted data. In this example, +we will be using an open database of templates that we have constructed +from the International Brain Laboratory - Brain Wide Map (available on +`DANDI `__). + +Importantly, recordings from long-shank probes, such as Neuropixels, +usually experience drift. Such drift has to be taken into account in +order to smoothly inject spikes into the recording. + +.. code:: ipython3 + + import spikeinterface as si + import spikeinterface.extractors as se + import spikeinterface.preprocessing as spre + import spikeinterface.comparison as sc + import spikeinterface.generation as sgen + import spikeinterface.widgets as sw + + from spikeinterface.sortingcomponents.motion_estimation import estimate_motion + + import numpy as np + import matplotlib.pyplot as plt + from pathlib import Path + +.. code:: ipython3 + + %matplotlib inline + +.. code:: ipython3 + + si.set_global_job_kwargs(n_jobs=16) + +For this notebook, we will use a drifting recording similar to the one +acquired by Nick Steinmetz and available +`here `__, where a +triangular motion was imposed on the recording by moving the probe up +and down with a micro-manipulator. + +.. code:: ipython3 + + workdir = Path("/ssd980/working/hybrid/steinmetz_imposed_motion") + workdir.mkdir(exist_ok=True) + +.. code:: ipython3 + + recording_np1_imposed = se.read_spikeglx("/hdd1/data/spikeglx/nick-steinmetz/dataset1/p1_g0_t0/") + recording_preproc = spre.highpass_filter(recording_np1_imposed) + recording_preproc = spre.common_reference(recording_preproc) + +To visualize the drift, we can estimate the motion and plot it: + +.. code:: ipython3 + + # to correct for drift, we need a float dtype + recording_preproc = spre.astype(recording_preproc, "float") + _, motion_info = spre.correct_motion( + recording_preproc, preset="nonrigid_fast_and_accurate", n_jobs=4, progress_bar=True, output_motion_info=True + ) + + + +.. parsed-literal:: + + detect and localize: 0%| | 0/1958 [00:00
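+A minimal sketch of the plotting step follows. It assumes that the ``sw.plot_motion_info`` widget accepts the dictionary returned by ``spre.correct_motion`` above together with the recording: + +.. code:: ipython3 + + # display the estimated motion (peak positions and drift over time and depth) + w = sw.plot_motion_info(motion_info, recording=recording_preproc)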
{minimum_depth}") + len(templates_selected_info) + + + + +.. parsed-literal:: + + 31 + + + +We can now retrieve the selected templates as a ``Templates`` object: + +.. code:: ipython3 + + templates_selected = sgen.query_templates_from_database(templates_selected_info, verbose=True) + print(templates_selected) + + +.. parsed-literal:: + + Fetching templates from 2 datasets + Templates: 31 units - 240 samples - 384 channels + sampling_frequency=30.00 kHz - ms_before=3.00 ms - ms_after=5.00 ms + Probe - IMEC - Neuropixels 1.0 - 18194814141 - 384ch - 1shanks + + +While we selected templates from a target area and at certain depths, +we can see that the template amplitudes are quite large. This will make +spike sorting easy… We can further manipulate the ``Templates`` by +rescaling, relocating, or applying further selections with the +``sgen.scale_template_to_range``, ``sgen.relocate_templates``, and +``sgen.select_templates`` functions. + +In our case, let’s rescale the amplitudes between 50 and 150 +:math:`\mu`\ V and relocate them towards the bottom half of the probe, +where the activity looks interesting! + +.. code:: ipython3 + + min_amplitude = 50 + max_amplitude = 150 + templates_scaled = sgen.scale_template_to_range( + templates=templates_selected, + min_amplitude=min_amplitude, + max_amplitude=max_amplitude + ) + + min_displacement = 1000 + max_displacement = 3000 + templates_relocated = sgen.relocate_templates( + templates=templates_scaled, + min_displacement=min_displacement, + max_displacement=max_displacement + )
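+As a quick sanity check, here is a sketch assuming the ``Templates`` object exposes its waveforms as a ``templates_array`` of shape ``(num_units, num_samples, num_channels)``: the rescaled peak-to-peak amplitudes on each unit's largest channel should now fall within the requested 50-150 :math:`\mu`\ V range. + +.. code:: ipython3 + + # peak-to-peak amplitude per unit and channel (assumes the templates_array layout above) + ptp_per_channel = np.ptp(templates_scaled.templates_array, axis=1) + # amplitude on the largest channel of each unit + best_channel_ptp = ptp_per_channel.max(axis=1) + print(best_channel_ptp.min(), best_channel_ptp.max())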
Let’s plot the selected templates: + +.. code:: ipython3 + + sparsity_plot = si.compute_sparsity(templates_relocated) + fig = plt.figure(figsize=(10, 10)) + w = sw.plot_unit_templates(templates_relocated, sparsity=sparsity_plot, ncols=4, figure=fig) + w.figure.subplots_adjust(wspace=0.5, hspace=0.7) + + + +.. image:: benchmark_with_hybrid_recordings_files/benchmark_with_hybrid_recordings_20_0.png + + +Constructing hybrid recordings +------------------------------ + +We can now construct hybrid recordings with the selected templates. + +We will do this in two ways to show how important it is to account for +drifts when injecting hybrid spikes. + +- For the first recording we will not pass the estimated motion + (``recording_hybrid_ignore_drift``). +- For the second recording, we will pass and account for the estimated + motion (``recording_hybrid_with_drift``). + +.. code:: ipython3 + + recording_hybrid_ignore_drift, sorting_hybrid = sgen.generate_hybrid_recording( + recording=recording_preproc, templates=templates_relocated, seed=2308 + ) + recording_hybrid_ignore_drift + + + + +.. raw:: html + +    InjectTemplatesRecording: 384 channels - 30.0kHz - 1 segments - 58,715,724 samples - 1,957.19s (32.62 minutes) - float64 dtype - 167.99 GiB +    (full HTML repr omitted: it lists the 384 'imec0.ap#AP*' channel IDs, the is_filtered, probe_0_planar_contour, and probes_info annotations, and the per-channel gain_to_uV, offset_to_uV, channel_names, and contact_vector properties)
'square', 12., '', 'e264', 264, 'um', 1., 0., 0., 1., 264, 0, 0, 500, 250, 1) + (0, 48., 2640., 'square', 12., '', 'e265', 265, 'um', 1., 0., 0., 1., 265, 0, 0, 500, 250, 1) + (0, 0., 2660., 'square', 12., '', 'e266', 266, 'um', 1., 0., 0., 1., 266, 0, 0, 500, 250, 1) + (0, 32., 2660., 'square', 12., '', 'e267', 267, 'um', 1., 0., 0., 1., 267, 0, 0, 500, 250, 1) + (0, 16., 2680., 'square', 12., '', 'e268', 268, 'um', 1., 0., 0., 1., 268, 0, 0, 500, 250, 1) + (0, 48., 2680., 'square', 12., '', 'e269', 269, 'um', 1., 0., 0., 1., 269, 0, 0, 500, 250, 1) + (0, 0., 2700., 'square', 12., '', 'e270', 270, 'um', 1., 0., 0., 1., 270, 0, 0, 500, 250, 1) + (0, 32., 2700., 'square', 12., '', 'e271', 271, 'um', 1., 0., 0., 1., 271, 0, 0, 500, 250, 1) + (0, 16., 2720., 'square', 12., '', 'e272', 272, 'um', 1., 0., 0., 1., 272, 0, 0, 500, 250, 1) + (0, 48., 2720., 'square', 12., '', 'e273', 273, 'um', 1., 0., 0., 1., 273, 0, 0, 500, 250, 1) + (0, 0., 2740., 'square', 12., '', 'e274', 274, 'um', 1., 0., 0., 1., 274, 0, 0, 500, 250, 1) + (0, 32., 2740., 'square', 12., '', 'e275', 275, 'um', 1., 0., 0., 1., 275, 0, 0, 500, 250, 1) + (0, 16., 2760., 'square', 12., '', 'e276', 276, 'um', 1., 0., 0., 1., 276, 0, 0, 500, 250, 1) + (0, 48., 2760., 'square', 12., '', 'e277', 277, 'um', 1., 0., 0., 1., 277, 0, 0, 500, 250, 1) + (0, 0., 2780., 'square', 12., '', 'e278', 278, 'um', 1., 0., 0., 1., 278, 0, 0, 500, 250, 1) + (0, 32., 2780., 'square', 12., '', 'e279', 279, 'um', 1., 0., 0., 1., 279, 0, 0, 500, 250, 1) + (0, 16., 2800., 'square', 12., '', 'e280', 280, 'um', 1., 0., 0., 1., 280, 0, 0, 500, 250, 1) + (0, 48., 2800., 'square', 12., '', 'e281', 281, 'um', 1., 0., 0., 1., 281, 0, 0, 500, 250, 1) + (0, 0., 2820., 'square', 12., '', 'e282', 282, 'um', 1., 0., 0., 1., 282, 0, 0, 500, 250, 1) + (0, 32., 2820., 'square', 12., '', 'e283', 283, 'um', 1., 0., 0., 1., 283, 0, 0, 500, 250, 1) + (0, 16., 2840., 'square', 12., '', 'e284', 284, 'um', 1., 0., 0., 1., 284, 0, 0, 500, 250, 1) + (0, 48., 2840., 'square', 12., '', 'e285', 285, 'um', 1., 0., 0., 1., 285, 0, 0, 500, 250, 1) + (0, 0., 2860., 'square', 12., '', 'e286', 286, 'um', 1., 0., 0., 1., 286, 0, 0, 500, 250, 1) + (0, 32., 2860., 'square', 12., '', 'e287', 287, 'um', 1., 0., 0., 1., 287, 0, 0, 500, 250, 1) + (0, 16., 2880., 'square', 12., '', 'e288', 288, 'um', 1., 0., 0., 1., 288, 0, 0, 500, 250, 1) + (0, 48., 2880., 'square', 12., '', 'e289', 289, 'um', 1., 0., 0., 1., 289, 0, 0, 500, 250, 1) + (0, 0., 2900., 'square', 12., '', 'e290', 290, 'um', 1., 0., 0., 1., 290, 0, 0, 500, 250, 1) + (0, 32., 2900., 'square', 12., '', 'e291', 291, 'um', 1., 0., 0., 1., 291, 0, 0, 500, 250, 1) + (0, 16., 2920., 'square', 12., '', 'e292', 292, 'um', 1., 0., 0., 1., 292, 0, 0, 500, 250, 1) + (0, 48., 2920., 'square', 12., '', 'e293', 293, 'um', 1., 0., 0., 1., 293, 0, 0, 500, 250, 1) + (0, 0., 2940., 'square', 12., '', 'e294', 294, 'um', 1., 0., 0., 1., 294, 0, 0, 500, 250, 1) + (0, 32., 2940., 'square', 12., '', 'e295', 295, 'um', 1., 0., 0., 1., 295, 0, 0, 500, 250, 1) + (0, 16., 2960., 'square', 12., '', 'e296', 296, 'um', 1., 0., 0., 1., 296, 0, 0, 500, 250, 1) + (0, 48., 2960., 'square', 12., '', 'e297', 297, 'um', 1., 0., 0., 1., 297, 0, 0, 500, 250, 1) + (0, 0., 2980., 'square', 12., '', 'e298', 298, 'um', 1., 0., 0., 1., 298, 0, 0, 500, 250, 1) + (0, 32., 2980., 'square', 12., '', 'e299', 299, 'um', 1., 0., 0., 1., 299, 0, 0, 500, 250, 1) + (0, 16., 3000., 'square', 12., '', 'e300', 300, 'um', 1., 0., 0., 1., 300, 0, 0, 500, 250, 1) + (0, 48., 3000., 'square', 
12., '', 'e301', 301, 'um', 1., 0., 0., 1., 301, 0, 0, 500, 250, 1) + (0, 0., 3020., 'square', 12., '', 'e302', 302, 'um', 1., 0., 0., 1., 302, 0, 0, 500, 250, 1) + (0, 32., 3020., 'square', 12., '', 'e303', 303, 'um', 1., 0., 0., 1., 303, 0, 0, 500, 250, 1) + (0, 16., 3040., 'square', 12., '', 'e304', 304, 'um', 1., 0., 0., 1., 304, 0, 0, 500, 250, 1) + (0, 48., 3040., 'square', 12., '', 'e305', 305, 'um', 1., 0., 0., 1., 305, 0, 0, 500, 250, 1) + (0, 0., 3060., 'square', 12., '', 'e306', 306, 'um', 1., 0., 0., 1., 306, 0, 0, 500, 250, 1) + (0, 32., 3060., 'square', 12., '', 'e307', 307, 'um', 1., 0., 0., 1., 307, 0, 0, 500, 250, 1) + (0, 16., 3080., 'square', 12., '', 'e308', 308, 'um', 1., 0., 0., 1., 308, 0, 0, 500, 250, 1) + (0, 48., 3080., 'square', 12., '', 'e309', 309, 'um', 1., 0., 0., 1., 309, 0, 0, 500, 250, 1) + (0, 0., 3100., 'square', 12., '', 'e310', 310, 'um', 1., 0., 0., 1., 310, 0, 0, 500, 250, 1) + (0, 32., 3100., 'square', 12., '', 'e311', 311, 'um', 1., 0., 0., 1., 311, 0, 0, 500, 250, 1) + (0, 16., 3120., 'square', 12., '', 'e312', 312, 'um', 1., 0., 0., 1., 312, 0, 0, 500, 250, 1) + (0, 48., 3120., 'square', 12., '', 'e313', 313, 'um', 1., 0., 0., 1., 313, 0, 0, 500, 250, 1) + (0, 0., 3140., 'square', 12., '', 'e314', 314, 'um', 1., 0., 0., 1., 314, 0, 0, 500, 250, 1) + (0, 32., 3140., 'square', 12., '', 'e315', 315, 'um', 1., 0., 0., 1., 315, 0, 0, 500, 250, 1) + (0, 16., 3160., 'square', 12., '', 'e316', 316, 'um', 1., 0., 0., 1., 316, 0, 0, 500, 250, 1) + (0, 48., 3160., 'square', 12., '', 'e317', 317, 'um', 1., 0., 0., 1., 317, 0, 0, 500, 250, 1) + (0, 0., 3180., 'square', 12., '', 'e318', 318, 'um', 1., 0., 0., 1., 318, 0, 0, 500, 250, 1) + (0, 32., 3180., 'square', 12., '', 'e319', 319, 'um', 1., 0., 0., 1., 319, 0, 0, 500, 250, 1) + (0, 16., 3200., 'square', 12., '', 'e320', 320, 'um', 1., 0., 0., 1., 320, 0, 0, 500, 250, 1) + (0, 48., 3200., 'square', 12., '', 'e321', 321, 'um', 1., 0., 0., 1., 321, 0, 0, 500, 250, 1) + (0, 0., 3220., 'square', 12., '', 'e322', 322, 'um', 1., 0., 0., 1., 322, 0, 0, 500, 250, 1) + (0, 32., 3220., 'square', 12., '', 'e323', 323, 'um', 1., 0., 0., 1., 323, 0, 0, 500, 250, 1) + (0, 16., 3240., 'square', 12., '', 'e324', 324, 'um', 1., 0., 0., 1., 324, 0, 0, 500, 250, 1) + (0, 48., 3240., 'square', 12., '', 'e325', 325, 'um', 1., 0., 0., 1., 325, 0, 0, 500, 250, 1) + (0, 0., 3260., 'square', 12., '', 'e326', 326, 'um', 1., 0., 0., 1., 326, 0, 0, 500, 250, 1) + (0, 32., 3260., 'square', 12., '', 'e327', 327, 'um', 1., 0., 0., 1., 327, 0, 0, 500, 250, 1) + (0, 16., 3280., 'square', 12., '', 'e328', 328, 'um', 1., 0., 0., 1., 328, 0, 0, 500, 250, 1) + (0, 48., 3280., 'square', 12., '', 'e329', 329, 'um', 1., 0., 0., 1., 329, 0, 0, 500, 250, 1) + (0, 0., 3300., 'square', 12., '', 'e330', 330, 'um', 1., 0., 0., 1., 330, 0, 0, 500, 250, 1) + (0, 32., 3300., 'square', 12., '', 'e331', 331, 'um', 1., 0., 0., 1., 331, 0, 0, 500, 250, 1) + (0, 16., 3320., 'square', 12., '', 'e332', 332, 'um', 1., 0., 0., 1., 332, 0, 0, 500, 250, 1) + (0, 48., 3320., 'square', 12., '', 'e333', 333, 'um', 1., 0., 0., 1., 333, 0, 0, 500, 250, 1) + (0, 0., 3340., 'square', 12., '', 'e334', 334, 'um', 1., 0., 0., 1., 334, 0, 0, 500, 250, 1) + (0, 32., 3340., 'square', 12., '', 'e335', 335, 'um', 1., 0., 0., 1., 335, 0, 0, 500, 250, 1) + (0, 16., 3360., 'square', 12., '', 'e336', 336, 'um', 1., 0., 0., 1., 336, 0, 0, 500, 250, 1) + (0, 48., 3360., 'square', 12., '', 'e337', 337, 'um', 1., 0., 0., 1., 337, 0, 0, 500, 250, 1) + (0, 0., 3380., 'square', 12., '', 
'e338', 338, 'um', 1., 0., 0., 1., 338, 0, 0, 500, 250, 1) + (0, 32., 3380., 'square', 12., '', 'e339', 339, 'um', 1., 0., 0., 1., 339, 0, 0, 500, 250, 1) + (0, 16., 3400., 'square', 12., '', 'e340', 340, 'um', 1., 0., 0., 1., 340, 0, 0, 500, 250, 1) + (0, 48., 3400., 'square', 12., '', 'e341', 341, 'um', 1., 0., 0., 1., 341, 0, 0, 500, 250, 1) + (0, 0., 3420., 'square', 12., '', 'e342', 342, 'um', 1., 0., 0., 1., 342, 0, 0, 500, 250, 1) + (0, 32., 3420., 'square', 12., '', 'e343', 343, 'um', 1., 0., 0., 1., 343, 0, 0, 500, 250, 1) + (0, 16., 3440., 'square', 12., '', 'e344', 344, 'um', 1., 0., 0., 1., 344, 0, 0, 500, 250, 1) + (0, 48., 3440., 'square', 12., '', 'e345', 345, 'um', 1., 0., 0., 1., 345, 0, 0, 500, 250, 1) + (0, 0., 3460., 'square', 12., '', 'e346', 346, 'um', 1., 0., 0., 1., 346, 0, 0, 500, 250, 1) + (0, 32., 3460., 'square', 12., '', 'e347', 347, 'um', 1., 0., 0., 1., 347, 0, 0, 500, 250, 1) + (0, 16., 3480., 'square', 12., '', 'e348', 348, 'um', 1., 0., 0., 1., 348, 0, 0, 500, 250, 1) + (0, 48., 3480., 'square', 12., '', 'e349', 349, 'um', 1., 0., 0., 1., 349, 0, 0, 500, 250, 1) + (0, 0., 3500., 'square', 12., '', 'e350', 350, 'um', 1., 0., 0., 1., 350, 0, 0, 500, 250, 1) + (0, 32., 3500., 'square', 12., '', 'e351', 351, 'um', 1., 0., 0., 1., 351, 0, 0, 500, 250, 1) + (0, 16., 3520., 'square', 12., '', 'e352', 352, 'um', 1., 0., 0., 1., 352, 0, 0, 500, 250, 1) + (0, 48., 3520., 'square', 12., '', 'e353', 353, 'um', 1., 0., 0., 1., 353, 0, 0, 500, 250, 1) + (0, 0., 3540., 'square', 12., '', 'e354', 354, 'um', 1., 0., 0., 1., 354, 0, 0, 500, 250, 1) + (0, 32., 3540., 'square', 12., '', 'e355', 355, 'um', 1., 0., 0., 1., 355, 0, 0, 500, 250, 1) + (0, 16., 3560., 'square', 12., '', 'e356', 356, 'um', 1., 0., 0., 1., 356, 0, 0, 500, 250, 1) + (0, 48., 3560., 'square', 12., '', 'e357', 357, 'um', 1., 0., 0., 1., 357, 0, 0, 500, 250, 1) + (0, 0., 3580., 'square', 12., '', 'e358', 358, 'um', 1., 0., 0., 1., 358, 0, 0, 500, 250, 1) + (0, 32., 3580., 'square', 12., '', 'e359', 359, 'um', 1., 0., 0., 1., 359, 0, 0, 500, 250, 1) + (0, 16., 3600., 'square', 12., '', 'e360', 360, 'um', 1., 0., 0., 1., 360, 0, 0, 500, 250, 1) + (0, 48., 3600., 'square', 12., '', 'e361', 361, 'um', 1., 0., 0., 1., 361, 0, 0, 500, 250, 1) + (0, 0., 3620., 'square', 12., '', 'e362', 362, 'um', 1., 0., 0., 1., 362, 0, 0, 500, 250, 1) + (0, 32., 3620., 'square', 12., '', 'e363', 363, 'um', 1., 0., 0., 1., 363, 0, 0, 500, 250, 1) + (0, 16., 3640., 'square', 12., '', 'e364', 364, 'um', 1., 0., 0., 1., 364, 0, 0, 500, 250, 1) + (0, 48., 3640., 'square', 12., '', 'e365', 365, 'um', 1., 0., 0., 1., 365, 0, 0, 500, 250, 1) + (0, 0., 3660., 'square', 12., '', 'e366', 366, 'um', 1., 0., 0., 1., 366, 0, 0, 500, 250, 1) + (0, 32., 3660., 'square', 12., '', 'e367', 367, 'um', 1., 0., 0., 1., 367, 0, 0, 500, 250, 1) + (0, 16., 3680., 'square', 12., '', 'e368', 368, 'um', 1., 0., 0., 1., 368, 0, 0, 500, 250, 1) + (0, 48., 3680., 'square', 12., '', 'e369', 369, 'um', 1., 0., 0., 1., 369, 0, 0, 500, 250, 1) + (0, 0., 3700., 'square', 12., '', 'e370', 370, 'um', 1., 0., 0., 1., 370, 0, 0, 500, 250, 1) + (0, 32., 3700., 'square', 12., '', 'e371', 371, 'um', 1., 0., 0., 1., 371, 0, 0, 500, 250, 1) + (0, 16., 3720., 'square', 12., '', 'e372', 372, 'um', 1., 0., 0., 1., 372, 0, 0, 500, 250, 1) + (0, 48., 3720., 'square', 12., '', 'e373', 373, 'um', 1., 0., 0., 1., 373, 0, 0, 500, 250, 1) + (0, 0., 3740., 'square', 12., '', 'e374', 374, 'um', 1., 0., 0., 1., 374, 0, 0, 500, 250, 1) + (0, 32., 3740., 'square', 12., '', 'e375', 
375, 'um', 1., 0., 0., 1., 375, 0, 0, 500, 250, 1) + (0, 16., 3760., 'square', 12., '', 'e376', 376, 'um', 1., 0., 0., 1., 376, 0, 0, 500, 250, 1) + (0, 48., 3760., 'square', 12., '', 'e377', 377, 'um', 1., 0., 0., 1., 377, 0, 0, 500, 250, 1) + (0, 0., 3780., 'square', 12., '', 'e378', 378, 'um', 1., 0., 0., 1., 378, 0, 0, 500, 250, 1) + (0, 32., 3780., 'square', 12., '', 'e379', 379, 'um', 1., 0., 0., 1., 379, 0, 0, 500, 250, 1) + (0, 16., 3800., 'square', 12., '', 'e380', 380, 'um', 1., 0., 0., 1., 380, 0, 0, 500, 250, 1) + (0, 48., 3800., 'square', 12., '', 'e381', 381, 'um', 1., 0., 0., 1., 381, 0, 0, 500, 250, 1) + (0, 0., 3820., 'square', 12., '', 'e382', 382, 'um', 1., 0., 0., 1., 382, 0, 0, 500, 250, 1) + (0, 32., 3820., 'square', 12., '', 'e383', 383, 'um', 1., 0., 0., 1., 383, 0, 0, 500, 250, 1)]
    location [[ 16. 0.] [ 48. 0.] [ 0. 20.] [ 32. 20.] ... [ 0. 3820.] [ 32. 3820.]]  (384 contact locations, truncated: staggered columns at x = 0/16/32/48 um, 20 um row pitch from y = 0 to 3820 um)
    group [0 0 0 ... 0]  (all 384 channels belong to group 0)
    inter_sample_shift [0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 ... 0.84615385 0.84615385]  (per-channel sample shifts in units of the sampling period; channel pairs step by 1/13 and the pattern repeats every 24 channels)
+
+Note that ``generate_hybrid_recording`` is warning us that we might
+want to account for drift!
+
+.. code:: ipython3
+
+    # by passing the `sorting_hybrid` object, we make sure that injected spikes are the same
+    # this will take a bit more time because it's interpolating the templates to account for drift
+    recording_hybrid_with_drift, sorting_hybrid = sgen.generate_hybrid_recording(
+        recording=recording_preproc,
+        templates=templates_relocated,
+        motion=motion_info["motion"],
+        sorting=sorting_hybrid,
+        seed=2308,
+    )
+    recording_hybrid_with_drift
+
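+Because the same ``sorting_hybrid`` is passed in, both hybrid recordings are
+built from identical ground-truth spike trains. A minimal sanity check could
+compare the per-unit spike counts (a sketch, assuming the standard
+``count_num_spikes_per_unit`` method of the sorting object):
+
+.. code:: ipython3
+
+    # the injected sorting is shared, so these counts describe the ground
+    # truth of both the static and the drifting hybrid recording
+    num_spikes_per_unit = sorting_hybrid.count_num_spikes_per_unit()
+    print(num_spikes_per_unit)
+
+The summary of the drifting hybrid recording is shown below:
+
+.. raw:: html
+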
InjectDriftingTemplatesRecording: 384 channels - 30.0kHz - 1 segments - 58,715,724 samples - 1,957.19s (32.62 minutes) - float64 dtype - 167.99 GiB
Channel IDs
    ['imec0.ap#AP0' 'imec0.ap#AP1' 'imec0.ap#AP2' ... 'imec0.ap#AP382' 'imec0.ap#AP383']  (384 channel IDs, truncated)
Annotations
  • is_filtered : True
  • probe_0_planar_contour : [[ -11 9989] [ -11 -11] [ 24 -186] [ 59 -11] [ 59 9989]]
  • probes_info : [{'manufacturer': 'IMEC', 'model_name': 'Neuropixels 1.0', 'serial_number': '18194814141'}]
Channel Properties
    gain_to_uV [2.34375 2.34375 ... 2.34375]  (uniform 2.34375 uV/bit for all 384 channels)
    offset_to_uV [0 0 0 ... 0]  (zero offset for all 384 channels)
    channel_names ['AP0' 'AP1' 'AP2' ... 'AP382' 'AP383']  (384 channel names, truncated)
    contact_vector [(0, 16., 0., 'circle', 1., '', '', 0, 'um', 1., 0., 0., 1.) ... (entries 1-319 omitted: 'circle' contacts of radius 1 um on the same staggered grid, x = 0/16/32/48 um, 20 um row pitch) ...
+ (0, 16., 3200., 'circle', 1., '', '', 320, 'um', 1., 0., 0., 1.) + (0, 48., 3200., 'circle', 1., '', '', 321, 'um', 1., 0., 0., 1.) + (0, 0., 3220., 'circle', 1., '', '', 322, 'um', 1., 0., 0., 1.) + (0, 32., 3220., 'circle', 1., '', '', 323, 'um', 1., 0., 0., 1.) + (0, 16., 3240., 'circle', 1., '', '', 324, 'um', 1., 0., 0., 1.) + (0, 48., 3240., 'circle', 1., '', '', 325, 'um', 1., 0., 0., 1.) + (0, 0., 3260., 'circle', 1., '', '', 326, 'um', 1., 0., 0., 1.) + (0, 32., 3260., 'circle', 1., '', '', 327, 'um', 1., 0., 0., 1.) + (0, 16., 3280., 'circle', 1., '', '', 328, 'um', 1., 0., 0., 1.) + (0, 48., 3280., 'circle', 1., '', '', 329, 'um', 1., 0., 0., 1.) + (0, 0., 3300., 'circle', 1., '', '', 330, 'um', 1., 0., 0., 1.) + (0, 32., 3300., 'circle', 1., '', '', 331, 'um', 1., 0., 0., 1.) + (0, 16., 3320., 'circle', 1., '', '', 332, 'um', 1., 0., 0., 1.) + (0, 48., 3320., 'circle', 1., '', '', 333, 'um', 1., 0., 0., 1.) + (0, 0., 3340., 'circle', 1., '', '', 334, 'um', 1., 0., 0., 1.) + (0, 32., 3340., 'circle', 1., '', '', 335, 'um', 1., 0., 0., 1.) + (0, 16., 3360., 'circle', 1., '', '', 336, 'um', 1., 0., 0., 1.) + (0, 48., 3360., 'circle', 1., '', '', 337, 'um', 1., 0., 0., 1.) + (0, 0., 3380., 'circle', 1., '', '', 338, 'um', 1., 0., 0., 1.) + (0, 32., 3380., 'circle', 1., '', '', 339, 'um', 1., 0., 0., 1.) + (0, 16., 3400., 'circle', 1., '', '', 340, 'um', 1., 0., 0., 1.) + (0, 48., 3400., 'circle', 1., '', '', 341, 'um', 1., 0., 0., 1.) + (0, 0., 3420., 'circle', 1., '', '', 342, 'um', 1., 0., 0., 1.) + (0, 32., 3420., 'circle', 1., '', '', 343, 'um', 1., 0., 0., 1.) + (0, 16., 3440., 'circle', 1., '', '', 344, 'um', 1., 0., 0., 1.) + (0, 48., 3440., 'circle', 1., '', '', 345, 'um', 1., 0., 0., 1.) + (0, 0., 3460., 'circle', 1., '', '', 346, 'um', 1., 0., 0., 1.) + (0, 32., 3460., 'circle', 1., '', '', 347, 'um', 1., 0., 0., 1.) + (0, 16., 3480., 'circle', 1., '', '', 348, 'um', 1., 0., 0., 1.) + (0, 48., 3480., 'circle', 1., '', '', 349, 'um', 1., 0., 0., 1.) + (0, 0., 3500., 'circle', 1., '', '', 350, 'um', 1., 0., 0., 1.) + (0, 32., 3500., 'circle', 1., '', '', 351, 'um', 1., 0., 0., 1.) + (0, 16., 3520., 'circle', 1., '', '', 352, 'um', 1., 0., 0., 1.) + (0, 48., 3520., 'circle', 1., '', '', 353, 'um', 1., 0., 0., 1.) + (0, 0., 3540., 'circle', 1., '', '', 354, 'um', 1., 0., 0., 1.) + (0, 32., 3540., 'circle', 1., '', '', 355, 'um', 1., 0., 0., 1.) + (0, 16., 3560., 'circle', 1., '', '', 356, 'um', 1., 0., 0., 1.) + (0, 48., 3560., 'circle', 1., '', '', 357, 'um', 1., 0., 0., 1.) + (0, 0., 3580., 'circle', 1., '', '', 358, 'um', 1., 0., 0., 1.) + (0, 32., 3580., 'circle', 1., '', '', 359, 'um', 1., 0., 0., 1.) + (0, 16., 3600., 'circle', 1., '', '', 360, 'um', 1., 0., 0., 1.) + (0, 48., 3600., 'circle', 1., '', '', 361, 'um', 1., 0., 0., 1.) + (0, 0., 3620., 'circle', 1., '', '', 362, 'um', 1., 0., 0., 1.) + (0, 32., 3620., 'circle', 1., '', '', 363, 'um', 1., 0., 0., 1.) + (0, 16., 3640., 'circle', 1., '', '', 364, 'um', 1., 0., 0., 1.) + (0, 48., 3640., 'circle', 1., '', '', 365, 'um', 1., 0., 0., 1.) + (0, 0., 3660., 'circle', 1., '', '', 366, 'um', 1., 0., 0., 1.) + (0, 32., 3660., 'circle', 1., '', '', 367, 'um', 1., 0., 0., 1.) + (0, 16., 3680., 'circle', 1., '', '', 368, 'um', 1., 0., 0., 1.) + (0, 48., 3680., 'circle', 1., '', '', 369, 'um', 1., 0., 0., 1.) + (0, 0., 3700., 'circle', 1., '', '', 370, 'um', 1., 0., 0., 1.) + (0, 32., 3700., 'circle', 1., '', '', 371, 'um', 1., 0., 0., 1.) + (0, 16., 3720., 'circle', 1., '', '', 372, 'um', 1., 0., 0., 1.) 
+ (0, 48., 3720., 'circle', 1., '', '', 373, 'um', 1., 0., 0., 1.) + (0, 0., 3740., 'circle', 1., '', '', 374, 'um', 1., 0., 0., 1.) + (0, 32., 3740., 'circle', 1., '', '', 375, 'um', 1., 0., 0., 1.) + (0, 16., 3760., 'circle', 1., '', '', 376, 'um', 1., 0., 0., 1.) + (0, 48., 3760., 'circle', 1., '', '', 377, 'um', 1., 0., 0., 1.) + (0, 0., 3780., 'circle', 1., '', '', 378, 'um', 1., 0., 0., 1.) + (0, 32., 3780., 'circle', 1., '', '', 379, 'um', 1., 0., 0., 1.) + (0, 16., 3800., 'circle', 1., '', '', 380, 'um', 1., 0., 0., 1.) + (0, 48., 3800., 'circle', 1., '', '', 381, 'um', 1., 0., 0., 1.) + (0, 0., 3820., 'circle', 1., '', '', 382, 'um', 1., 0., 0., 1.) + (0, 32., 3820., 'circle', 1., '', '', 383, 'um', 1., 0., 0., 1.)]
    location [[ 16. 0.] + [ 48. 0.] + [ 0. 20.] + [ 32. 20.] + [ 16. 40.] + [ 48. 40.] + [ 0. 60.] + [ 32. 60.] + [ 16. 80.] + [ 48. 80.] + [ 0. 100.] + [ 32. 100.] + [ 16. 120.] + [ 48. 120.] + [ 0. 140.] + [ 32. 140.] + [ 16. 160.] + [ 48. 160.] + [ 0. 180.] + [ 32. 180.] + [ 16. 200.] + [ 48. 200.] + [ 0. 220.] + [ 32. 220.] + [ 16. 240.] + [ 48. 240.] + [ 0. 260.] + [ 32. 260.] + [ 16. 280.] + [ 48. 280.] + [ 0. 300.] + [ 32. 300.] + [ 16. 320.] + [ 48. 320.] + [ 0. 340.] + [ 32. 340.] + [ 16. 360.] + [ 48. 360.] + [ 0. 380.] + [ 32. 380.] + [ 16. 400.] + [ 48. 400.] + [ 0. 420.] + [ 32. 420.] + [ 16. 440.] + [ 48. 440.] + [ 0. 460.] + [ 32. 460.] + [ 16. 480.] + [ 48. 480.] + [ 0. 500.] + [ 32. 500.] + [ 16. 520.] + [ 48. 520.] + [ 0. 540.] + [ 32. 540.] + [ 16. 560.] + [ 48. 560.] + [ 0. 580.] + [ 32. 580.] + [ 16. 600.] + [ 48. 600.] + [ 0. 620.] + [ 32. 620.] + [ 16. 640.] + [ 48. 640.] + [ 0. 660.] + [ 32. 660.] + [ 16. 680.] + [ 48. 680.] + [ 0. 700.] + [ 32. 700.] + [ 16. 720.] + [ 48. 720.] + [ 0. 740.] + [ 32. 740.] + [ 16. 760.] + [ 48. 760.] + [ 0. 780.] + [ 32. 780.] + [ 16. 800.] + [ 48. 800.] + [ 0. 820.] + [ 32. 820.] + [ 16. 840.] + [ 48. 840.] + [ 0. 860.] + [ 32. 860.] + [ 16. 880.] + [ 48. 880.] + [ 0. 900.] + [ 32. 900.] + [ 16. 920.] + [ 48. 920.] + [ 0. 940.] + [ 32. 940.] + [ 16. 960.] + [ 48. 960.] + [ 0. 980.] + [ 32. 980.] + [ 16. 1000.] + [ 48. 1000.] + [ 0. 1020.] + [ 32. 1020.] + [ 16. 1040.] + [ 48. 1040.] + [ 0. 1060.] + [ 32. 1060.] + [ 16. 1080.] + [ 48. 1080.] + [ 0. 1100.] + [ 32. 1100.] + [ 16. 1120.] + [ 48. 1120.] + [ 0. 1140.] + [ 32. 1140.] + [ 16. 1160.] + [ 48. 1160.] + [ 0. 1180.] + [ 32. 1180.] + [ 16. 1200.] + [ 48. 1200.] + [ 0. 1220.] + [ 32. 1220.] + [ 16. 1240.] + [ 48. 1240.] + [ 0. 1260.] + [ 32. 1260.] + [ 16. 1280.] + [ 48. 1280.] + [ 0. 1300.] + [ 32. 1300.] + [ 16. 1320.] + [ 48. 1320.] + [ 0. 1340.] + [ 32. 1340.] + [ 16. 1360.] + [ 48. 1360.] + [ 0. 1380.] + [ 32. 1380.] + [ 16. 1400.] + [ 48. 1400.] + [ 0. 1420.] + [ 32. 1420.] + [ 16. 1440.] + [ 48. 1440.] + [ 0. 1460.] + [ 32. 1460.] + [ 16. 1480.] + [ 48. 1480.] + [ 0. 1500.] + [ 32. 1500.] + [ 16. 1520.] + [ 48. 1520.] + [ 0. 1540.] + [ 32. 1540.] + [ 16. 1560.] + [ 48. 1560.] + [ 0. 1580.] + [ 32. 1580.] + [ 16. 1600.] + [ 48. 1600.] + [ 0. 1620.] + [ 32. 1620.] + [ 16. 1640.] + [ 48. 1640.] + [ 0. 1660.] + [ 32. 1660.] + [ 16. 1680.] + [ 48. 1680.] + [ 0. 1700.] + [ 32. 1700.] + [ 16. 1720.] + [ 48. 1720.] + [ 0. 1740.] + [ 32. 1740.] + [ 16. 1760.] + [ 48. 1760.] + [ 0. 1780.] + [ 32. 1780.] + [ 16. 1800.] + [ 48. 1800.] + [ 0. 1820.] + [ 32. 1820.] + [ 16. 1840.] + [ 48. 1840.] + [ 0. 1860.] + [ 32. 1860.] + [ 16. 1880.] + [ 48. 1880.] + [ 0. 1900.] + [ 32. 1900.] + [ 16. 1920.] + [ 48. 1920.] + [ 0. 1940.] + [ 32. 1940.] + [ 16. 1960.] + [ 48. 1960.] + [ 0. 1980.] + [ 32. 1980.] + [ 16. 2000.] + [ 48. 2000.] + [ 0. 2020.] + [ 32. 2020.] + [ 16. 2040.] + [ 48. 2040.] + [ 0. 2060.] + [ 32. 2060.] + [ 16. 2080.] + [ 48. 2080.] + [ 0. 2100.] + [ 32. 2100.] + [ 16. 2120.] + [ 48. 2120.] + [ 0. 2140.] + [ 32. 2140.] + [ 16. 2160.] + [ 48. 2160.] + [ 0. 2180.] + [ 32. 2180.] + [ 16. 2200.] + [ 48. 2200.] + [ 0. 2220.] + [ 32. 2220.] + [ 16. 2240.] + [ 48. 2240.] + [ 0. 2260.] + [ 32. 2260.] + [ 16. 2280.] + [ 48. 2280.] + [ 0. 2300.] + [ 32. 2300.] + [ 16. 2320.] + [ 48. 2320.] + [ 0. 2340.] + [ 32. 2340.] + [ 16. 2360.] + [ 48. 2360.] + [ 0. 2380.] + [ 32. 2380.] + [ 16. 2400.] + [ 48. 2400.] + [ 0. 2420.] + [ 32. 2420.] + [ 16. 2440.] + [ 48. 2440.] + [ 0. 2460.] + [ 32. 
2460.] + [ 16. 2480.] + [ 48. 2480.] + [ 0. 2500.] + [ 32. 2500.] + [ 16. 2520.] + [ 48. 2520.] + [ 0. 2540.] + [ 32. 2540.] + [ 16. 2560.] + [ 48. 2560.] + [ 0. 2580.] + [ 32. 2580.] + [ 16. 2600.] + [ 48. 2600.] + [ 0. 2620.] + [ 32. 2620.] + [ 16. 2640.] + [ 48. 2640.] + [ 0. 2660.] + [ 32. 2660.] + [ 16. 2680.] + [ 48. 2680.] + [ 0. 2700.] + [ 32. 2700.] + [ 16. 2720.] + [ 48. 2720.] + [ 0. 2740.] + [ 32. 2740.] + [ 16. 2760.] + [ 48. 2760.] + [ 0. 2780.] + [ 32. 2780.] + [ 16. 2800.] + [ 48. 2800.] + [ 0. 2820.] + [ 32. 2820.] + [ 16. 2840.] + [ 48. 2840.] + [ 0. 2860.] + [ 32. 2860.] + [ 16. 2880.] + [ 48. 2880.] + [ 0. 2900.] + [ 32. 2900.] + [ 16. 2920.] + [ 48. 2920.] + [ 0. 2940.] + [ 32. 2940.] + [ 16. 2960.] + [ 48. 2960.] + [ 0. 2980.] + [ 32. 2980.] + [ 16. 3000.] + [ 48. 3000.] + [ 0. 3020.] + [ 32. 3020.] + [ 16. 3040.] + [ 48. 3040.] + [ 0. 3060.] + [ 32. 3060.] + [ 16. 3080.] + [ 48. 3080.] + [ 0. 3100.] + [ 32. 3100.] + [ 16. 3120.] + [ 48. 3120.] + [ 0. 3140.] + [ 32. 3140.] + [ 16. 3160.] + [ 48. 3160.] + [ 0. 3180.] + [ 32. 3180.] + [ 16. 3200.] + [ 48. 3200.] + [ 0. 3220.] + [ 32. 3220.] + [ 16. 3240.] + [ 48. 3240.] + [ 0. 3260.] + [ 32. 3260.] + [ 16. 3280.] + [ 48. 3280.] + [ 0. 3300.] + [ 32. 3300.] + [ 16. 3320.] + [ 48. 3320.] + [ 0. 3340.] + [ 32. 3340.] + [ 16. 3360.] + [ 48. 3360.] + [ 0. 3380.] + [ 32. 3380.] + [ 16. 3400.] + [ 48. 3400.] + [ 0. 3420.] + [ 32. 3420.] + [ 16. 3440.] + [ 48. 3440.] + [ 0. 3460.] + [ 32. 3460.] + [ 16. 3480.] + [ 48. 3480.] + [ 0. 3500.] + [ 32. 3500.] + [ 16. 3520.] + [ 48. 3520.] + [ 0. 3540.] + [ 32. 3540.] + [ 16. 3560.] + [ 48. 3560.] + [ 0. 3580.] + [ 32. 3580.] + [ 16. 3600.] + [ 48. 3600.] + [ 0. 3620.] + [ 32. 3620.] + [ 16. 3640.] + [ 48. 3640.] + [ 0. 3660.] + [ 32. 3660.] + [ 16. 3680.] + [ 48. 3680.] + [ 0. 3700.] + [ 32. 3700.] + [ 16. 3720.] + [ 48. 3720.] + [ 0. 3740.] + [ 32. 3740.] + [ 16. 3760.] + [ 48. 3760.] + [ 0. 3780.] + [ 32. 3780.] + [ 16. 3800.] + [ 48. 3800.] + [ 0. 3820.] + [ 32. 3820.]]
    group [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
    inter_sample_shift [0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 
0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385 + 0. 0. 0.07692308 0.07692308 0.15384615 0.15384615 + 0.23076923 0.23076923 0.30769231 0.30769231 0.38461538 0.38461538 + 0.46153846 0.46153846 0.53846154 0.53846154 0.61538462 0.61538462 + 0.69230769 0.69230769 0.76923077 0.76923077 0.84615385 0.84615385]
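These per-channel properties can also be read programmatically instead of from the printed repr. A minimal sketch (assuming, as in this example, that the printed object is ``recording_hybrid_with_drift`` with its probe attached; the property names are the ones shown in the output above):

.. code:: ipython3

    import numpy as np

    probe = recording_hybrid_with_drift.get_probe()  # probeinterface Probe object
    locations = recording_hybrid_with_drift.get_channel_locations()  # (384, 2) (x, y) in um
    groups = recording_hybrid_with_drift.get_channel_groups()
    shifts = recording_hybrid_with_drift.get_property("inter_sample_shift")

    print(locations.shape, np.unique(groups), np.unique(shifts).size)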
+
+We can use the ``SortingAnalyzer`` to estimate spike locations and plot
+them:
+
+.. code:: ipython3
+
+    # construct analyzers and compute spike locations
+    analyzer_hybrid_ignore_drift = si.create_sorting_analyzer(sorting_hybrid, recording_hybrid_ignore_drift)
+    analyzer_hybrid_ignore_drift.compute(["random_spikes", "templates"])
+    analyzer_hybrid_ignore_drift.compute("spike_locations", method="grid_convolution")
+
+    analyzer_hybrid_with_drift = si.create_sorting_analyzer(sorting_hybrid, recording_hybrid_with_drift)
+    analyzer_hybrid_with_drift.compute(["random_spikes", "templates"])
+    analyzer_hybrid_with_drift.compute("spike_locations", method="grid_convolution")
+
+.. parsed-literal::
+
+    estimate_sparsity:   0%|          | 0/1958 [00:00

[...]

`_.

-The following quantities are required:
+In this method the number of spikes whose refractory periods are violated, denoted :math:`n_v`, is used.

-- :math:`ISI_t` : biological threshold for ISI violation.
-- :math:`ISI_{min}`: minimum ISI threshold enforced by the data recording system used.
-- :math:`ISI_s` : the array of ISI violations which are observed in the unit's spike train.
-- :math:`\#`: denotes count.
+Here, the refractory period :math:`t_r` is adjusted to take account of the data recording system's minimum possible
+refractory period. E.g. if a system has a sampling rate of :math:`f \text{ Hz}`, the closest that two spikes from the
+same unit can possibly be is :math:`1/f \, \text{s}`. Hence the refractory period :math:`t_r` is the expected
+biological threshold minus this minimum possible threshold.

-The threshold for ISI violations is the biological ISI threshold, :math:`ISI_t`, minus the minimum ISI threshold, :math:`ISI_{min}` enforced by the data recording system used.
-The array of inter-spike-intervals observed in the unit's spike train, :math:`ISI_s`, is used to identify the count (:math:`\#`) of observed ISI's below this threshold.
-For a recording with a duration of :math:`T_r` seconds, and a unit with :math:`N_s` spikes, the rate of ISI violations is:
+For a recording of duration :math:`T` seconds and a unit with :math:`N` spikes, the contamination rate is calculated to be

 .. math::
-    \textrm{ISI violations} = \frac{ \#( ISI_s < ISI_t) T_r }{ 2 N_s^2 (ISI_t - ISI_{min}) }
+    C = \frac{ n_v T }{ 2 N^2 t_r }

 Calculation from the [Llobet]_ paper
 ------------------------------------

-The following quantities are required:
-
-- :math:`T` the duration of the recording.
-- :math:`N` the number of spikes in the unit's spike train.
-- :math:`t_r` the duration of the unit's refractory period.
-- :math:`n_v` the number of violations of the refractory period.
+In this method the number of spikes which violate other spikes' refractory periods, denoted :math:`\tilde{n}_v`, is used.

-The estimated contamination :math:`C` can be calculated with 2 extreme scenarios. In the first one, the contaminant spikes are completely random (or come from an infinite number of other neurons). In the second one, the contaminant spikes come from a single other neuron:
+The estimated contamination :math:`C` is calculated in 2 extreme scenarios. In the first, the contaminant spikes
+are completely random (or come from an infinite number of other neurons). In the second, the contaminant spikes
+come from a single other neuron. In these scenarios, the contamination rate is

 .. math::

    C = \frac{FP}{TP + FP} \approx \begin{cases}
-        1 - \sqrt{1 - \frac{n_v T}{N^2 t_r}} \text{ for the case of random contamination} \\
-        \frac{1}{2} \left( 1 - \sqrt{1 - \frac{2 n_v T}{N^2 t_r}} \right) \text{ for the case of 1 contaminant neuron}
+        1 - \sqrt{1 - \frac{\tilde{n}_v T}{N^2 t_r}} \text{ for the case of random contamination} \\
+        \frac{1}{2} \left( 1 - \sqrt{1 - \frac{2 \tilde{n}_v T}{N^2 t_r}} \right) \text{ for the case of 1 contaminant neuron}
    \end{cases}

 Where :math:`TP` is the number of true positives (detected spikes that come from the neuron) and :math:`FP` is the number of false positives (detected spikes that don't come from the neuron).
@@ -58,7 +60,9 @@ Expectation and use
 -------------------

 ISI violations identifies unit contamination - a high value indicates a highly contaminated unit.
-Despite being a ratio, ISI violations can exceed 1 (or become a complex number in the [Llobet]_ formula). This is usually due to the contaminant events being correlated with our neuron, and their number is greater than a purely random spike train.
+Despite being a ratio, the contamination can exceed 1 (or become a complex number in the [Llobet]_ formula).
+This is usually due to the contaminant events being correlated with our neuron, and their number being
+greater than that of a purely random spike train.

 Example code
 ------------
@@ -86,8 +90,8 @@ With SpikeInterface:
 References
 ----------

-Hill implementation (:code:`isi_violation`)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+UMS implementation (:code:`isi_violation`)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 .. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_isi_violations
@@ -160,5 +164,5 @@ Links to original implementations
 Literature
 ----------

-Introduced by [Hill]_ (2011).
+Introduced in UltraMegaSort2000 [UMS]_ (2011).
 Also described by [Llobet]_ (2022)
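To make the two conventions concrete, here is a minimal NumPy sketch of both estimators with made-up counts. It is illustrative only: in practice these values are computed by the quality-metrics functions referenced above, and the sketch uses the same violation count for both conventions even though the two papers count violations slightly differently (:math:`n_v` vs. :math:`\tilde{n}_v`).

.. code:: python

    import numpy as np

    # Made-up example quantities (not from any real recording)
    T = 3600.0      # recording duration (s)
    N = 10_000      # number of spikes in the unit
    t_r = 1.5e-3    # effective refractory period (s)
    n_v = 20        # observed refractory-period violations

    # UMS-style contamination rate
    C_ums = (n_v * T) / (2 * N**2 * t_r)

    # Llobet-style estimates; the radicand can go negative for heavily
    # contaminated units, which makes C complex (as noted above)
    C_random = 1 - np.sqrt(1 - (n_v * T) / (N**2 * t_r))
    C_single = 0.5 * (1 - np.sqrt(1 - (2 * n_v * T) / (N**2 * t_r)))

    print(f"C_ums={C_ums:.3f}, C_random={C_random:.3f}, C_single={C_single:.3f}")
    # -> C_ums=0.240, C_random=0.279, C_single=0.400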
diff --git a/examples/how_to/analyse_neuropixels.py b/examples/how_to/analyze_neuropixels.py
similarity index 99%
rename from examples/how_to/analyse_neuropixels.py
rename to examples/how_to/analyze_neuropixels.py
index ce5bacdda0..aeee8b15b4 100644
--- a/examples/how_to/analyse_neuropixels.py
+++ b/examples/how_to/analyze_neuropixels.py
@@ -14,7 +14,7 @@
 #     name: python3
 # ---

-# # Analyse Neuropixels datasets
+# # Analyze Neuropixels datasets
 #
 # This example shows how to perform Neuropixels-specific analysis, including custom pre- and post-processing.
diff --git a/examples/how_to/benchmark_with_hybrid_recordings.py b/examples/how_to/benchmark_with_hybrid_recordings.py
new file mode 100644
index 0000000000..5507ab7a7f
--- /dev/null
+++ b/examples/how_to/benchmark_with_hybrid_recordings.py
@@ -0,0 +1,293 @@
+# ---
+# jupyter:
+#   jupytext:
+#     cell_metadata_filter: -all
+#     formats: ipynb,py
+#     text_representation:
+#       extension: .py
+#       format_name: light
+#       format_version: '1.5'
+#     jupytext_version: 1.16.2
+#   kernelspec:
+#     display_name: Python 3 (ipykernel)
+#     language: python
+#     name: python3
+# ---
+
+# # Benchmark spike sorting with hybrid recordings
+#
+# This example shows how to use the SpikeInterface hybrid recordings framework to benchmark spike sorting results.
+#
+# Hybrid recordings are built from existing recordings by injecting units with known spiking activity.
+# The templates (aka average waveforms) of the injected units can come from previously spike-sorted data.
+# In this example, we will be using an open database of templates that we have constructed from the International Brain Laboratory - Brain Wide Map (available on [DANDI](https://dandiarchive.org/dandiset/000409?search=IBL&page=2&sortOption=0&sortDir=-1&showDrafts=true&showEmpty=false&pos=9)).
+#
+# Importantly, recordings from long-shank probes, such as Neuropixels, usually experience drifts. Such drifts have to be taken into account in order to smoothly inject spikes into the recording.

+# +
+import spikeinterface as si
+import spikeinterface.extractors as se
+import spikeinterface.preprocessing as spre
+import spikeinterface.comparison as sc
+import spikeinterface.generation as sgen
+import spikeinterface.widgets as sw
+
+from spikeinterface.sortingcomponents.motion_estimation import estimate_motion
+
+import numpy as np
+import matplotlib.pyplot as plt
+from pathlib import Path
+# -
+
+# %matplotlib inline
+
+si.set_global_job_kwargs(n_jobs=16)
+
+# For this notebook, we will use a drifting recording similar to the one acquired by Nick Steinmetz and available [here](https://doi.org/10.6084/m9.figshare.14024495.v1), where a triangular motion was imposed on the recording by moving the probe up and down with a micro-manipulator.
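The paths in the next cell are local to the authors' machine. If the Steinmetz dataset is not available, a synthetic drifting recording can stand in for it. A minimal sketch, assuming your SpikeInterface version ships a `generate_drifting_recording` helper in the `generation` module (the name and signature may differ in your version, so treat this as a hypothetical substitute):

.. code:: python

    # hypothetical stand-in for the local Steinmetz dataset:
    # generate a synthetic drifting recording plus its ground-truth sorting
    import spikeinterface.generation as sgen

    static_recording, drifting_recording, gt_sorting = sgen.generate_drifting_recording(
        duration=300.0,
        seed=2308,
    )
    # from here on, `drifting_recording` can play the role of `recording_np1_imposed`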
+
+workdir = Path("/ssd980/working/hybrid/steinmetz_imposed_motion")
+workdir.mkdir(exist_ok=True)
+
+recording_np1_imposed = se.read_spikeglx("/hdd1/data/spikeglx/nick-steinmetz/dataset1/p1_g0_t0/")
+recording_preproc = spre.highpass_filter(recording_np1_imposed)
+recording_preproc = spre.common_reference(recording_preproc)
+
+# To visualize the drift, we can estimate the motion and plot it:
+
+# to correct for drift, we need a float dtype
+recording_preproc = spre.astype(recording_preproc, "float")
+_, motion_info = spre.correct_motion(
+    recording_preproc, preset="nonrigid_fast_and_accurate", n_jobs=4, progress_bar=True, output_motion_info=True
+)
+
+ax = sw.plot_drift_raster_map(
+    peaks=motion_info["peaks"],
+    peak_locations=motion_info["peak_locations"],
+    recording=recording_preproc,
+    cmap="Greys_r",
+    scatter_decimate=10,
+    depth_lim=(-10, 3000)
+)
+
+# ## Retrieve templates from database
+
+# +
+templates_info = sgen.fetch_templates_database_info()
+
+print(f"Number of templates in database: {len(templates_info)}")
+print(f"Template database columns: {templates_info.columns}")
+# -
+
+available_brain_areas = np.unique(templates_info.brain_area)
+print(f"Available brain areas: {available_brain_areas}")
+
+# Let's perform a query: templates from visual brain regions and at the "top" of the probe
+
+target_area = ["VISa5", "VISa6a", "VISp5", "VISp6a", "VISrl6b"]
+minimum_depth = 1500
+templates_selected_info = templates_info.query(f"brain_area in {target_area} and depth_along_probe > {minimum_depth}")
+len(templates_selected_info)
+
+# We can now retrieve the selected templates as a `Templates` object:
+
+templates_selected = sgen.query_templates_from_database(templates_selected_info, verbose=True)
+print(templates_selected)
+
+# While we selected templates from a target area and at certain depths, we can see that the template amplitudes are quite large. This will make spike sorting easy... we can further manipulate the `Templates` by rescaling, relocating, or further selecting with the `sgen.scale_template_to_range`, `sgen.relocate_templates`, and `sgen.select_templates` functions.
+#
+# In our case, let's rescale the amplitudes between 50 and 150 $\mu$V and relocate them towards the bottom half of the probe, where the activity looks interesting!

+# +
+min_amplitude = 50
+max_amplitude = 150
+templates_scaled = sgen.scale_template_to_range(
+    templates=templates_selected,
+    min_amplitude=min_amplitude,
+    max_amplitude=max_amplitude
+)
+
+min_displacement = 1000
+max_displacement = 3000
+templates_relocated = sgen.relocate_templates(
+    templates=templates_scaled,
+    min_displacement=min_displacement,
+    max_displacement=max_displacement
+)
+# -
+
+# Let's plot the selected templates:
+
+sparsity_plot = si.compute_sparsity(templates_relocated)
+fig = plt.figure(figsize=(10, 10))
+w = sw.plot_unit_templates(templates_relocated, sparsity=sparsity_plot, ncols=4, figure=fig)
+w.figure.subplots_adjust(wspace=0.5, hspace=0.7)
+
+# ## Constructing hybrid recordings
+#
+# We can now construct hybrid recordings with the selected templates.
+#
+# We will do this in two ways to show how important it is to account for drifts when injecting hybrid spikes.
+#
+# - For the first recording we will not pass the estimated motion (`recording_hybrid_ignore_drift`).
+# - For the second recording, we will pass and account for the estimated motion (`recording_hybrid_with_drift`).
+
+recording_hybrid_ignore_drift, sorting_hybrid = sgen.generate_hybrid_recording(
+    recording=recording_preproc, templates=templates_relocated, seed=2308
+)
+recording_hybrid_ignore_drift
+
+# Note that `generate_hybrid_recording` is warning us that we might want to account for drift!
+
+# by passing the `sorting_hybrid` object, we make sure that injected spikes are the same
+# this will take a bit more time because it's interpolating the templates to account for drifts
+recording_hybrid_with_drift, sorting_hybrid = sgen.generate_hybrid_recording(
+    recording=recording_preproc,
+    templates=templates_relocated,
+    motion=motion_info["motion"],
+    sorting=sorting_hybrid,
+    seed=2308,
+)
+recording_hybrid_with_drift
+
+# We can use the `SortingAnalyzer` to estimate spike locations and plot them:
+
+# +
+# construct analyzers and compute spike locations
+analyzer_hybrid_ignore_drift = si.create_sorting_analyzer(sorting_hybrid, recording_hybrid_ignore_drift)
+analyzer_hybrid_ignore_drift.compute(["random_spikes", "templates"])
+analyzer_hybrid_ignore_drift.compute("spike_locations", method="grid_convolution")
+
+analyzer_hybrid_with_drift = si.create_sorting_analyzer(sorting_hybrid, recording_hybrid_with_drift)
+analyzer_hybrid_with_drift.compute(["random_spikes", "templates"])
+analyzer_hybrid_with_drift.compute("spike_locations", method="grid_convolution")
+# -
+
+# Let's plot the added hybrid spikes using the drift maps:
+
+fig, axs = plt.subplots(ncols=2, figsize=(10, 7), sharex=True, sharey=True)
+_ = sw.plot_drift_raster_map(
+    peaks=motion_info["peaks"],
+    peak_locations=motion_info["peak_locations"],
+    recording=recording_preproc,
+    cmap="Greys_r",
+    scatter_decimate=10,
+    ax=axs[0],
+)
+_ = sw.plot_drift_raster_map(
+    sorting_analyzer=analyzer_hybrid_ignore_drift,
+    color_amplitude=False,
+    color="r",
+    scatter_decimate=10,
+    ax=axs[0]
+)
+_ = sw.plot_drift_raster_map(
+    peaks=motion_info["peaks"],
+    peak_locations=motion_info["peak_locations"],
+    recording=recording_preproc,
+    cmap="Greys_r",
+    scatter_decimate=10,
+    ax=axs[1],
+)
+_ = sw.plot_drift_raster_map(
+    sorting_analyzer=analyzer_hybrid_with_drift,
+    color_amplitude=False,
+    color="b",
+    scatter_decimate=10,
+    ax=axs[1]
+)
+axs[0].set_title("Hybrid spikes\nIgnoring drift")
+axs[1].set_title("Hybrid spikes\nAccounting for drift")
+axs[0].set_xlim(1000, 1500)
+axs[0].set_ylim(500, 2500)
+
+# We can clearly see that following the drift is essential in order to properly blend the hybrid spikes into the recording!
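Beyond the visual comparison, the two sets of estimated spike locations can be inspected directly. A minimal sketch (it assumes the `spike_locations` extension computed above, whose data is a structured array aligned with the sorting's spike vector and carrying a `y` depth field, as in current SpikeInterface versions):

.. code:: python

    # inspect how the estimated depths of the injected spikes spread per unit
    locs_ignore = analyzer_hybrid_ignore_drift.get_extension("spike_locations").get_data()
    locs_drift = analyzer_hybrid_with_drift.get_extension("spike_locations").get_data()

    spikes = sorting_hybrid.to_spike_vector()
    for unit_index, unit_id in enumerate(sorting_hybrid.unit_ids[:3]):
        mask = spikes["unit_index"] == unit_index
        print(
            f"unit {unit_id}: depth std ignoring drift = {np.std(locs_ignore['y'][mask]):.1f} um, "
            f"accounting for drift = {np.std(locs_drift['y'][mask]):.1f} um"
        )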
+
+# ## Ground-truth study
+#
+# In this section we will use the hybrid recording to benchmark a few spike sorters:
+#
+# - `Kilosort2.5`
+# - `Kilosort3`
+# - `Kilosort4`
+# - `Spyking-CIRCUS 2`
+
+# to speed up computations, let's first dump the recording to binary
+recording_hybrid_bin = recording_hybrid_with_drift.save(
+    folder=workdir / "hybrid_bin",
+    overwrite=True
+)
+
+# +
+datasets = {
+    "hybrid": (recording_hybrid_bin, sorting_hybrid),
+}
+
+cases = {
+    ("kilosort2.5", "hybrid"): {
+        "label": "KS2.5",
+        "dataset": "hybrid",
+        "run_sorter_params": {
+            "sorter_name": "kilosort2_5",
+        },
+    },
+    ("kilosort3", "hybrid"): {
+        "label": "KS3",
+        "dataset": "hybrid",
+        "run_sorter_params": {
+            "sorter_name": "kilosort3",
+        },
+    },
+    ("kilosort4", "hybrid"): {
+        "label": "KS4",
+        "dataset": "hybrid",
+        "run_sorter_params": {"sorter_name": "kilosort4", "nblocks": 5},
+    },
+    ("sc2", "hybrid"): {
+        "label": "spykingcircus2",
+        "dataset": "hybrid",
+        "run_sorter_params": {
+            "sorter_name": "spykingcircus2",
+        },
+    },
+}
+# -
+
+# +
+study_folder = workdir / "gt_study"
+
+gtstudy = sc.GroundTruthStudy(study_folder)
+# -
+
+# run the spike sorting jobs
+gtstudy.run_sorters(verbose=False, keep=True)
+
+# run the comparisons
+gtstudy.run_comparisons(exhaustive_gt=False)
+
+# ## Plot performances
+#
+# Given that we know exactly where we injected the hybrid spikes, we can now compute and plot performance metrics: accuracy, precision, and recall.
+#
+# In the following plot, the x axis is the unit index, while the y axis is the performance metric. The units are sorted by performance.
+
+w_perf = sw.plot_study_performances(gtstudy, figsize=(12, 7))
+w_perf.axes[0, 0].legend(loc=4)
+
+# From the performance plots, we can see that there is no clear "winner", but `Kilosort3` definitely performs worse than the other options.
+#
+# Although none of the sorters finds all units perfectly, `Kilosort2.5`, `Kilosort4`, and `SpyKING CIRCUS 2` all find around 10-12 hybrid units with accuracy greater than 80%.
+# `Kilosort4` has a better overall curve, being able to find almost all units with an accuracy above 50%. `Kilosort2.5` performs well when looking at precision (not assigning spikes that don't belong to a hybrid unit), at the cost of lower recall (missing some of the spikes that do).
+#
+#
+# In this example, we showed how to:
+#
+# - Access and fetch templates from the SpikeInterface template database
+# - Manipulate templates (scaling/relocating)
+# - Construct hybrid recordings accounting for drifts
+# - Use the `GroundTruthStudy` to benchmark different sorters
+#
+# The hybrid framework can be extended to target multiple recordings from different brain regions and species, and to create recordings of increasing complexity to challenge the existing sorters!
+#
+# In addition, hybrid studies can also be used to fine-tune spike sorting parameters on specific datasets.
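For reference, the accuracy, precision, and recall plotted above are agreement-based metrics computed from per-unit matched spike counts. A minimal sketch of the definitions with made-up counts (illustrative only, not values taken from this study):

.. code:: python

    # made-up counts for one hybrid unit
    tp = 950   # injected spikes recovered by the sorter (true positives)
    fn = 50    # injected spikes the sorter missed (false negatives)
    fp = 30    # sorter spikes not matching any injected spike (false positives)

    accuracy = tp / (tp + fn + fp)   # 0.92
    precision = tp / (tp + fp)       # 0.97
    recall = tp / (tp + fn)          # 0.95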
+# +# **Are you ready to try it on your data?** diff --git a/pyproject.toml b/pyproject.toml index 69f4067d13..2ba53328e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,6 +85,7 @@ streaming_extractors = [ "s3fs" ] + preprocessing = [ "scipy", ] @@ -124,10 +125,23 @@ test_core = [ # for github test : probeinterface and neo from master # for release we need pypi, so this need to be commented - # "probeinterface @ git+https://github.com/SpikeInterface/probeinterface.git", - # "neo @ git+https://github.com/NeuralEnsemble/python-neo.git", + "probeinterface @ git+https://github.com/SpikeInterface/probeinterface.git", + "neo @ git+https://github.com/NeuralEnsemble/python-neo.git", +] + +test_extractors = [ + # Functions to download data in neo test suite + "pooch>=1.8.2", + "datalad>=1.0.2", + "probeinterface @ git+https://github.com/SpikeInterface/probeinterface.git", + "neo @ git+https://github.com/NeuralEnsemble/python-neo.git", +] + +test_preprocessing = [ + "ibllib>=2.36.0", # for IBL ] + test = [ "pytest", "pytest-dependency", @@ -139,15 +153,16 @@ test = [ # preprocessing "ibllib>=2.36.0", # for IBL + # streaming templates + "s3fs", + # tridesclous "numba", "hdbscan>=0.8.33", # Previous version had a broken wheel # for sortingview backend "sortingview", - # Download data - "pooch>=1.8.2", - "datalad>=1.0.2", + ## install tridesclous for testing ## "tridesclous>=1.6.8", @@ -172,13 +187,16 @@ docs = [ "ipython", # for notebooks in the gallery - "MEArec", # Use as an example - "datalad==0.16.2", # Download mearec data, not sure if needed as is installed with conda as well because of git-annex + "MEArec", # Use as an example "pandas", # in the modules gallery comparison tutorial "hdbscan>=0.8.33", # For sorters spykingcircus2 + tridesclous "numba", # For many postprocessing functions "xarray", # For use of SortingAnalyzer zarr format "networkx", + # Download data + "pooch>=1.8.2", + "datalad>=1.0.2", + # for release we need pypi, so this needs to be commented # "probeinterface @ git+https://github.com/SpikeInterface/probeinterface.git", # We always build from the latest version # "neo @ git+https://github.com/NeuralEnsemble/python-neo.git", # We always build from the latest version diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 4922707b35..5800166f39 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -881,7 +881,7 @@ def save_to_folder( Parameters ---------- - name : str , optional + name : str or Path, optional The name of the subfolder within the global temporary folder. If `folder` is provided, this argument must be None. 
folder : str or Path, optional diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index aab7577b31..e70c95bb65 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -45,7 +45,9 @@ def __init__(self, sampling_frequency: float, channel_ids: list, dtype): self.annotate(is_filtered=False) def __repr__(self): - extractor_name = self.__class__.__name__ + + class_name = self.__class__.__name__ + name_to_display = class_name num_segments = self.get_num_segments() txt = self._repr_header() @@ -55,7 +57,7 @@ def __repr__(self): split_index = txt.rfind("-", 0, 100) # Find the last "-" before character 100 if split_index != -1: first_line = txt[:split_index] - recording_string_space = len(extractor_name) + 2 # Length of extractor_name plus ": " + recording_string_space = len(name_to_display) + 2 # Length of name_to_display plus ": " white_space_to_align_with_first_line = " " * recording_string_space second_line = white_space_to_align_with_first_line + txt[split_index + 1 :].lstrip() txt = first_line + "\n" + second_line @@ -95,7 +97,8 @@ def list_to_string(lst, max_size=6): return txt def _repr_header(self): - extractor_name = self.__class__.__name__ + class_name = self.__class__.__name__ + name_to_display = class_name num_segments = self.get_num_segments() num_channels = self.get_num_channels() sf_khz = self.get_sampling_frequency() / 1000.0 @@ -106,7 +109,7 @@ def _repr_header(self): total_memory_size = self.get_total_memory_size() txt = ( - f"{extractor_name}: " + f"{name_to_display}: " f"{num_channels} channels - " f"{sf_khz:0.1f}kHz - " f"{num_segments} segments - " @@ -848,8 +851,10 @@ def time_to_sample_index(self, time_s): sample_index = time_s * self.sampling_frequency else: sample_index = (time_s - self.t_start) * self.sampling_frequency + sample_index = round(sample_index) else: sample_index = np.searchsorted(self.time_vector, time_s, side="right") - 1 + return int(sample_index) def get_num_samples(self) -> int: diff --git a/src/spikeinterface/core/binaryfolder.py b/src/spikeinterface/core/binaryfolder.py index 546ac85f93..fca08d9c26 100644 --- a/src/spikeinterface/core/binaryfolder.py +++ b/src/spikeinterface/core/binaryfolder.py @@ -25,7 +25,6 @@ class BinaryFolderRecording(BinaryRecordingExtractor): The recording """ - extractor_name = "BinaryFolder" mode = "folder" name = "binaryfolder" diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index 8fb9a78f2a..64c1b9b2e6 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -52,7 +52,6 @@ class BinaryRecordingExtractor(BaseRecording): The recording Extractor """ - extractor_name = "BinaryRecording" mode = "file" name = "binary" diff --git a/src/spikeinterface/core/core_tools.py b/src/spikeinterface/core/core_tools.py index 066ab58d8c..1f2e644be6 100644 --- a/src/spikeinterface/core/core_tools.py +++ b/src/spikeinterface/core/core_tools.py @@ -75,6 +75,7 @@ class SIJsonEncoder(json.JSONEncoder): def default(self, obj): from spikeinterface.core.base import BaseExtractor + from spikeinterface.sortingcomponents.motion_utils import Motion # Over-write behaviors for datetime object if isinstance(obj, datetime.datetime): @@ -98,6 +99,12 @@ def default(self, obj): if isinstance(obj, BaseExtractor): return obj.to_dict() + if isinstance(obj, Path): + return str(obj) + + if isinstance(obj, Motion): + return obj.to_dict() 
+ # The base-class handles the assertion return super().default(obj) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 9924a22403..11909bce0e 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -3,7 +3,6 @@ import warnings import numpy as np from typing import Union, Optional, List, Literal -import warnings from math import ceil from .basesorting import SpikeVectorSortingSegment @@ -1441,7 +1440,6 @@ def generate_templates( dtype="float32", upsample_factor=None, unit_params=None, - unit_params_range=None, mode="ellipsoid", ): """ @@ -1498,9 +1496,7 @@ def generate_templates( * (num_units, num_samples, num_channels, upsample_factor) if upsample_factor is not None """ - unit_params = unit_params or dict() - unit_params_range = unit_params_range or dict() rng = np.random.default_rng(seed=seed) # neuron location must be 3D @@ -1858,7 +1854,7 @@ def get_traces( wf = template[start_template:end_template] if self.amplitude_vector is not None: wf = wf * self.amplitude_vector[i] - traces[start_traces:end_traces] += wf + traces[start_traces:end_traces] += wf.astype(traces.dtype, copy=False) return traces.astype(self.dtype, copy=False) diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index 0722ede23f..ceff8577d3 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -516,7 +516,6 @@ def _init_peak_pipeline(recording, nodes): worker_ctx["recording"] = recording worker_ctx["nodes"] = nodes worker_ctx["max_margin"] = max(node.get_trace_margin() for node in nodes) - return worker_ctx diff --git a/src/spikeinterface/core/npyfoldersnippets.py b/src/spikeinterface/core/npyfoldersnippets.py index 514a56fdf5..eec510aff8 100644 --- a/src/spikeinterface/core/npyfoldersnippets.py +++ b/src/spikeinterface/core/npyfoldersnippets.py @@ -26,7 +26,6 @@ class NpyFolderSnippets(NpySnippetsExtractor): The snippets """ - extractor_name = "NpyFolderSnippets" mode = "folder" name = "npyfolder" diff --git a/src/spikeinterface/core/npysnippetsextractor.py b/src/spikeinterface/core/npysnippetsextractor.py index fe66252c28..a5fb12a397 100644 --- a/src/spikeinterface/core/npysnippetsextractor.py +++ b/src/spikeinterface/core/npysnippetsextractor.py @@ -15,7 +15,6 @@ class NpySnippetsExtractor(BaseSnippets): All spike are store in two columns maner index+labels """ - extractor_name = "NpySnippets" mode = "file" name = "npy" diff --git a/src/spikeinterface/core/npzsortingextractor.py b/src/spikeinterface/core/npzsortingextractor.py index 5a40e3ba55..f60dadd8ec 100644 --- a/src/spikeinterface/core/npzsortingextractor.py +++ b/src/spikeinterface/core/npzsortingextractor.py @@ -16,7 +16,6 @@ class NpzSortingExtractor(BaseSorting): All spike are store in two columns maner index+labels """ - extractor_name = "NpzSortingExtractor" mode = "file" name = "npz" diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 0ba1c05417..1ee472ffa4 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -37,7 +37,6 @@ class NumpyRecording(BaseRecording): An optional list of channel_ids. 
If None, linear channels are assumed """ - extractor_name = "Numpy" mode = "memory" name = "numpy" @@ -143,7 +142,6 @@ class SharedMemoryRecording(BaseRecording): If True, the main instance will unlink the sharedmem buffer when deleted """ - extractor_name = "SharedMemory" mode = "memory" name = "SharedMemory" diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index 65de8aedf6..ea2f20d631 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -59,8 +59,6 @@ class NewToOldSorting: * unique segment """ - extractor_name = "NewToOldSorting" - def __init__(self, sorting): assert sorting.get_num_segments() == 1 self._sorting = sorting diff --git a/src/spikeinterface/core/sortinganalyzer.py b/src/spikeinterface/core/sortinganalyzer.py index d790308b76..fc20029ce6 100644 --- a/src/spikeinterface/core/sortinganalyzer.py +++ b/src/spikeinterface/core/sortinganalyzer.py @@ -970,7 +970,9 @@ def compute_one_extension(self, extension_name, save=True, verbose=False, **kwar extension_class = get_extension_class(extension_name) for child in _get_children_dependencies(extension_name): - self.delete_extension(child) + if self.has_extension(child): + print(f"Deleting {child}") + self.delete_extension(child) if extension_class.need_job_kwargs: params, job_kwargs = split_job_kwargs(kwargs) diff --git a/src/spikeinterface/core/sortingfolder.py b/src/spikeinterface/core/sortingfolder.py index ef6f524b6c..d0751d33fa 100644 --- a/src/spikeinterface/core/sortingfolder.py +++ b/src/spikeinterface/core/sortingfolder.py @@ -22,7 +22,6 @@ class NumpyFolderSorting(BaseSorting): """ - extractor_name = "NumpyFolderSorting" mode = "folder" name = "NumpyFolder" @@ -91,7 +90,6 @@ class NpzFolderSorting(NpzSortingExtractor): The sorting """ - extractor_name = "NpzFolder" mode = "folder" name = "npzfolder" diff --git a/src/spikeinterface/core/template.py b/src/spikeinterface/core/template.py index 066d79b6b4..b64f0610ea 100644 --- a/src/spikeinterface/core/template.py +++ b/src/spikeinterface/core/template.py @@ -353,9 +353,9 @@ def from_zarr_group(cls, zarr_group: "zarr.Group") -> "Templates": the `add_templates_to_zarr_group` method. 
""" - templates_array = zarr_group["templates_array"] - channel_ids = zarr_group["channel_ids"] - unit_ids = zarr_group["unit_ids"] + templates_array = zarr_group["templates_array"][:] + channel_ids = zarr_group["channel_ids"][:] + unit_ids = zarr_group["unit_ids"][:] sampling_frequency = zarr_group.attrs["sampling_frequency"] nbefore = zarr_group.attrs["nbefore"] @@ -364,7 +364,7 @@ def from_zarr_group(cls, zarr_group: "zarr.Group") -> "Templates": sparsity_mask = None if "sparsity_mask" in zarr_group: - sparsity_mask = zarr_group["sparsity_mask"] + sparsity_mask = zarr_group["sparsity_mask"][:] probe = None if "probe" in zarr_group: @@ -449,7 +449,7 @@ def __eq__(self, other): return True - def get_channel_locations(self): + def get_channel_locations(self) -> np.ndarray: assert self.probe is not None, "Templates.get_channel_locations() needs a probe to be set" channel_locations = self.probe.contact_positions return channel_locations diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py index 1ba9372322..934b18ed49 100644 --- a/src/spikeinterface/core/template_tools.py +++ b/src/spikeinterface/core/template_tools.py @@ -3,7 +3,6 @@ import warnings from .template import Templates -from .sparsity import _sparsity_doc from .sortinganalyzer import SortingAnalyzer @@ -50,7 +49,7 @@ def _get_nbefore(one_object): raise ValueError("SortingAnalyzer need extension 'templates' to be computed") return ext.nbefore else: - raise ValueError("Input should be Templates or SortingAnalyzer or SortingAnalyzer") + raise ValueError("Input should be Templates or SortingAnalyzer") def get_template_amplitudes( diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 4417ea342f..316dac3abc 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -7,11 +7,7 @@ from spikeinterface.core.core_tools import SIJsonEncoder from spikeinterface.core.generate import generate_recording, generate_sorting - -@pytest.fixture(scope="module") -def numpy_generated_recording(): - recording = generate_recording() - return recording +from pathlib import Path @pytest.fixture(scope="module") @@ -124,8 +120,25 @@ def test_numpy_dtype_alises_encoding(): json.dumps(np.float32, cls=SIJsonEncoder) -def test_recording_encoding(numpy_generated_recording): - recording = numpy_generated_recording +def test_path_encoding(tmp_path): + + temporary_path = tmp_path / "a_path_for_this_test" + + json.dumps(temporary_path, cls=SIJsonEncoder) + + +def test_path_as_annotation(tmp_path): + temporary_path = tmp_path / "a_path_for_this_test" + + recording = generate_recording() + recording.annotate(path=temporary_path) + + json.dumps(recording, cls=SIJsonEncoder) + + +def test_recording_encoding(): + recording = generate_recording() + json.dumps(recording, cls=SIJsonEncoder) @@ -200,4 +213,4 @@ def test_encoding_numpy_scalars_within_nested_extractors_dict(nested_extractor_d if __name__ == "__main__": nested_extractor = nested_extractor() - test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) + test_encoding_numpy_scalars_within_nested_extractors(nested_extractor) diff --git a/src/spikeinterface/core/zarrextractors.py b/src/spikeinterface/core/zarrextractors.py index ee8021fa72..4851c0eb5c 100644 --- a/src/spikeinterface/core/zarrextractors.py +++ b/src/spikeinterface/core/zarrextractors.py @@ -31,7 +31,6 @@ class ZarrRecordingExtractor(BaseRecording): The 
recording Extractor """ - extractor_name = "ZarrRecording" installed = True mode = "folder" installation_mesg = "" @@ -168,7 +167,6 @@ class ZarrSortingExtractor(BaseSorting): The sorting Extractor """ - extractor_name = "ZarrSorting" installed = True mode = "folder" installation_mesg = "" diff --git a/src/spikeinterface/exporters/tests/test_export_to_phy.py b/src/spikeinterface/exporters/tests/test_export_to_phy.py index 47294b3cf7..a54dbf7290 100644 --- a/src/spikeinterface/exporters/tests/test_export_to_phy.py +++ b/src/spikeinterface/exporters/tests/test_export_to_phy.py @@ -91,6 +91,69 @@ def test_export_to_phy_by_property(sorting_analyzer_with_group_for_export, creat assert template_inds.shape == (sorting_analyzer.unit_ids.size, 4) +def test_export_to_phy_metrics(sorting_analyzer_sparse_for_export, create_cache_folder): + cache_folder = create_cache_folder + + sorting_analyzer = sorting_analyzer_sparse_for_export + + # quality metrics are computed already + qm = sorting_analyzer.get_extension("quality_metrics").get_data() + output_folder = cache_folder / "phy_output_qm" + export_to_phy( + sorting_analyzer, + output_folder, + compute_pc_features=False, + compute_amplitudes=False, + n_jobs=1, + chunk_size=10000, + progress_bar=True, + add_quality_metrics=True, + ) + for col_name in qm.columns: + assert (output_folder / f"cluster_{col_name}.tsv").is_file() + + # quality metrics are computed already + tm_ext = sorting_analyzer.compute("template_metrics") + tm = tm_ext.get_data() + output_folder = cache_folder / "phy_output_tm_not_qm" + export_to_phy( + sorting_analyzer, + output_folder, + compute_pc_features=False, + compute_amplitudes=False, + n_jobs=1, + chunk_size=10000, + progress_bar=True, + add_quality_metrics=False, + add_template_metrics=True, + ) + for col_name in tm.columns: + assert (output_folder / f"cluster_{col_name}.tsv").is_file() + for col_name in qm.columns: + assert not (output_folder / f"cluster_{col_name}.tsv").is_file() + + # custom metrics + sorting_analyzer.sorting.set_property("custom_metric", np.random.rand(sorting_analyzer.unit_ids.size)) + output_folder = cache_folder / "phy_output_custom" + export_to_phy( + sorting_analyzer, + output_folder, + compute_pc_features=False, + compute_amplitudes=False, + n_jobs=1, + chunk_size=10000, + progress_bar=True, + add_quality_metrics=False, + add_template_metrics=False, + additional_properties=["custom_metric"], + ) + assert (output_folder / "cluster_custom_metric.tsv").is_file() + for col_name in tm.columns: + assert not (output_folder / f"cluster_{col_name}.tsv").is_file() + for col_name in qm.columns: + assert not (output_folder / f"cluster_{col_name}.tsv").is_file() + + if __name__ == "__main__": sorting_analyzer_sparse = make_sorting_analyzer(sparse=True) sorting_analyzer_group = make_sorting_analyzer(sparse=False, with_group=True) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index d7be6c1ba3..7b3c7daab0 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -25,8 +25,10 @@ def export_to_phy( sparsity: Optional[ChannelSparsity] = None, copy_binary: bool = True, remove_if_exists: bool = False, - peak_sign: Literal["both", "neg", "pos"] = "neg", template_mode: str = "average", + add_quality_metrics: bool = True, + add_template_metrics: bool = True, + additional_properties: list | None = None, dtype: Optional[npt.DTypeLike] = None, verbose: bool = True, use_relative_path: bool = False, @@ -51,10 +53,14 @@ def export_to_phy( If 
True, the recording is copied and saved in the phy "output_folder" remove_if_exists : bool, default: False If True and "output_folder" exists, it is removed and overwritten - peak_sign : "neg" | "pos" | "both", default: "neg" - Used by compute_spike_amplitudes template_mode : str, default: "average" Parameter "mode" to be given to SortingAnalyzer.get_template() + add_quality_metrics : bool, default: True + If True, quality metrics (if computed) are saved as Phy tsv files and will appear in the ClusterView. + add_template_metrics : bool, default: True + If True, template metrics (if computed) are saved as Phy tsv files and will appear in the ClusterView. + additional_properties : list | None, default: None + List of additional unit properties to be saved as Phy tsv files; they will appear in the ClusterView. dtype : dtype or None, default: None Dtype to save binary data verbose : bool, default: True @@ -244,7 +250,7 @@ export_to_phy( channel_group = pd.DataFrame({"cluster_id": [i for i in range(len(unit_ids))], "channel_group": unit_groups}) channel_group.to_csv(output_folder / "cluster_channel_group.tsv", sep="\t", index=False) - if sorting_analyzer.has_extension("quality_metrics"): + if sorting_analyzer.has_extension("quality_metrics") and add_quality_metrics: qm_data = sorting_analyzer.get_extension("quality_metrics").get_data() for column_name in qm_data.columns: # already computed by phy @@ -253,6 +259,19 @@ {"cluster_id": [i for i in range(len(unit_ids))], column_name: qm_data[column_name].values} ) metric.to_csv(output_folder / f"cluster_{column_name}.tsv", sep="\t", index=False) + if sorting_analyzer.has_extension("template_metrics") and add_template_metrics: + tm_data = sorting_analyzer.get_extension("template_metrics").get_data() + for column_name in tm_data.columns: + metric = pd.DataFrame( + {"cluster_id": [i for i in range(len(unit_ids))], column_name: tm_data[column_name].values} ) + metric.to_csv(output_folder / f"cluster_{column_name}.tsv", sep="\t", index=False) + if additional_properties is not None: + for prop_name in additional_properties: + prop_data = sorting.get_property(prop_name) + if prop_data is not None: + prop = pd.DataFrame({"cluster_id": [i for i in range(len(unit_ids))], prop_name: prop_data}) + prop.to_csv(output_folder / f"cluster_{prop_name}.tsv", sep="\t", index=False) if verbose: print("Run:\nphy template-gui ", str(output_folder / "params.py")) diff --git a/src/spikeinterface/extractors/alfsortingextractor.py b/src/spikeinterface/extractors/alfsortingextractor.py index f4287541da..fa6490135c 100644 --- a/src/spikeinterface/extractors/alfsortingextractor.py +++ b/src/spikeinterface/extractors/alfsortingextractor.py @@ -24,7 +24,6 @@ class ALFSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "ALFSorting" installation_mesg = "To use the ALF extractors, install ONE-api: \n\n pip install ONE-api\n\n" name = "alf" diff --git a/src/spikeinterface/extractors/cbin_ibl.py b/src/spikeinterface/extractors/cbin_ibl.py index 1687acb073..a09cea9863 100644 --- a/src/spikeinterface/extractors/cbin_ibl.py +++ b/src/spikeinterface/extractors/cbin_ibl.py @@ -39,7 +39,6 @@ class CompressedBinaryIblExtractor(BaseRecording): The loaded data.
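As a reading aid for the new export flags above, a minimal usage sketch (the `analyzer` object, folder name, and `custom_metric` property are illustrative; quality metrics are assumed to be computed beforehand):

```python
from spikeinterface.exporters import export_to_phy

# `analyzer` is assumed to be a SortingAnalyzer with "quality_metrics" computed
export_to_phy(
    analyzer,
    output_folder="phy_export",                # illustrative path
    compute_pc_features=False,
    compute_amplitudes=False,
    add_quality_metrics=True,                  # one cluster_<metric>.tsv per quality metric
    add_template_metrics=False,                # skip template metrics in this export
    additional_properties=["custom_metric"],   # hypothetical unit property set on the sorting
)
# each exported cluster_<name>.tsv column then shows up in Phy's ClusterView
```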
""" - extractor_name = "CompressedBinaryIbl" mode = "folder" installation_mesg = "To use the CompressedBinaryIblExtractor, install mtscomp: \n\n pip install mtscomp\n\n" name = "cbin_ibl" diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index 9b77965c43..736927a1ee 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -29,7 +29,6 @@ class CellExplorerSortingExtractor(BaseSorting): Path to the `sessionInfo.mat` file. If None, it will be inferred from the file_path. """ - extractor_name = "CellExplorerSortingExtractor" mode = "file" installation_mesg = "To use the CellExplorerSortingExtractor install pymatreader" diff --git a/src/spikeinterface/extractors/combinatoextractors.py b/src/spikeinterface/extractors/combinatoextractors.py index e0d01e10e5..8828ea8b64 100644 --- a/src/spikeinterface/extractors/combinatoextractors.py +++ b/src/spikeinterface/extractors/combinatoextractors.py @@ -37,7 +37,6 @@ class CombinatoSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "CombinatoSortingExtractor" installed = HAVE_H5PY installation_mesg = "To use the CombinatoSortingExtractor install h5py: \n\n pip install h5py\n\n" name = "combinato" diff --git a/src/spikeinterface/extractors/hdsortextractors.py b/src/spikeinterface/extractors/hdsortextractors.py index 1be49f70be..19038344ee 100644 --- a/src/spikeinterface/extractors/hdsortextractors.py +++ b/src/spikeinterface/extractors/hdsortextractors.py @@ -25,7 +25,6 @@ class HDSortSortingExtractor(MatlabHelper, BaseSorting): The loaded data. """ - extractor_name = "HDSortSortingExtractor" mode = "file" name = "hdsort" diff --git a/src/spikeinterface/extractors/herdingspikesextractors.py b/src/spikeinterface/extractors/herdingspikesextractors.py index 87f7dd74c4..4fe915a96b 100644 --- a/src/spikeinterface/extractors/herdingspikesextractors.py +++ b/src/spikeinterface/extractors/herdingspikesextractors.py @@ -31,7 +31,6 @@ class HerdingspikesSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "HS2Sorting" installed = HAVE_HS2SX # check at class level if installed or not mode = "file" installation_mesg = ( diff --git a/src/spikeinterface/extractors/iblextractors.py b/src/spikeinterface/extractors/iblextractors.py index 27bb95854f..34481c94f1 100644 --- a/src/spikeinterface/extractors/iblextractors.py +++ b/src/spikeinterface/extractors/iblextractors.py @@ -65,7 +65,6 @@ class IblRecordingExtractor(BaseRecording): The recording extractor which allows access to the traces. """ - extractor_name = "IblRecording" mode = "folder" installation_mesg = "To use the IblRecordingSegment, install ibllib: \n\n pip install ONE-api\npip install ibllib\n" name = "ibl_recording" @@ -305,11 +304,10 @@ class IblSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "IBLSorting" name = "ibl" installation_mesg = "IBL extractors require ibllib as a dependency." 
" To install, run: \n\n pip install ibllib\n\n" - def __init__(self, pid, good_clusters_only=False, load_unit_properties=True, one=None): + def __init__(self, pid: str, good_clusters_only: bool = False, load_unit_properties: bool = True, one=None): try: from one.api import ONE from brainbox.io.one import SpikeSortingLoader diff --git a/src/spikeinterface/extractors/klustaextractors.py b/src/spikeinterface/extractors/klustaextractors.py index dc01b3b9eb..82534771a1 100644 --- a/src/spikeinterface/extractors/klustaextractors.py +++ b/src/spikeinterface/extractors/klustaextractors.py @@ -43,7 +43,6 @@ class KlustaSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "KlustaSortingExtractor" installed = HAVE_H5PY # check at class level if installed or not installation_mesg = ( "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed diff --git a/src/spikeinterface/extractors/matlabhelpers.py b/src/spikeinterface/extractors/matlabhelpers.py index e9948575a2..ac701c89e6 100644 --- a/src/spikeinterface/extractors/matlabhelpers.py +++ b/src/spikeinterface/extractors/matlabhelpers.py @@ -5,7 +5,6 @@ class MatlabHelper: - extractor_name = "MATSortingExtractor" mode = "file" installation_mesg = ( "To use the MATSortingExtractor install h5py and scipy: " "\n\n pip install h5py scipy\n\n" diff --git a/src/spikeinterface/extractors/mclustextractors.py b/src/spikeinterface/extractors/mclustextractors.py index ed0454f682..5cfa583054 100644 --- a/src/spikeinterface/extractors/mclustextractors.py +++ b/src/spikeinterface/extractors/mclustextractors.py @@ -29,7 +29,6 @@ class MClustSortingExtractor(BaseSorting): Loaded data. """ - extractor_name = "MClustSortingExtractor" name = "mclust" def __init__(self, folder_path, sampling_frequency, sampling_frequency_raw=None): diff --git a/src/spikeinterface/extractors/mcsh5extractors.py b/src/spikeinterface/extractors/mcsh5extractors.py index c55f9d47db..f419b7e64d 100644 --- a/src/spikeinterface/extractors/mcsh5extractors.py +++ b/src/spikeinterface/extractors/mcsh5extractors.py @@ -24,7 +24,6 @@ class MCSH5RecordingExtractor(BaseRecording): The loaded data. """ - extractor_name = "MCSH5Recording" mode = "file" installation_mesg = ( "To use the MCSH5RecordingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index 0ecad27994..acc7be58dd 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -36,7 +36,6 @@ class MdaRecordingExtractor(BaseRecording): The loaded data. """ - extractor_name = "MdaRecording" mode = "folder" name = "mda" @@ -193,7 +192,6 @@ class MdaSortingExtractor(BaseSorting): The loaded data. """ - extractor_name = "MdaSorting" mode = "file" name = "mda" diff --git a/src/spikeinterface/extractors/neoextractors/neuroscope.py b/src/spikeinterface/extractors/neoextractors/neuroscope.py index 2f6502b1c8..104f47af24 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroscope.py +++ b/src/spikeinterface/extractors/neoextractors/neuroscope.py @@ -103,7 +103,6 @@ class NeuroScopeSortingExtractor(BaseSorting): Path to the .xml file referenced by this sorting. 
""" - extractor_name = "NeuroscopeSortingExtractor" name = "neuroscope" def __init__( diff --git a/src/spikeinterface/extractors/neoextractors/plexon2.py b/src/spikeinterface/extractors/neoextractors/plexon2.py index c7351a308b..6c9160f13b 100644 --- a/src/spikeinterface/extractors/neoextractors/plexon2.py +++ b/src/spikeinterface/extractors/neoextractors/plexon2.py @@ -13,8 +13,8 @@ class Plexon2RecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path : str - The file path to load the recordings from. + file_path : str | Path + The file path of the plexon2 file. It should have the .pl2 extension. stream_id : str, default: None If there are several streams, specify the stream id you want to load. stream_name : str, default: None @@ -23,7 +23,7 @@ class Plexon2RecordingExtractor(NeoBaseRecordingExtractor): If True, the names of the signals are used as channel ids. If False, the channel ids are a combination of the source id and the channel index. - Example for widegain signals: + Example for wideband signals: names: ["WB01", "WB02", "WB03", "WB04"] ids: ["source3.1" , "source3.2", "source3.3", "source3.4"] all_annotations : bool, default: False diff --git a/src/spikeinterface/extractors/nwbextractors.py b/src/spikeinterface/extractors/nwbextractors.py index ccb2ff4370..9786766af1 100644 --- a/src/spikeinterface/extractors/nwbextractors.py +++ b/src/spikeinterface/extractors/nwbextractors.py @@ -472,7 +472,6 @@ class NwbRecordingExtractor(BaseRecording): >>> rec = NwbRecordingExtractor(s3_url, stream_mode="fsspec", stream_cache_path="cache") """ - extractor_name = "NwbRecording" mode = "file" name = "nwb" installation_mesg = "To use the Nwb extractors, install pynwb: \n\n pip install pynwb\n\n" @@ -1001,7 +1000,6 @@ class NwbSortingExtractor(BaseSorting): The sorting extractor for the NWB file. """ - extractor_name = "NwbSorting" mode = "file" installation_mesg = "To use the Nwb extractors, install pynwb: \n\n pip install pynwb\n\n" name = "nwb" diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index e65ff0adfb..3287f7422f 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -26,7 +26,6 @@ class BasePhyKilosortSortingExtractor(BaseSorting): If True, all cluster properties are loaded from the tsv/csv files. """ - extractor_name = "BasePhyKilosortSorting" installed = False # check at class level if installed or not mode = "folder" installation_mesg = ( @@ -229,7 +228,6 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): The loaded Sorting object. """ - extractor_name = "PhySorting" name = "phy" def __init__( @@ -271,7 +269,6 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): The loaded Sorting object. """ - extractor_name = "KiloSortSorting" name = "kilosort" def __init__(self, folder_path: Path | str, keep_good_only: bool = False, remove_empty_units: bool = True): diff --git a/src/spikeinterface/extractors/shybridextractors.py b/src/spikeinterface/extractors/shybridextractors.py index 86a5276d62..b53b3b2056 100644 --- a/src/spikeinterface/extractors/shybridextractors.py +++ b/src/spikeinterface/extractors/shybridextractors.py @@ -30,7 +30,6 @@ class SHYBRIDRecordingExtractor(BinaryRecordingExtractor): Loaded data. 
""" - extractor_name = "SHYBRIDRecording" mode = "folder" installation_mesg = ( "To use the SHYBRID extractors, install SHYBRID and pyyaml: " "\n\n pip install shybrid pyyaml\n\n" @@ -159,7 +158,6 @@ class SHYBRIDSortingExtractor(BaseSorting): Loaded data. """ - extractor_name = "SHYBRIDSorting" installation_mesg = "To use the SHYBRID extractors, install SHYBRID: \n\n pip install shybrid\n\n" name = "shybrid" diff --git a/src/spikeinterface/extractors/spykingcircusextractors.py b/src/spikeinterface/extractors/spykingcircusextractors.py index af966cb823..7c3fb154fe 100644 --- a/src/spikeinterface/extractors/spykingcircusextractors.py +++ b/src/spikeinterface/extractors/spykingcircusextractors.py @@ -29,7 +29,6 @@ class SpykingCircusSortingExtractor(BaseSorting): Loaded data. """ - extractor_name = "SpykingCircusSortingExtractor" installed = HAVE_H5PY # check at class level if installed or not mode = "folder" installation_mesg = "To use the SpykingCircusSortingExtractor install h5py: \n\n pip install h5py\n\n" diff --git a/src/spikeinterface/extractors/tridesclousextractors.py b/src/spikeinterface/extractors/tridesclousextractors.py index 531af3d9da..8589f03fd4 100644 --- a/src/spikeinterface/extractors/tridesclousextractors.py +++ b/src/spikeinterface/extractors/tridesclousextractors.py @@ -22,7 +22,6 @@ class TridesclousSortingExtractor(BaseSorting): Loaded data. """ - extractor_name = "TridesclousSortingExtractor" mode = "folder" installation_mesg = "To use the TridesclousSortingExtractor install tridesclous: \n\n pip install tridesclous\n\n" # error message when not installed name = "tridesclous" diff --git a/src/spikeinterface/extractors/waveclussnippetstextractors.py b/src/spikeinterface/extractors/waveclussnippetstextractors.py index 56f66aec8a..7c26eee7bd 100644 --- a/src/spikeinterface/extractors/waveclussnippetstextractors.py +++ b/src/spikeinterface/extractors/waveclussnippetstextractors.py @@ -10,7 +10,6 @@ class WaveClusSnippetsExtractor(MatlabHelper, BaseSnippets): - extractor_name = "WaveClusSnippetsExtractor" name = "waveclus" def __init__(self, file_path): diff --git a/src/spikeinterface/extractors/waveclustextractors.py b/src/spikeinterface/extractors/waveclustextractors.py index 02c668a902..844b1cc7cf 100644 --- a/src/spikeinterface/extractors/waveclustextractors.py +++ b/src/spikeinterface/extractors/waveclustextractors.py @@ -25,7 +25,6 @@ class WaveClusSortingExtractor(MatlabHelper, BaseSorting): Loaded data. """ - extractor_name = "WaveClusSortingExtractor" name = "waveclus" def __init__(self, file_path, keep_good_only=True): diff --git a/src/spikeinterface/extractors/yassextractors.py b/src/spikeinterface/extractors/yassextractors.py index 729df81c65..61a49ccf01 100644 --- a/src/spikeinterface/extractors/yassextractors.py +++ b/src/spikeinterface/extractors/yassextractors.py @@ -29,7 +29,6 @@ class YassSortingExtractor(BaseSorting): Loaded data. 
""" - extractor_name = "YassExtractor" mode = "folder" installed = HAVE_YAML # check at class level if installed or not installation_mesg = ( diff --git a/src/spikeinterface/generation/__init__.py b/src/spikeinterface/generation/__init__.py index eae6320e8d..7a2291d932 100644 --- a/src/spikeinterface/generation/__init__.py +++ b/src/spikeinterface/generation/__init__.py @@ -5,6 +5,14 @@ InjectDriftingTemplatesRecording, make_linear_displacement, ) + +from .hybrid_tools import ( + generate_hybrid_recording, + estimate_templates_from_recording, + select_templates, + scale_template_to_range, + relocate_templates, +) from .noise_tools import generate_noise from .drifting_generator import ( make_one_displacement_vector, diff --git a/src/spikeinterface/generation/drift_tools.py b/src/spikeinterface/generation/drift_tools.py index 99e4f4d36e..cce2e08b58 100644 --- a/src/spikeinterface/generation/drift_tools.py +++ b/src/spikeinterface/generation/drift_tools.py @@ -1,10 +1,12 @@ from __future__ import annotations + +import math from typing import Optional import numpy as np from numpy.typing import ArrayLike -from spikeinterface.core import Templates, BaseRecording, BaseSorting, BaseRecordingSegment -import math +from probeinterface import Probe +from spikeinterface.core import BaseRecording, BaseRecordingSegment, BaseSorting, Templates def interpolate_templates(templates_array, source_locations, dest_locations, interpolation_method="cubic"): @@ -116,22 +118,80 @@ class DriftingTemplates(Templates): This is the same strategy used by MEArec. """ - def __init__(self, **kwargs): - Templates.__init__(self, **kwargs) + def __init__(self, templates_array_moved=None, displacements=None, **static_kwargs): + Templates.__init__(self, **static_kwargs) assert self.probe is not None, "DriftingTemplates need a Probe in the init" - - self.templates_array_moved = None - self.displacements = None + if templates_array_moved is not None: + if displacements is None: + raise ValueError( + "Please pass both template_array_moved and displacements to DriftingTemplates " + "if you are using precomputed displaced templates." + ) + self.templates_array_moved = templates_array_moved + self.displacements = displacements @classmethod - def from_static(cls, templates): - drifting_teplates = cls( + def from_static_templates(cls, templates: Templates): + """ + Construct a DriftingTemplates object given static templates. + The drifting templates can be then computed using the `precompute_displacements` method. + + Parameters + ---------- + templates : Templates + The static templates. + + Returns + ------- + drifting_templates : DriftingTemplates + The drifting templates object. + + """ + drifting_templates = cls( templates_array=templates.templates_array, sampling_frequency=templates.sampling_frequency, nbefore=templates.nbefore, probe=templates.probe, ) - return drifting_teplates + return drifting_templates + + @classmethod + def from_precomputed_templates( + cls, + templates_array_moved: ArrayLike, + displacements: ArrayLike, + sampling_frequency: float, + nbefore: int, + probe: Probe, + ): + """Construct a DriftingTemplates object given precomputed drifting templates + + Parameters + ---------- + templates_array_moved : np.array + Shape is (num_displacement, num_templates, num_samples, num_channels) + displacements : np.array + Shape is (num_displacement, 2). Last axis is xy, as in make_linear_displacement below. 
+ sampling_frequency : float + nbefore : int + probe : probeinterface.Probe + + Returns + ------- + drifting_templates : DriftingTemplates + The drifting templates object. + """ + # take the central templates as representatives, just to make the super() + # constructor happy. They won't be used as drifting templates. + templates_static = templates_array_moved[templates_array_moved.shape[0] // 2] + return cls( + templates_array=templates_static, + sampling_frequency=sampling_frequency, + nbefore=nbefore, + probe=probe, + templates_array_moved=templates_array_moved, + displacements=displacements, + ) def move_one_template(self, unit_index, displacement, **interpolation_kwargs): """ @@ -442,7 +502,8 @@ def __init__( # TODO: self.upsample_vector = upsample_vector self.upsample_vector = None self.parent_recording = parent_recording_segment - self.num_samples = parent_recording_segment.get_num_frames() if num_samples is None else num_samples + self.num_samples = parent_recording_segment.get_num_samples() if num_samples is None else num_samples + self.num_samples = int(self.num_samples) self.displacement_indices = displacement_indices self.templates_array_moved = templates_array_moved @@ -506,8 +567,8 @@ def get_traces( wf = template[start_template:end_template] if self.amplitude_vector is not None: - wf *= self.amplitude_vector[i] - traces[start_traces:end_traces] += wf + wf = wf * self.amplitude_vector[i] + traces[start_traces:end_traces] += wf.astype(self.dtype, copy=False) return traces.astype(self.dtype) diff --git a/src/spikeinterface/generation/drifting_generator.py b/src/spikeinterface/generation/drifting_generator.py index 7f617c3ade..a0e8ece37e 100644 --- a/src/spikeinterface/generation/drifting_generator.py +++ b/src/spikeinterface/generation/drifting_generator.py @@ -25,12 +25,18 @@ # this should be moved in probeinterface but later _toy_probes = { + "Neuropixel-384": dict( + num_columns=4, + num_contact_per_column=[96] * 4, + xpitch=16, + ypitch=40, + y_shift_per_column=[20, 0, 20, 0], + contact_shapes="square", + contact_shape_params={"width": 12}, + ), "Neuropixel-128": dict( num_columns=4, - num_contact_per_column=[ - 32, - ] - * 4, + num_contact_per_column=[32] * 4, xpitch=16, ypitch=40, y_shift_per_column=[20, 0, 20, 0], @@ -66,22 +72,24 @@ def make_one_displacement_vector( Parameters ---------- - drift_mode: "zigzag" | "bumps", default: "zigzag" - The drift mode - duration: float, default: 600 + drift_mode : "zigzag" | "bumps", default: "zigzag" + The drift mode. + duration : float, default: 600 Duration in seconds - displacement_sampling_frequency: float, default: 5 - Sample rate of the vector - t_start_drift: float | None, default: None - Time in s when drift starts - t_end_drift: float | None, default: None - Time in s when drift ends - period_s: float, default: 200. + amplitude_factor : float, default: 1 + The amplitude factor of the drift. + displacement_sampling_frequency : float, default: 5 + Sample rate of the vector. + t_start_drift : float | None, default: None + Time in s when drift starts. + t_end_drift : float | None, default: None + Time in s when drift ends. + period_s : float, default: 200. Period of the zigzag in seconds - bump_interval_s: tuple, default: (30, 90.) - Range interval between random bumps in seconds - seed: None | int - The seed for the random bumps + bump_interval_s : tuple, default: (30, 90.) + Range interval between random bumps in seconds. + seed : None | int + The seed for the random bumps.
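A sketch of the two construction paths for DriftingTemplates introduced above, assuming `templates` is an existing Templates object with a probe attached:

```python
import numpy as np
from spikeinterface.generation.drift_tools import DriftingTemplates

# path 1: start from static templates and precompute displaced versions
drifting = DriftingTemplates.from_static_templates(templates)
displacements = np.array([[0.0, d] for d in np.linspace(-20.0, 20.0, 5)])  # (num_displacement, 2), xy
drifting.precompute_displacements(displacements)

# path 2: rebuild directly from the precomputed arrays
drifting2 = DriftingTemplates.from_precomputed_templates(
    templates_array_moved=drifting.templates_array_moved,
    displacements=drifting.displacements,
    sampling_frequency=templates.sampling_frequency,
    nbefore=templates.nbefore,
    probe=templates.probe,
)
```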
Returns ------- @@ -170,34 +178,34 @@ def generate_displacement_vector( Parameters ---------- - duration: float + duration : float Duration of the displacement vector in seconds - unit_locations: np.array + unit_locations : np.array The unit location with shape (num_units, 3) - displacement_sampling_frequency: float, default: 5. + displacement_sampling_frequency : float, default: 5. The sampling frequency of the displacement vector - drift_start_um: list of float, default: [0, 20.] - The start boundary of the motion - drift_stop_um: list of float, default: [0, -20.] - The stop boundary of the motion - drift_step_um: float, default: 1 + drift_start_um : list of float, default: [0, 20.] + The start boundary of the motion in the x and y direction. + drift_stop_um : list of float, default: [0, -20.] + The stop boundary of the motion in the x and y direction. + drift_step_um : float, default: 1 Used to create the displacements_steps array. This ensures an odd number of steps - motion_list: list of dict + motion_list : list of dict List of dicts containing individual motion vector parameters. len(motion_list) == displacement_vectors.shape[2] Returns ------- - displacement_vectors: numpy.ndarray + displacement_vectors : numpy.ndarray The drift vector is a numpy array with shape (num_times, 2, num_motions) num_motions is generally 1, but can be > 1 in case of combining several drift vectors - displacement_unit_factor: numpy array | None, default: None + displacement_unit_factor : numpy array | None, default: None An array containing the factor per unit of each drift (num_units, num_motions). This is used to create non-rigid drift with a factor gradient depending on the unit positions - displacement_sampling_frequency: float + displacement_sampling_frequency : float The sampling frequency of drift vector - displacements_steps: numpy array + displacements_steps : numpy array Position of the motion steps (from start to step) with shape (num_step, 2) """ @@ -295,38 +303,38 @@ def generate_drifting_recording( Parameters ---------- - num_units: int, default: 250 + num_units : int, default: 250 Number of units. - duration: float, default: 600. + duration : float, default: 600. The duration in seconds. - sampling_frequency: float, dfault: 30000. + sampling_frequency : float, default: 30000. The sampling frequency. - probe_name: str, default: "Neuropixel-128" + probe_name : str, default: "Neuropixel-128" The probe type if generate_probe_kwargs is None. - generate_probe_kwargs: None or dict + generate_probe_kwargs : None or dict A dict to generate the probe, this supersedes probe_name when not None. - generate_unit_locations_kwargs: dict + generate_unit_locations_kwargs : dict Parameters given to generate_unit_locations(). - generate_displacement_vector_kwargs: dict + generate_displacement_vector_kwargs : dict Parameters given to generate_displacement_vector(). - generate_templates_kwargs: dict + generate_templates_kwargs : dict Parameters given to generate_templates() - generate_sorting_kwargs: dict + generate_sorting_kwargs : dict Parameters given to generate_sorting(). - generate_noise_kwargs: dict + generate_noise_kwargs : dict Parameters given to generate_noise(). - extra_outputs: bool, default False + extra_outputs : bool, default: False Optionally return a dict with more variables. - seed: None ot int + seed : None or int A unique seed for all steps. Returns ------- - static_recording: Recording + static_recording : Recording A generated recording with no motion.
- drifting_recording: Recording + drifting_recording : Recording A generated recording with motion. - sorting: Sorting + sorting : Sorting The ground truth sorting object. Same for both recordings. extra_infos: @@ -407,7 +415,7 @@ def generate_drifting_recording( is_scaled=True, ) - drifting_templates = DriftingTemplates.from_static(templates) + drifting_templates = DriftingTemplates.from_static_templates(templates) sorting = generate_sorting( num_units=num_units, diff --git a/src/spikeinterface/generation/hybrid_tools.py b/src/spikeinterface/generation/hybrid_tools.py new file mode 100644 index 0000000000..8f2ef0ec21 --- /dev/null +++ b/src/spikeinterface/generation/hybrid_tools.py @@ -0,0 +1,568 @@ +from __future__ import annotations + +import warnings +from typing import Literal +import numpy as np + +from spikeinterface.core import BaseRecording, BaseSorting, Templates + +from spikeinterface.core.generate import ( + generate_templates, + generate_unit_locations, + generate_sorting, + InjectTemplatesRecording, + _ensure_seed, +) +from spikeinterface.core.template_tools import get_template_extremum_channel + +from spikeinterface.sortingcomponents.motion import Motion + +from spikeinterface.generation.drift_tools import ( + InjectDriftingTemplatesRecording, + DriftingTemplates, + make_linear_displacement, + interpolate_templates, + move_dense_templates, +) + + +def estimate_templates_from_recording( + recording: BaseRecording, + ms_before: float = 2, + ms_after: float = 2, + sorter_name: str = "spykingcircus2", + run_sorter_kwargs: dict | None = None, + job_kwargs: dict | None = None, +): + """ + Get dense templates from a recording. Internally, SpyKING CIRCUS 2 is used by default + with the only twist that the template matching step is not launched. Instead, a Templates + object is returned based on the results of the clustering. Other sorters can be invoked + with the `sorter_name` and `run_sorter_kwargs` parameters. + + Parameters + ---------- + ms_before : float + The time before peaks of templates. + ms_after : float + The time after peaks of templates. + sorter_name : str + The sorter to be used in order to get some fast clustering. + run_sorter_kwargs : dict + The parameters to provide to the run_sorter function of spikeinterface. + job_kwargs : dict + The job keyword arguments to be used in the estimation of the templates.
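A usage sketch for `estimate_templates_from_recording`, mirroring the options above; `recording`, the working folder, and the job settings are assumptions:

```python
from spikeinterface.generation import estimate_templates_from_recording

templates = estimate_templates_from_recording(
    recording,                      # any BaseRecording
    ms_before=2,
    ms_after=2,
    sorter_name="spykingcircus2",   # requires spykingcircus2 to be installed
    run_sorter_kwargs=dict(folder="sc_tmp", remove_existing_folder=True),
    job_kwargs=dict(n_jobs=4),
)
print(templates.num_units)
```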
+ + Returns + ------- + templates: Templates + The estimated templates + """ + from spikeinterface.core.waveform_tools import estimate_templates + from spikeinterface.sorters.runsorter import run_sorter + + run_sorter_kwargs = run_sorter_kwargs or {} + if sorter_name == "spykingcircus2": + if "matching" not in run_sorter_kwargs: + run_sorter_kwargs["matching"] = {"method": None} + + sorting = run_sorter(sorter_name, recording, **run_sorter_kwargs) + + spikes = sorting.to_spike_vector() + unit_ids = sorting.unit_ids + sampling_frequency = recording.get_sampling_frequency() + nbefore = int(ms_before * sampling_frequency / 1000.0) + nafter = int(ms_after * sampling_frequency / 1000.0) + + job_kwargs = job_kwargs or {} + templates_array = estimate_templates(recording, spikes, unit_ids, nbefore, nafter, **job_kwargs) + + sparsity_mask = None + channel_ids = recording.channel_ids + probe = recording.get_probe() + + templates = Templates( + templates_array, sampling_frequency, nbefore, True, sparsity_mask, channel_ids, unit_ids, probe=probe + ) + + return templates + + +def select_templates( + templates: Templates, + min_amplitude: float | None = None, + max_amplitude: float | None = None, + min_depth: float | None = None, + max_depth: float | None = None, + amplitude_function: Literal["ptp", "min", "max"] = "ptp", + depth_direction: Literal["x", "y"] = "y", +): + """ + Select templates from an existing Templates object based on amplitude and depth. + + Parameters + ---------- + templates : Templates + The input templates. + min_amplitude : float | None, default: None + The minimum amplitude of the templates. + max_amplitude : float | None, default: None + The maximum amplitude of the templates. + min_depth : float | None, default: None + The minimum depth of the templates. + max_depth : float | None, default: None + The maximum depth of the templates. + amplitude_function : "ptp" | "min" | "max", default: "ptp" + The function to use to compute the amplitude of the templates. Can be "ptp", "min" or "max". + depth_direction : "x" | "y", default: "y" + The depth direction along which the templates are selected. Can be "x" or "y".
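Given the parameters documented above, a sketch of filtering an assumed `templates` object by amplitude and depth (threshold values are illustrative):

```python
from spikeinterface.generation import select_templates

selected = select_templates(
    templates,
    min_amplitude=50,        # in the templates' own units
    max_amplitude=500,
    min_depth=100,           # um along depth_direction
    max_depth=1000,
    amplitude_function="ptp",
    depth_direction="y",
)
# returns None (with a warning) if no template passes the filters
```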
+ + Returns + ------- + Templates + The selected templates + """ + assert ( + min_amplitude is not None or max_amplitude is not None or min_depth is not None or max_depth is not None + ), "At least one of min_amplitude, max_amplitude, min_depth, max_depth should be provided" + # get template amplitudes and depth + extremum_channel_indices = list(get_template_extremum_channel(templates, outputs="index").values()) + extremum_channel_indices = np.array(extremum_channel_indices, dtype=int) + + mask = np.ones(templates.num_units, dtype=bool) + if min_amplitude is not None or max_amplitude is not None: + # filter amplitudes + if amplitude_function == "ptp": + amp_fun = np.ptp + elif amplitude_function == "min": + amp_fun = np.min + elif amplitude_function == "max": + amp_fun = np.max + amplitudes = np.zeros(templates.num_units) + templates_array = templates.templates_array + for i in range(templates.num_units): + amplitudes[i] = amp_fun(templates_array[i, :, extremum_channel_indices[i]]) + if min_amplitude is not None: + mask &= amplitudes >= min_amplitude + if max_amplitude is not None: + mask &= amplitudes <= max_amplitude + if min_depth is not None or max_depth is not None: + assert templates.probe is not None, "Templates should have a probe to filter based on depth" + depth_dimension = ["x", "y"].index(depth_direction) + channel_depths = templates.get_channel_locations()[:, depth_dimension] + unit_depths = channel_depths[extremum_channel_indices] + if min_depth is not None: + mask &= unit_depths >= min_depth + if max_depth is not None: + mask &= unit_depths <= max_depth + if np.sum(mask) == 0: + warnings.warn("No templates left after filtering") + return None + filtered_unit_ids = templates.unit_ids[mask] + filtered_templates = templates.select_units(filtered_unit_ids) + + return filtered_templates + + +def scale_template_to_range( + templates: Templates, + min_amplitude: float, + max_amplitude: float, + amplitude_function: Literal["ptp", "min", "max"] = "ptp", +): + """ + Scale templates to have a range with the provided minimum and maximum amplitudes. + + Parameters + ---------- + templates : Templates + The input templates. + min_amplitude : float + The minimum amplitude of the output templates after scaling. + max_amplitude : float + The maximum amplitude of the output templates after scaling. + + Returns + ------- + Templates + The scaled templates. 
+ """ + extremum_channel_indices = list(get_template_extremum_channel(templates, outputs="index").values()) + extremum_channel_indices = np.array(extremum_channel_indices, dtype=int) + + # get amplitudes + if amplitude_function == "ptp": + amp_fun = np.ptp + elif amplitude_function == "min": + amp_fun = np.min + elif amplitude_function == "max": + amp_fun = np.max + amplitudes = np.zeros(templates.num_units) + templates_array = templates.templates_array + for i in range(templates.num_units): + amplitudes[i] = amp_fun(templates_array[i, :, extremum_channel_indices[i]]) + + # scale templates to meet min_amplitude and max_amplitude range + min_scale = np.min(amplitudes) / min_amplitude + max_scale = np.max(amplitudes) / max_amplitude + m = (max_scale - min_scale) / (np.max(amplitudes) - np.min(amplitudes)) + scales = m * (amplitudes - np.min(amplitudes)) + min_scale + + scaled_templates_array = templates.templates_array / scales[:, None, None] + + return Templates( + templates_array=scaled_templates_array, + sampling_frequency=templates.sampling_frequency, + nbefore=templates.nbefore, + sparsity_mask=templates.sparsity_mask, + channel_ids=templates.channel_ids, + unit_ids=templates.unit_ids, + probe=templates.probe, + ) + + +def relocate_templates( + templates: Templates, + min_displacement: float, + max_displacement: float, + margin: float = 0.0, + favor_borders: bool = True, + depth_direction: Literal["x", "y"] = "y", + seed: int | None = None, +): + """ + Relocates templates to have a minimum and maximum displacement. + + Parameters + ---------- + templates : Templates + The input templates + min_displacement : float + The minimum displacement of the templates + max_displacement : float + The maximum displacement of the templates + margin : float, default: 0.0 + The margin to keep between the templates and the borders of the probe. + If greater than 0, the templates are allowed to go beyond the borders of the probe. + favor_borders : bool, default: True + If True, the templates are always moved to the borders of the probe if this is + possoble based on the min_displacement and max_displacement constraints. + This avoids a bias in moving templates towards the center of the probe. + depth_direction : "x" | "y", default: "y" + The direction in which to move the templates. Can be "x" or "y" + seed : int or None, default: None + Seed for random initialization. + + + Returns + ------- + Templates + The relocated templates. 
+ """ + seed = _ensure_seed(seed) + + extremum_channel_indices = list(get_template_extremum_channel(templates, outputs="index").values()) + extremum_channel_indices = np.array(extremum_channel_indices, dtype=int) + depth_dimension = ["x", "y"].index(depth_direction) + channel_depths = templates.get_channel_locations()[:, depth_dimension] + unit_depths = channel_depths[extremum_channel_indices] + + assert margin >= 0, "margin should be positive" + top_margin = np.max(channel_depths) + margin + bottom_margin = np.min(channel_depths) - margin + + templates_array_moved = np.zeros_like(templates.templates_array, dtype=templates.templates_array.dtype) + + rng = np.random.default_rng(seed) + displacements = rng.uniform(low=min_displacement, high=max_displacement, size=templates.num_units) + for i in range(templates.num_units): + # by default, displacement is positive + displacement = displacements[i] + unit_depth = unit_depths[i] + if not favor_borders: + displacement *= rng.choice([-1.0, 1.0]) + if unit_depth + displacement > top_margin: + displacement = -displacement + elif unit_depth - displacement < bottom_margin: + displacement = -displacement + else: + # check if depth is closer to top or bottom + if unit_depth > (top_margin - bottom_margin) / 2: + # if over top margin, move down + if unit_depth + displacement > top_margin: + displacement = -displacement + else: + # if within bottom margin, move down + if unit_depth - displacement >= bottom_margin: + displacement = -displacement + displacement_vector = np.zeros(2) + displacement_vector[depth_dimension] = displacement + templates_array_moved[i] = move_dense_templates( + templates.templates_array[i][None], + displacements=displacement_vector[None], + source_probe=templates.probe, + )[0] + + return Templates( + templates_array=templates_array_moved, + sampling_frequency=templates.sampling_frequency, + nbefore=templates.nbefore, + sparsity_mask=templates.sparsity_mask, + channel_ids=templates.channel_ids, + unit_ids=templates.unit_ids, + probe=templates.probe, + ) + + +def generate_hybrid_recording( + recording: BaseRecording, + sorting: BaseSorting | None = None, + templates: Templates | None = None, + motion: Motion | None = None, + are_templates_scaled: bool = True, + unit_locations: np.ndarray | None = None, + drift_step_um: float = 1.0, + upsample_factor: int | None = None, + upsample_vector: np.ndarray | None = None, + amplitude_std: float = 0.05, + generate_sorting_kwargs: dict = dict(num_units=10, firing_rates=15, refractory_period_ms=4.0, seed=2205), + generate_unit_locations_kwargs: dict = dict(margin_um=10.0, minimum_z=5.0, maximum_z=50.0, minimum_distance=20), + generate_templates_kwargs: dict = dict(ms_before=1.0, ms_after=3.0), + seed: int | None = None, +) -> tuple[BaseRecording, BaseSorting]: + """ + Generate an hybrid recording with spike given sorting+templates. + + The function starts from an existing recording and injects hybrid units in it. + The templates can be provided or generated. If the templates are not provided, + they are generated (using the `spikeinterface.core.generate.generate_templates()` function + and with arguments provided in `generate_templates_kwargs`). + The sorting can be provided or generated. If the sorting is not provided, it is generated + (using the `spikeinterface.core.generate.generate_sorting` function and with arguments + provided in `generate_sorting_kwargs`). + The injected spikes can optionally follow a motion pattern provided by a Motion object. 
+ + Parameters + ---------- + recording : BaseRecording + The recording to inject units in. + sorting : Sorting | None, default: None + An external sorting object. If not provided, one is generated. + templates : Templates | None, default: None + The templates of units. + If None, they are generated. + motion : Motion | None, default: None + The motion object to use for the drifting templates. + are_templates_scaled : bool, default: True + If True, the templates are assumed to be in uV, otherwise in the same unit as the recording. + In case the recording has scaling, the templates are "unscaled" before injection. + unit_locations : np.array, default: None + The locations at which the templates should be injected. If not provided, generated (see + generate_unit_locations_kwargs). + drift_step_um : float, default: 1.0 + The step in um to use for the drifting templates. + upsample_factor : None or int, default: None + An upsampling factor used only when templates are not provided. + upsample_vector : np.array or None + Optionally, the upsample_vector can be given. This has the same shape as the spike vector. + amplitude_std : float, default: 0.05 + The standard deviation of the modulation to apply to the spikes when injecting them + into the recording. + generate_sorting_kwargs : dict + When sorting is not provided, this dict is used to generate a Sorting. + generate_unit_locations_kwargs : dict + Dict used to generate unit locations when templates are not provided. + generate_templates_kwargs : dict + Dict used to generate templates when templates are not provided. + seed : int or None + Seed for random initialization. + If None, a different Recording is generated at every call. + Note: even with None, a generated recording keeps a seed internally to regenerate the same signal after dump/load. + + Returns + ------- + recording: BaseRecording + The generated hybrid recording extractor. + sorting: Sorting + The generated sorting extractor for the injected units.
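A minimal end-to-end sketch of the API documented above, closely following the new tests further down in this diff:

```python
from spikeinterface.core.generate import generate_ground_truth_recording
from spikeinterface.generation import generate_hybrid_recording

rec, _ = generate_ground_truth_recording(sampling_frequency=20000, durations=[10], seed=0)

# inject 10 generated hybrid units into the existing recording
hybrid_rec, hybrid_sorting = generate_hybrid_recording(
    rec,
    generate_sorting_kwargs=dict(num_units=10, firing_rates=15, refractory_period_ms=4.0, seed=0),
    seed=0,
)
assert hybrid_rec.get_num_frames() == rec.get_num_frames()
```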
+ """ + + # if None so the same seed will be used for all steps + seed = _ensure_seed(seed) + rng = np.random.default_rng(seed) + + sampling_frequency = recording.sampling_frequency + probe = recording.get_probe() + num_segments = recording.get_num_segments() + dtype = recording.dtype + durations = np.array([recording.get_duration(segment_index) for segment_index in range(num_segments)]) + channel_locations = probe.contact_positions + + assert ( + templates is not None or sorting is not None or generate_sorting_kwargs is not None + ), "Provide templates or sorting or generate_sorting_kwargs" + + # check num_units + num_units = None + if templates is not None: + assert isinstance(templates, Templates), "templates should be a Templates object" + num_units = templates.num_units + if sorting is not None: + assert isinstance(sorting, BaseSorting), "sorting should be a Sorting object" + if num_units is not None: + assert num_units == sorting.get_num_units(), "num_units should be the same in templates and sorting" + else: + num_units = sorting.get_num_units() + if num_units is None: + assert "num_units" in generate_sorting_kwargs, "num_units should be provided in generate_sorting_kwargs" + num_units = generate_sorting_kwargs["num_units"] + else: + generate_sorting_kwargs["num_units"] = num_units + + if templates is None: + if unit_locations is None: + unit_locations = generate_unit_locations(num_units, channel_locations, **generate_unit_locations_kwargs) + else: + assert len(unit_locations) == num_units, "unit_locations and num_units should have the same length" + templates_array = generate_templates( + channel_locations, + unit_locations, + sampling_frequency, + upsample_factor=upsample_factor, + seed=seed, + dtype=dtype, + **generate_templates_kwargs, + ) + ms_before = generate_templates_kwargs["ms_before"] + ms_after = generate_templates_kwargs["ms_after"] + nbefore = int(ms_before * sampling_frequency / 1000.0) + nafter = int(ms_after * sampling_frequency / 1000.0) + templates_ = Templates(templates_array, sampling_frequency, nbefore, True, None, None, None, probe) + else: + from spikeinterface.postprocessing.localization_tools import compute_monopolar_triangulation + + assert isinstance(templates, Templates), "templates should be a Templates object" + assert ( + templates.num_channels == recording.get_num_channels() + ), "templates and recording should have the same number of channels" + nbefore = templates.nbefore + nafter = templates.nafter + unit_locations = compute_monopolar_triangulation(templates) + + channel_locations_rel = channel_locations - channel_locations[0] + templates_locations = templates.get_channel_locations() + templates_locations_rel = templates_locations - templates_locations[0] + + if not np.allclose(channel_locations_rel, templates_locations_rel): + warnings.warn("Channel locations are different between recording and templates. 
Interpolating templates.") + templates_array = np.zeros(templates.templates_array.shape, dtype=dtype) + for i in range(len(templates_array)): + src_template = templates.templates_array[i][np.newaxis, :, :] + templates_array[i] = interpolate_templates(src_template, templates_locations_rel, channel_locations_rel) + else: + templates_array = templates.templates_array + + # manage scaling of templates + templates_ = templates + if recording.has_scaleable_traces(): + if are_templates_scaled: + templates_array = (templates_array - recording.get_channel_offsets()) / recording.get_channel_gains() + # make a copy of the templates and reset templates_array (might have scaled templates) + templates_ = templates.select_units(templates.unit_ids) + templates_.templates_array = templates_array + + if sorting is None: + generate_sorting_kwargs = generate_sorting_kwargs.copy() + generate_sorting_kwargs["durations"] = durations + generate_sorting_kwargs["sampling_frequency"] = sampling_frequency + generate_sorting_kwargs["seed"] = seed + sorting = generate_sorting(**generate_sorting_kwargs) + else: + assert sorting.sampling_frequency == sampling_frequency + + num_spikes = sorting.to_spike_vector().size + sorting.set_property("gt_unit_locations", unit_locations) + + assert (nbefore + nafter) == templates_array.shape[ + 1 + ], "templates and ms_before, ms_after should have the same length" + + if templates_array.ndim == 3: + upsample_vector = None + else: + if upsample_vector is None: + upsample_factor = templates_array.shape[3] + upsample_vector = rng.integers(0, upsample_factor, size=num_spikes) + + if amplitude_std is not None: + amplitude_factor = rng.normal(loc=1, scale=amplitude_std, size=num_spikes) + else: + amplitude_factor = None + + if motion is not None: + assert num_segments == motion.num_segments, "recording and motion should have the same number of segments" + dim = motion.dim + motion_array_concat = np.concatenate(motion.displacement) + if dim == 0: + start = np.array([np.min(motion_array_concat), 0]) + stop = np.array([np.max(motion_array_concat), 0]) + elif dim == 1: + start = np.array([0, np.min(motion_array_concat)]) + stop = np.array([0, np.max(motion_array_concat)]) + elif dim == 2: + raise NotImplementedError("3D motion not implemented yet") + num_step = int((stop - start)[dim] / drift_step_um) + displacements = make_linear_displacement(start, stop, num_step=num_step) + + # use templates_, because templates_array might have been scaled + drifting_templates = DriftingTemplates.from_static_templates(templates_) + drifting_templates.precompute_displacements(displacements) + + # calculate displacement vectors for each segment and unit + # for each unit, we interpolate the motion at its location + displacement_sampling_frequency = 1.0 / np.diff(motion.temporal_bins_s[0])[0] + displacement_vectors = [] + for segment_index in range(motion.num_segments): + temporal_bins_segment = motion.temporal_bins_s[segment_index] + displacement_vector = np.zeros((len(temporal_bins_segment), 2, num_units)) + for unit_index in range(num_units): + motion_for_unit = motion.get_displacement_at_time_and_depth( + times_s=temporal_bins_segment, + locations_um=unit_locations[unit_index], + segment_index=segment_index, + grid=True, + ) + displacement_vector[:, motion.dim, unit_index] = motion_for_unit[motion.dim, :] + displacement_vectors.append(displacement_vector) + # since displacement is estimated by interpolation for each unit, the unit factor is an eye + displacement_unit_factor = np.eye(num_units) + + 
hybrid_recording = InjectDriftingTemplatesRecording( + sorting=sorting, + parent_recording=recording, + drifting_templates=drifting_templates, + displacement_vectors=displacement_vectors, + displacement_sampling_frequency=displacement_sampling_frequency, + displacement_unit_factor=displacement_unit_factor, + num_samples=(np.array(durations) * sampling_frequency).astype("int64"), + amplitude_factor=amplitude_factor, + ) + + else: + warnings.warn( + "No Motion is provided! Please check that your recording is drift-free, otherwise the hybrid recording " + "will have stationary units over a drifting recording..." + ) + hybrid_recording = InjectTemplatesRecording( + sorting, + templates_array, + nbefore=nbefore, + parent_recording=recording, + upsample_vector=upsample_vector, + ) + + return hybrid_recording, sorting diff --git a/src/spikeinterface/generation/noise_tools.py b/src/spikeinterface/generation/noise_tools.py index 48555b3062..11f30e352f 100644 --- a/src/spikeinterface/generation/noise_tools.py +++ b/src/spikeinterface/generation/noise_tools.py @@ -10,25 +10,24 @@ def generate_noise( Parameters ---------- - probe: Probe + probe : Probe A probe object. - sampling_frequency: float + sampling_frequency : float Sampling frequency - durations: list of float + durations : list of float Durations - dtype: np.dtype + dtype : np.dtype Dtype - noise_levels: float | np.array | tuple + noise_levels : float | np.array | tuple If scalar, the same noise level is used on all channels. If array, it gives the noise level per channel. If tuple, it represents the range. - - seed: None | int + seed : None | int The seed for random generator. Returns ------- - noise: NoiseGeneratorRecording + noise : NoiseGeneratorRecording A lazy noise generator recording. """ diff --git a/src/spikeinterface/generation/tests/test_drift_tools.py b/src/spikeinterface/generation/tests/test_drift_tools.py index 8a4837100e..5647b33930 100644 --- a/src/spikeinterface/generation/tests/test_drift_tools.py +++ b/src/spikeinterface/generation/tests/test_drift_tools.py @@ -94,11 +94,12 @@ def test_DriftingTemplates(): static_templates = make_some_templates() - drifting_templates = DriftingTemplates.from_static(static_templates) + drifting_templates = DriftingTemplates.from_static_templates(static_templates) displacement = np.array([[5.0, 10.0]]) unit_index = 0 moved_template_array = drifting_templates.move_one_template(unit_index, displacement) + assert not np.array_equal(moved_template_array, static_templates.templates_array[unit_index]) num_move = 5 amplitude_motion_um = 20 @@ -112,6 +113,25 @@ static_templates.num_channels, ) + # test from precomputed + drifting_templates_from_precomputed = DriftingTemplates.from_precomputed_templates( + templates_array_moved=drifting_templates.templates_array_moved, + displacements=drifting_templates.displacements, + sampling_frequency=drifting_templates.sampling_frequency, + probe=drifting_templates.probe, + nbefore=drifting_templates.nbefore, + ) + assert drifting_templates_from_precomputed.templates_array_moved.shape == ( + num_move, + static_templates.num_units, + static_templates.num_samples, + static_templates.num_channels, + ) + assert np.array_equal( + drifting_templates_from_precomputed.templates_array_moved, drifting_templates.templates_array_moved + ) + assert np.array_equal(drifting_templates_from_precomputed.displacements, drifting_templates.displacements) + def test_InjectDriftingTemplatesRecording(create_cache_folder): cache_folder =
create_cache_folder @@ -119,7 +139,7 @@ def test_InjectDriftingTemplatesRecording(create_cache_folder): probe = templates.probe # drifting templates - drifting_templates = DriftingTemplates.from_static(templates) + drifting_templates = DriftingTemplates.from_static_templates(templates) channel_locations = probe.contact_positions num_units = templates.unit_ids.size diff --git a/src/spikeinterface/generation/tests/test_hybrid_tools.py b/src/spikeinterface/generation/tests/test_hybrid_tools.py new file mode 100644 index 0000000000..d31a0ec81d --- /dev/null +++ b/src/spikeinterface/generation/tests/test_hybrid_tools.py @@ -0,0 +1,83 @@ +import numpy as np + +from spikeinterface.core import Templates +from spikeinterface.core.generate import ( + generate_ground_truth_recording, + generate_sorting, + generate_templates, + generate_unit_locations, +) +from spikeinterface.preprocessing.motion import correct_motion, load_motion_info +from spikeinterface.generation.hybrid_tools import ( + estimate_templates_from_recording, + generate_hybrid_recording, +) + + +def test_generate_hybrid_no_motion(): + rec, _ = generate_ground_truth_recording(sampling_frequency=20000, seed=0) + hybrid, _ = generate_hybrid_recording(rec, seed=0) + assert rec.get_num_channels() == hybrid.get_num_channels() + assert rec.get_num_frames() == hybrid.get_num_frames() + assert rec.get_num_segments() == hybrid.get_num_segments() + assert np.array_equal(rec.get_channel_locations(), hybrid.get_channel_locations()) + + +def test_generate_hybrid_with_sorting(): + gt_sorting = generate_sorting(durations=[10], num_units=20, sampling_frequency=20000, seed=0) + rec, _ = generate_ground_truth_recording(durations=[10], sampling_frequency=20000, sorting=gt_sorting, seed=0) + hybrid, sorting_hybrid = generate_hybrid_recording(rec, sorting=gt_sorting) + assert rec.get_num_channels() == hybrid.get_num_channels() + assert rec.get_num_frames() == hybrid.get_num_frames() + assert rec.get_num_segments() == hybrid.get_num_segments() + assert np.array_equal(rec.get_channel_locations(), hybrid.get_channel_locations()) + assert sorting_hybrid.get_num_units() == len(hybrid.templates) + + +def test_generate_hybrid_motion(): + rec, _ = generate_ground_truth_recording(sampling_frequency=20000, durations=[10], seed=0) + _, motion_info = correct_motion(rec, output_motion_info=True) + motion = motion_info["motion"] + hybrid, sorting_hybrid = generate_hybrid_recording(rec, motion=motion, seed=0) + assert rec.get_num_channels() == hybrid.get_num_channels() + assert rec.get_num_frames() == hybrid.get_num_frames() + assert rec.get_num_segments() == hybrid.get_num_segments() + assert np.array_equal(rec.get_channel_locations(), hybrid.get_channel_locations()) + assert sorting_hybrid.get_num_units() == len(hybrid.drifting_templates.unit_ids) + + +def test_generate_hybrid_from_templates(): + num_units = 10 + ms_before = 2 + ms_after = 4 + rec, _ = generate_ground_truth_recording(sampling_frequency=20000, seed=0) + channel_locations = rec.get_channel_locations() + unit_locations = generate_unit_locations(num_units, channel_locations=channel_locations, seed=0) + templates_array = generate_templates( + channel_locations, unit_locations, rec.sampling_frequency, ms_before, ms_after, seed=0 + ) + nbefore = int(ms_before * rec.sampling_frequency / 1000) + templates = Templates(templates_array, rec.sampling_frequency, nbefore, True, None, None, None, rec.get_probe()) + hybrid, sorting_hybrid = generate_hybrid_recording(rec, templates=templates, seed=0) + assert 
np.array_equal(hybrid.templates, templates.templates_array) + assert rec.get_num_channels() == hybrid.get_num_channels() + assert rec.get_num_frames() == hybrid.get_num_frames() + assert rec.get_num_segments() == hybrid.get_num_segments() + assert np.array_equal(rec.get_channel_locations(), hybrid.get_channel_locations()) + assert sorting_hybrid.get_num_units() == num_units + + +def test_estimate_templates(create_cache_folder): + cache_folder = create_cache_folder + rec, _ = generate_ground_truth_recording(num_units=10, sampling_frequency=20000, seed=0) + templates = estimate_templates_from_recording( + rec, run_sorter_kwargs=dict(folder=cache_folder / "sc", remove_existing_folder=True) + ) + assert len(templates.templates_array) > 0 + + +if __name__ == "__main__": + test_generate_hybrid_no_motion() + test_generate_hybrid_motion() + test_estimate_templates() + test_generate_hybrid_with_sorting() diff --git a/src/spikeinterface/generation/tests/test_mock.py b/src/spikeinterface/generation/tests/test_mock.py deleted file mode 100644 index 37c6bde47e..0000000000 --- a/src/spikeinterface/generation/tests/test_mock.py +++ /dev/null @@ -1,3 +0,0 @@ -def test_mock(): - # TODO: Add test logic here - pass diff --git a/src/spikeinterface/postprocessing/__init__.py b/src/spikeinterface/postprocessing/__init__.py index ae071a55e0..31bfdf1863 100644 --- a/src/spikeinterface/postprocessing/__init__.py +++ b/src/spikeinterface/postprocessing/__init__.py @@ -21,11 +21,7 @@ from .correlograms import ( ComputeCorrelograms, compute_correlograms, - compute_autocorrelogram_from_spiketrain, - compute_crosscorrelogram_from_spiketrain, correlogram_for_one_segment, - compute_correlograms_numba, - compute_correlograms_numpy, ) from .isi import ( @@ -40,7 +36,6 @@ from .unit_locations import ( compute_unit_locations, ComputeUnitLocations, - compute_center_of_mass, ) from .amplitude_scalings import compute_amplitude_scalings, ComputeAmplitudeScalings diff --git a/src/spikeinterface/postprocessing/correlograms.py b/src/spikeinterface/postprocessing/correlograms.py index bc7d2578fa..7c22260dbe 100644 --- a/src/spikeinterface/postprocessing/correlograms.py +++ b/src/spikeinterface/postprocessing/correlograms.py @@ -16,35 +16,55 @@ class ComputeCorrelograms(AnalyzerExtension): """ - Compute auto and cross correlograms. + Compute auto and cross correlograms of unit spike times. Parameters ---------- sorting_analyzer: SortingAnalyzer A SortingAnalyzer object window_ms : float, default: 50.0 - The window in ms + The window around the spike to compute the correlation in ms. For example, + if 50 ms, the correlations will be computed at lags -25 ms ... 25 ms. bin_ms : float, default: 1.0 - The bin size in ms + The bin size in ms. This determines the bin size over which to + combine lags. For example, with a window size of -25 ms to 25 ms, and + bin size 1 ms, the correlation will be binned as -25 ms, -24 ms, ... method : "auto" | "numpy" | "numba", default: "auto" - If "auto" and numba is installed, numba is used, otherwise numpy is used + If "auto" and numba is installed, numba is used, otherwise numpy is used. Returns ------- - ccgs : np.array + correlogram : np.array Correlograms with shape (num_units, num_units, num_bins) - The diagonal of ccgs is the auto correlogram. - ccgs[A, B, :] is the symetrie of ccgs[B, A, :] - ccgs[A, B, :] have to be read as the histogram of spiketimesA - spiketimesB + The diagonal of the correlogram (e.g. correlogram[A, A, :]) + holds the unit auto correlograms. 
+        The off-diagonal elements are the cross-correlograms
+        between units, where correlogram[A, B, :] and correlogram[B, A, :]
+        represent cross-correlation between the same pair of units,
+        applied in opposite directions:
+        correlogram[A, B, :] = correlogram[B, A, ::-1].
     bins : np.array
         The bin edges in ms

-    Returns
-    -------
-    isi_histograms : np.array
-        2D array with ISI histograms (num_units, num_bins)
-    bins : np.array
-        1D array with bins in ms
+    Notes
+    -----
+    In the extracellular electrophysiology context, a correlogram
+    is a visualisation of the results of a cross-correlation
+    between two spike trains. The cross-correlation slides one spike train
+    along another sample-by-sample, taking the correlation at each 'lag'. This results
+    in a plot with 'lag' (i.e. time offset) on the x-axis and 'correlation'
+    (i.e. how similar the two spike trains are) on the y-axis. In this
+    implementation, the y-axis result is the 'counts' of spike matches per
+    time bin (rather than a computed correlation or covariance).
+
+    In the present implementation, a 'window' around spikes is first
+    specified. For example, if a window of 100 ms is taken, we will
+    take the correlation at lags from -50 ms to +50 ms around the spike peak.
+    In theory, we can have as many lags as we have samples. Often, this
+    visualisation is too high resolution and instead the lags are binned
+    (e.g. -50 to -45 ms, ..., -5 to 0 ms, 0 to 5 ms, ..., 45 to 50 ms).
+    When using counts as output, binning the lags involves adding up all counts across
+    a range of lags.
+    """
@@ -71,7 +91,7 @@ def _select_extension_data(self, unit_ids):
         return new_data

     def _run(self, verbose=False):
-        ccgs, bins = compute_correlograms_on_sorting(self.sorting_analyzer.sorting, **self.params)
+        ccgs, bins = _compute_correlograms_on_sorting(self.sorting_analyzer.sorting, **self.params)
         self.data["ccgs"] = ccgs
         self.data["bins"] = bins
@@ -89,7 +109,10 @@ def compute_correlograms(
     bin_ms: float = 1.0,
     method: str = "auto",
 ):
-
+    """
+    Compute correlograms using Numba or Numpy.
+    See ComputeCorrelograms() for details.
+    """
     if isinstance(sorting_analyzer_or_sorting, MockWaveformExtractor):
         sorting_analyzer_or_sorting = sorting_analyzer_or_sorting.sorting
@@ -98,7 +121,7 @@ def compute_correlograms(
             sorting_analyzer_or_sorting, window_ms=window_ms, bin_ms=bin_ms, method=method
         )
     else:
-        return compute_correlograms_on_sorting(
+        return _compute_correlograms_on_sorting(
             sorting_analyzer_or_sorting, window_ms=window_ms, bin_ms=bin_ms, method=method
         )
@@ -107,6 +130,33 @@ def compute_correlograms(


 def _make_bins(sorting, window_ms, bin_ms):
+    """
+    Create the bins for the correlogram, in samples.
+
+    The autocorrelogram bins are centered around zero. Each bin
+    increases in a positive / negative direction starting at zero.
+
+    For example, given a window_ms of 50 ms and a bin_ms of
+    5 ms, the bins in unit ms will be:
+    [-25 to -20, ..., -5 to 0, 0 to 5, ..., 20 to 25].
+
+    The window size will be clipped if not divisible by the bin size.
+
+    Parameters
+    ----------
+    See ComputeCorrelograms() for parameters.
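+
+    As a worked example (illustrative numbers only, not taken from the code
+    below): for a 30 kHz sorting, window_ms=50.0 and bin_ms=1.0 give
+    window_size=750 samples and bin_size=30 samples, i.e. 50 bins of 1 ms
+    spanning -25 ms to +25 ms.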
+
+    Returns
+    -------
+    bins : np.ndarray
+        The bin edges in ms
+    window_size : int
+        The window size in samples
+    bin_size : int
+        The bin size in samples
+
+    """
     fs = sorting.sampling_frequency

     window_size = int(round(fs * window_ms / 2 * 1e-3))
@@ -120,62 +170,56 @@ def _make_bins(sorting, window_ms, bin_ms):
     return bins, window_size, bin_size


-def compute_autocorrelogram_from_spiketrain(spike_times, window_size, bin_size):
+def _compute_num_bins(window_size, bin_size):
     """
-    Computes the auto-correlogram from a given spike train.
-
-    This implementation only works if you have numba installed, to accelerate the
-    computation time.
+    Internal function to compute the number of bins; it expects that
+    window_size is already divisible by bin_size. These are
+    typically generated in `_make_bins()`.

-    Parameters
-    ----------
-    spike_times: np.ndarray
-        The ordered spike train to compute the auto-correlogram.
-    window_size: int
-        Compute the auto-correlogram between -window_size and +window_size (in sampling time).
-    bin_size: int
-        Size of a bin (in sampling time).

     Returns
     -------
-    tuple (auto_corr, bins)
-    auto_corr: np.ndarray[int64]
-        The computed auto-correlogram.
+    num_bins : int
+        The total number of bins spanning the window
+    num_half_bins : int
+        Half the number of bins. The bins are an equal number
+        of bins that look forward and backwards from zero, e.g.
+        [..., -10 to -5, -5 to 0, 0 to 5, 5 to 10, ...]
+
     """
-    assert HAVE_NUMBA
-    return _compute_autocorr_numba(spike_times.astype(np.int64), window_size, bin_size)
+    num_half_bins = int(window_size // bin_size)
+    num_bins = int(2 * num_half_bins)

+    return num_bins, num_half_bins

-def compute_crosscorrelogram_from_spiketrain(spike_times1, spike_times2, window_size, bin_size):
+
+def _compute_correlograms_on_sorting(sorting, window_ms, bin_ms, method="auto"):
     """
-    Computes the cros-correlogram between two given spike trains.
+    Computes cross-correlograms from multiple units.

-    This implementation only works if you have numba installed, to accelerate the
-    computation time.
+    Entry function to compute correlograms across all units in a `Sorting`
+    object (i.e. spike trains at all determined offsets will be computed
+    for each unit against every other unit).

     Parameters
     ----------
-    spike_times1: np.ndarray
-        The ordered spike train to compare against the second one.
-    spike_times2: np.ndarray
-        The ordered spike train that serves as a reference for the cross-correlogram.
-    window_size: int
-        Compute the auto-correlogram between -window_size and +window_size (in sampling time).
-    bin_size: int
-        Size of a bin (in sampling time).
+    sorting : Sorting
+        A SpikeInterface Sorting object
+    window_ms : float
+        The window size over which to perform the cross-correlation, in ms
+    bin_ms : float
+        The bin size used to bin the lags, in ms
+    method : str
+        "numpy", "numba" or "auto". "auto" will use numba if available,
+        otherwise numpy.

     Returns
     -------
-    tuple (auto_corr, bins)
-    auto_corr: np.ndarray[int64]
-        The computed auto-correlogram.
-    """
-    assert HAVE_NUMBA
-    return _compute_crosscorr_numba(spike_times1.astype(np.int64), spike_times2.astype(np.int64), window_size, bin_size)
-
-
-def compute_correlograms_on_sorting(sorting, window_ms, bin_ms, method="auto"):
-    """
-    Computes several cross-correlogram in one course from several clusters.
+    correlograms : np.array
+        A (num_units, num_units, num_bins) array where unit x unit correlation
+        matrices are stacked at all determined time bins.
+        Note that the true correlation is not returned; instead, the
+        count of matches per lag bin is.
+    bins : np.array
+        The bin edges in ms
     """
     assert method in ("auto", "numba", "numpy")
@@ -185,23 +229,23 @@ def compute_correlograms_on_sorting(sorting, window_ms, bin_ms, method="auto"):
     bins, window_size, bin_size = _make_bins(sorting, window_ms, bin_ms)

     if method == "numpy":
-        correlograms = compute_correlograms_numpy(sorting, window_size, bin_size)
+        correlograms = _compute_correlograms_numpy(sorting, window_size, bin_size)
     if method == "numba":
-        correlograms = compute_correlograms_numba(sorting, window_size, bin_size)
+        correlograms = _compute_correlograms_numba(sorting, window_size, bin_size)

     return correlograms, bins


 # LOW-LEVEL IMPLEMENTATIONS
-def compute_correlograms_numpy(sorting, window_size, bin_size):
+def _compute_correlograms_numpy(sorting, window_size, bin_size):
     """
-    Computes cross-correlograms for all units in a sorting object.
+    Computes correlograms for all units in a sorting object.

     This very elegant implementation is copied from phy package written by Cyrille Rossant.
     https://github.com/cortex-lab/phylib/blob/master/phylib/stats/ccg.py

-    The main modification is way the positive and negative are handled explicitly
-    for rounding reasons.
+    The main modification is the way positive and negative lags are handled
+    explicitly, for rounding reasons.

     Other slight modifications have been made to fit the SpikeInterface
     data model (e.g. adding the ability to handle multiple segments).
@@ -212,30 +256,66 @@ def compute_correlograms_numpy(sorting, window_size, bin_size):
     num_units = len(sorting.unit_ids)
     spikes = sorting.to_spike_vector(concatenated=False)

-    num_half_bins = int(window_size // bin_size)
-    num_bins = int(2 * num_half_bins)
+    num_bins, num_half_bins = _compute_num_bins(window_size, bin_size)

     correlograms = np.zeros((num_units, num_units, num_bins), dtype="int64")

     for seg_index in range(num_seg):
         spike_times = spikes[seg_index]["sample_index"]
-        spike_labels = spikes[seg_index]["unit_index"]
+        spike_unit_indices = spikes[seg_index]["unit_index"]

-        c0 = correlogram_for_one_segment(spike_times, spike_labels, window_size, bin_size)
+        c0 = correlogram_for_one_segment(spike_times, spike_unit_indices, window_size, bin_size)

         correlograms += c0

     return correlograms


-def correlogram_for_one_segment(spike_times, spike_labels, window_size, bin_size):
-    """
-    Called by compute_correlograms_numpy
+def correlogram_for_one_segment(spike_times, spike_unit_indices, window_size, bin_size):
     """
+    A very well optimized algorithm for the cross-correlation of
+    spike trains, copied from the Phy package, written by Cyrille Rossant.

-    num_half_bins = int(window_size // bin_size)
-    num_bins = int(2 * num_half_bins)
-    num_units = len(np.unique(spike_labels))
+    Parameters
+    ----------
+    spike_times : np.ndarray
+        An array of spike times (in samples, not seconds).
+        This contains spikes from all units.
+    spike_unit_indices : np.ndarray
+        An array of labels indicating the unit of the corresponding
+        spike in `spike_times`.
+    window_size : int
+        The window size over which to perform the cross-correlation, in samples
+    bin_size : int
+        The bin size used to bin the lags, in samples
+
+    Returns
+    -------
+    correlograms : np.array
+        A (num_units, num_units, num_bins) array of correlograms
+        between all units at each lag time bin.
+
+    Notes
+    -----
+    For each spike, the time difference between it and
+    every other spike within the window is directly computed
+    and stored as a count in the relevant lag time bin.
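+
+    As a toy example (hypothetical numbers): for spike_times [10, 40, 55]
+    in samples with bin_size 10, the shift=1 differences are [30, 15]
+    (binned lags 3 and 1) and the shift=2 difference is [45] (binned lag 4).
+    The mechanics of this are described below.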
+
+    Initially, the spike_times array is shifted by 1 position, and the differences
+    computed. This gives the time differences between the closest spikes
+    (skipping the zero-lag case). Next, the differences between
+    spike times in samples are converted into units relative to
+    bin_size ('binarized'). Spikes whose binarized difference to
+    their closest neighbouring spike falls outside the window are
+    masked.
+
+    Finally, the entries of the (num_units, num_units, num_bins) correlogram
+    that need incrementing are found with `ravel_multi_index()` and
+    incremented. This repeats for all shifts along the spike train until
+    no spikes have a corresponding match within the window size.
+    """
+    num_bins, num_half_bins = _compute_num_bins(window_size, bin_size)
+    num_units = len(np.unique(spike_unit_indices))

     correlograms = np.zeros((num_units, num_units, num_bins), dtype="int64")
@@ -243,8 +323,8 @@ def correlogram_for_one_segment(spike_times, spike_labels, window_size, bin_size
     # within the correlogram time window.
     mask = np.ones_like(spike_times, dtype="bool")

-    # The loop continues as long as there is at least one spike with
-    # a matching spike.
+    # The loop continues as long as there is at least one
+    # spike with a matching spike.
     shift = 1
     while mask[:-shift].any():
         # Number of time samples between spike i and spike i+shift.
@@ -264,15 +344,15 @@ def correlogram_for_one_segment(spike_times, spike_labels, window_size, bin_size
             m = mask[:-shift]

             # Find the indices in the raveled correlograms array that need
-            # to be incremented, taking into account the spike clusters.
+            # to be incremented, taking into account the spike unit labels.
             if sign == 1:
                 indices = np.ravel_multi_index(
-                    (spike_labels[+shift:][m], spike_labels[:-shift][m], spike_diff_b[m] + num_half_bins),
+                    (spike_unit_indices[+shift:][m], spike_unit_indices[:-shift][m], spike_diff_b[m] + num_half_bins),
                     correlograms.shape,
                 )
             else:
                 indices = np.ravel_multi_index(
-                    (spike_labels[:-shift][m], spike_labels[+shift:][m], spike_diff_b[m] + num_half_bins),
+                    (spike_unit_indices[:-shift][m], spike_unit_indices[+shift:][m], spike_diff_b[m] + num_half_bins),
                     correlograms.shape,
                 )
@@ -280,35 +360,66 @@ def correlogram_for_one_segment(spike_times, spike_labels, window_size, bin_size
             bbins = np.bincount(indices)
             correlograms.ravel()[: len(bbins)] += bbins

+            if sign == 1:
+                # For positive sign, the end bin is < num_half_bins (e.g.
+                # bin = 29 with num_half_bins = 30 goes to index 59, i.e. the
+                # last bin). For negative sign, the first bin is == num_half_bins
+                # (e.g. bin = -30 with num_half_bins = 30 goes to bin 0). Therefore
+                # sign == 1 must mask spike_diff_b <= num_half_bins but sign == -1
+                # must count all (possibly repeating across units) cases of
+                # spike_diff_b == num_half_bins. So we turn it back on here
+                # for the next loop, which starts with the -1 case.
+                mask[:-shift][spike_diff_b == num_half_bins] = True
+
         shift += 1

     return correlograms


-def compute_correlograms_numba(sorting, window_size, bin_size):
+def _compute_correlograms_numba(sorting, window_size, bin_size):
     """
-    Computes several cross-correlogram in one course
-    from several cluster.
+    Computes cross-correlograms between all units in `sorting`.

     This is a "brute force" method using compiled code (numba)
-    to accelerate the computation.
+    to accelerate the computation. See
+    `_compute_correlograms_one_segment_numba()` for details.
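+
+    Unlike the numpy implementation, which vectorises over successive
+    shifts of the whole spike train, this implementation iterates over
+    spike pairs directly, so its cost scales with the number of pairs
+    falling within the window.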
+
+    Parameters
+    ----------
+    sorting : Sorting
+        A SpikeInterface Sorting object
+    window_size : int
+        The window size over which to perform the cross-correlation, in samples
+    bin_size : int
+        The bin size used to bin the lags, in samples
+
+    Returns
+    -------
+    correlograms: np.array
+        A (num_units, num_units, num_bins) array of correlograms
+        between all units at each lag time bin.

     Implementation: Aurélien Wyngaard
     """
-    assert HAVE_NUMBA, "numba version of this function requires installation of numba"

-    num_bins = 2 * int(window_size / bin_size)
+    num_bins, num_half_bins = _compute_num_bins(window_size, bin_size)
     num_units = len(sorting.unit_ids)
+
     spikes = sorting.to_spike_vector(concatenated=False)
     correlograms = np.zeros((num_units, num_units, num_bins), dtype=np.int64)

     for seg_index in range(sorting.get_num_segments()):
         spike_times = spikes[seg_index]["sample_index"]
-        spike_labels = spikes[seg_index]["unit_index"]
-
-        _compute_correlograms_numba(
-            correlograms, spike_times.astype(np.int64), spike_labels.astype(np.int32), window_size, bin_size
+        spike_unit_indices = spikes[seg_index]["unit_index"]
+
+        _compute_correlograms_one_segment_numba(
+            correlograms,
+            spike_times.astype(np.int64, copy=False),
+            spike_unit_indices.astype(np.int32, copy=False),
+            window_size,
+            bin_size,
+            num_half_bins,
         )

     return correlograms
@@ -316,75 +427,71 @@ def compute_correlograms_numba(sorting, window_size, bin_size):

 if HAVE_NUMBA:

-    @numba.jit(nopython=True, nogil=True, cache=False)
-    def _compute_autocorr_numba(spike_times, window_size, bin_size):
-        num_half_bins = window_size // bin_size
-        num_bins = 2 * num_half_bins
-
-        auto_corr = np.zeros(num_bins, dtype=np.int64)
-
-        for i in range(len(spike_times)):
-            for j in range(i + 1, len(spike_times)):
-                diff = spike_times[j] - spike_times[i]
-
-                if diff > window_size:
-                    break
-
-                bin = int(math.floor(diff / bin_size))
-                # ~ auto_corr[num_bins//2 - bin - 1] += 1
-                auto_corr[num_half_bins + bin] += 1
-                # ~ print(diff, bin, num_half_bins + bin)
-
-                bin = int(math.floor(-diff / bin_size))
-                auto_corr[num_half_bins + bin] += 1
-                # ~ print(diff, bin, num_half_bins + bin)
-
-        return auto_corr
+    @numba.jit(
+        nopython=True,
+        nogil=True,
+        cache=False,
+    )
+    def _compute_correlograms_one_segment_numba(
+        correlograms, spike_times, spike_unit_indices, window_size, bin_size, num_half_bins
+    ):
+        """
+        Compute the correlograms using `numba` for speed.
+
+        The algorithm works by brute-force iteration through all
+        pairs of spikes (skipping those when outside of the window).
+        The spike-time difference and its time bin are computed
+        and stored in a (num_units, num_units, num_bins)
+        correlogram. The correlogram must be passed as an
+        argument and is filled in-place.
+
+        Parameters
+        ----------
+        correlograms: np.array
+            A (num_units, num_units, num_bins) array of correlograms
+            between all units at each lag time bin. This is passed in
+            (and filled in-place) because counts for all segments are
+            added to it.
+        spike_times : np.ndarray
+            An array of spike times (in samples, not seconds).
+            This contains spikes from all units.
+        spike_unit_indices : np.ndarray
+            An array of labels indicating the unit of the corresponding
+            spike in `spike_times`.
+        window_size : int
+            The window size over which to perform the cross-correlation, in samples
+        bin_size : int
+            The bin size used to bin the lags, in samples
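+        num_half_bins : int
+            Half the total number of bins, as computed by
+            `_compute_num_bins()`; the lag bins span symmetrically around zero.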
+ """ + start_j = 0 + for i in range(spike_times.size): + for j in range(start_j, spike_times.size): - @numba.jit(nopython=True, nogil=True, cache=False) - def _compute_crosscorr_numba(spike_times1, spike_times2, window_size, bin_size): - num_half_bins = window_size // bin_size - num_bins = 2 * num_half_bins + if i == j: + continue - cross_corr = np.zeros(num_bins, dtype=np.int64) + diff = spike_times[i] - spike_times[j] - start_j = 0 - for i in range(len(spike_times1)): - for j in range(start_j, len(spike_times2)): - diff = spike_times1[i] - spike_times2[j] + # When the diff is exactly the window size, keep going + # without iterating start_j in case this spike also has + # other diffs with other units that == window size. + if diff == window_size: + continue - if diff >= window_size: + # if the time of spike i is more than window size later than + # spike j, then spike i + 1 will also be more than a window size + # later than spike j. Iterate the start_j and check the next spike. + if diff > window_size: start_j += 1 continue + + # If the time of spike i is more than a window size earlier + # than spike j, then all following j spikes will be even later + # i spikes and so all more than a window size earlier. So move + # onto the next i. if diff < -window_size: break - bin = int(math.floor(diff / bin_size)) - # ~ bin = diff // bin_size - cross_corr[num_half_bins + bin] += 1 - # ~ print(diff, bin, num_half_bins + bin) + bin = diff // bin_size - return cross_corr - - @numba.jit( - nopython=True, - nogil=True, - cache=False, - parallel=True, - ) - def _compute_correlograms_numba(correlograms, spike_times, spike_labels, window_size, bin_size): - n_units = correlograms.shape[0] - - for i in numba.prange(n_units): - # ~ for i in range(n_units): - spike_times1 = spike_times[spike_labels == i] - - for j in range(i, n_units): - spike_times2 = spike_times[spike_labels == j] - - if i == j: - correlograms[i, j, :] += _compute_autocorr_numba(spike_times1, window_size, bin_size) - else: - cc = _compute_crosscorr_numba(spike_times1, spike_times2, window_size, bin_size) - correlograms[i, j, :] += cc - correlograms[j, i, :] += cc[::-1] + correlograms[spike_unit_indices[i], spike_unit_indices[j], num_half_bins + bin] += 1 diff --git a/src/spikeinterface/postprocessing/localization_tools.py b/src/spikeinterface/postprocessing/localization_tools.py new file mode 100644 index 0000000000..b7571a6f3e --- /dev/null +++ b/src/spikeinterface/postprocessing/localization_tools.py @@ -0,0 +1,623 @@ +from __future__ import annotations + +import warnings + +import numpy as np + +try: + import numba + + HAVE_NUMBA = True +except ImportError: + HAVE_NUMBA = False + + +from spikeinterface.core import compute_sparsity, SortingAnalyzer, Templates +from spikeinterface.core.template_tools import get_template_extremum_channel, _get_nbefore, get_dense_templates_array + + +def compute_monopolar_triangulation( + sorting_analyzer_or_templates: SortingAnalyzer | Templates, + optimizer: str = "least_square", + radius_um: float = 75, + max_distance_um: float = 1000, + return_alpha: bool = False, + enforce_decrease: bool = False, + feature: str = "ptp", +) -> np.ndarray: + """ + Localize unit with monopolar triangulation. 
+    This method is from Julien Boussard, Erdem Varol and Charlie Windolf
+    https://www.biorxiv.org/content/10.1101/2021.11.05.467503v1
+
+    There are 2 implementations of the 2 optimizer variants:
+    * https://github.com/int-brain-lab/spikes_localization_registration/blob/main/localization_pipeline/localizer.py
+    * https://github.com/cwindolf/spike-psvae/blob/main/spike_psvae/localization.py
+
+    Important note about axes:
+    * x/y are the dimensions of the probe plane (dim0, dim1)
+    * y is the depth by convention
+    * z is the axis orthogonal to the probe plane (dim2)
+
+    The code from Erdem, Julien and Charlie does not use the same convention!!!
+
+
+    Parameters
+    ----------
+    sorting_analyzer_or_templates : SortingAnalyzer | Templates
+        A SortingAnalyzer or Templates object
+    optimizer : "least_square" | "minimize_with_log_penality", default: "least_square"
+        The optimizer to use
+    radius_um : float, default: 75
+        The radius, in um, used for the channel sparsity
+    max_distance_um : float, default: 1000
+        Used to set the boundary in x, y, z, and also for alpha
+    return_alpha : bool, default: False
+        Whether to also return the alpha value
+    enforce_decrease : bool, default: False
+        Whether to enforce spatial decrease of the PTP vectors
+    feature : "ptp" | "energy" | "peak_voltage", default: "ptp"
+        The available features to consider for estimating the position via
+        monopolar triangulation are peak-to-peak amplitudes ("ptp", default),
+        energy ("energy", as L2 norm) or voltages at the center of the waveform
+        ("peak_voltage")
+
+    Returns
+    -------
+    unit_location: np.ndarray
+        3d or 4d: x, y, z (and alpha if return_alpha=True);
+        alpha is the estimated amplitude at the source
+    """
+    assert optimizer in ("least_square", "minimize_with_log_penality")
+
+    assert feature in ["ptp", "energy", "peak_voltage"], f"{feature} is not a valid feature"
+    unit_ids = sorting_analyzer_or_templates.unit_ids
+
+    contact_locations = sorting_analyzer_or_templates.get_channel_locations()
+
+    sparsity = compute_sparsity(sorting_analyzer_or_templates, method="radius", radius_um=radius_um)
+    templates = get_dense_templates_array(
+        sorting_analyzer_or_templates, return_scaled=get_return_scaled(sorting_analyzer_or_templates)
+    )
+    nbefore = _get_nbefore(sorting_analyzer_or_templates)
+
+    if enforce_decrease:
+        neighbours_mask = np.zeros((templates.shape[0], templates.shape[2]), dtype=bool)
+        for i, unit_id in enumerate(unit_ids):
+            chan_inds = sparsity.unit_id_to_channel_indices[unit_id]
+            neighbours_mask[i, chan_inds] = True
+        enforce_decrease_radial_parents = make_radial_order_parents(contact_locations, neighbours_mask)
+        best_channels = get_template_extremum_channel(sorting_analyzer_or_templates, outputs="index")
+
+    unit_location = np.zeros((unit_ids.size, 4), dtype="float64")
+    for i, unit_id in enumerate(unit_ids):
+        chan_inds = sparsity.unit_id_to_channel_indices[unit_id]
+        local_contact_locations = contact_locations[chan_inds, :]
+
+        # wf is (nsample, nchan); the channels are restricted to the neighbourhood
+        wf = templates[i, :, :][:, chan_inds]
+        if feature == "ptp":
+            wf_data = wf.ptp(axis=0)
+        elif feature == "energy":
+            wf_data = np.linalg.norm(wf, axis=0)
+        elif feature == "peak_voltage":
+            wf_data = np.abs(wf[nbefore])
+
+        # if enforce_decrease:
+        #    enforce_decrease_shells_data(
+        #        wf_data, best_channels[unit_id], enforce_decrease_radial_parents, in_place=True
+        #    )
+
+        unit_location[i] = solve_monopolar_triangulation(wf_data, local_contact_locations, max_distance_um, optimizer)
+
+    if not return_alpha:
+        unit_location = unit_location[:, :3]
+
+    return unit_location
+
+
+def compute_center_of_mass(
sorting_analyzer_or_templates: SortingAnalyzer | Templates, + peak_sign: str = "neg", + radius_um: float = 75, + feature: str = "ptp", +) -> np.ndarray: + """ + Computes the center of mass (COM) of a unit based on the template amplitudes. + + Parameters + ---------- + sorting_analyzer_or_templates : SortingAnalyzer | Templates + A SortingAnalyzer or Templates object + peak_sign : "neg" | "pos" | "both", default: "neg" + Sign of the template to compute best channels + radius_um : float + Radius to consider in order to estimate the COM + feature : "ptp" | "mean" | "energy" | "peak_voltage", default: "ptp" + Feature to consider for computation + + Returns + ------- + unit_location: np.array + """ + unit_ids = sorting_analyzer_or_templates.unit_ids + + contact_locations = sorting_analyzer_or_templates.get_channel_locations() + + assert feature in ["ptp", "mean", "energy", "peak_voltage"], f"{feature} is not a valid feature" + + sparsity = compute_sparsity( + sorting_analyzer_or_templates, peak_sign=peak_sign, method="radius", radius_um=radius_um + ) + templates = get_dense_templates_array( + sorting_analyzer_or_templates, return_scaled=get_return_scaled(sorting_analyzer_or_templates) + ) + nbefore = _get_nbefore(sorting_analyzer_or_templates) + + unit_location = np.zeros((unit_ids.size, 2), dtype="float64") + for i, unit_id in enumerate(unit_ids): + chan_inds = sparsity.unit_id_to_channel_indices[unit_id] + local_contact_locations = contact_locations[chan_inds, :] + + wf = templates[i, :, :] + + if feature == "ptp": + wf_data = (wf[:, chan_inds]).ptp(axis=0) + elif feature == "mean": + wf_data = (wf[:, chan_inds]).mean(axis=0) + elif feature == "energy": + wf_data = np.linalg.norm(wf[:, chan_inds], axis=0) + elif feature == "peak_voltage": + wf_data = wf[nbefore, chan_inds] + + # center of mass + com = np.sum(wf_data[:, np.newaxis] * local_contact_locations, axis=0) / np.sum(wf_data) + unit_location[i, :] = com + + return unit_location + + +def compute_grid_convolution( + sorting_analyzer_or_templates: SortingAnalyzer | Templates, + peak_sign: str = "neg", + radius_um: float = 40.0, + upsampling_um: float = 5, + sigma_ms: float = 0.25, + margin_um: float = 50, + prototype: np.ndarray | None = None, + percentile: float = 5, + weight_method: dict = {}, +) -> np.ndarray: + """ + Estimate the positions of the templates from a large grid of fake templates + + Parameters + ---------- + sorting_analyzer_or_templates : SortingAnalyzer | Templates + A SortingAnalyzer or Templates object + peak_sign : "neg" | "pos" | "both", default: "neg" + Sign of the template to compute best channels + radius_um : float, default: 40.0 + Radius to consider for the fake templates + upsampling_um : float, default: 5 + Upsampling resolution for the grid of templates + sigma_ms : float, default: 0.25 + The temporal decay of the fake templates + margin_um : float, default: 50 + The margin for the grid of fake templates + prototype : np.array or None, default: None + Fake waveforms for the templates. If None, generated as Gaussian + percentile : float, default: 5 + The percentage in [0, 100] of the best scalar products kept to + estimate the position + weight_method : dict + Parameter that should be provided to the get_convolution_weights() function + in order to know how to estimate the positions. 
+        One argument is "mode", which can be either "gaussian_2d"
+        (KiloSort-like) or "exponential_3d" (the default)
+
+    Returns
+    -------
+    unit_location: np.array
+    """
+
+    contact_locations = sorting_analyzer_or_templates.get_channel_locations()
+    unit_ids = sorting_analyzer_or_templates.unit_ids
+
+    templates = get_dense_templates_array(
+        sorting_analyzer_or_templates, return_scaled=get_return_scaled(sorting_analyzer_or_templates)
+    )
+    nbefore = _get_nbefore(sorting_analyzer_or_templates)
+    nafter = templates.shape[1] - nbefore
+
+    fs = sorting_analyzer_or_templates.sampling_frequency
+    percentile = 100 - percentile
+    assert 0 <= percentile <= 100, "Percentile should be in [0, 100]"
+
+    time_axis = np.arange(-nbefore, nafter) * 1000 / fs
+    if prototype is None:
+        prototype = np.exp(-(time_axis**2) / (2 * (sigma_ms**2)))
+        if peak_sign == "neg":
+            prototype *= -1
+
+    prototype = prototype[:, np.newaxis]
+
+    template_positions, weights, nearest_template_mask, z_factors = get_grid_convolution_templates_and_weights(
+        contact_locations, radius_um, upsampling_um, margin_um, weight_method
+    )
+
+    peak_channels = get_template_extremum_channel(sorting_analyzer_or_templates, peak_sign, outputs="index")
+
+    weights_sparsity_mask = weights > 0
+
+    nb_weights = weights.shape[0]
+    unit_location = np.zeros((unit_ids.size, 3), dtype="float64")
+
+    for i, unit_id in enumerate(unit_ids):
+        main_chan = peak_channels[unit_id]
+        wf = templates[i, :, :]
+        nearest_mask = nearest_template_mask[main_chan, :]
+        channel_mask = np.sum(weights_sparsity_mask[:, :, nearest_mask], axis=(0, 2)) > 0
+        num_templates = np.sum(nearest_mask)
+        sub_w = weights[:, channel_mask, :][:, :, nearest_mask]
+        global_products = (wf[:, channel_mask] * prototype).sum(axis=0)
+
+        dot_products = np.zeros((nb_weights, num_templates), dtype=np.float32)
+        for count in range(nb_weights):
+            dot_products[count] = np.dot(global_products, sub_w[count])
+
+        mask = dot_products < 0
+        if percentile > 0:
+            dot_products[mask] = np.nan
+            ## We need to catch warnings because some lines can have only NaNs, and
+            ## if so the nanpercentile function throws a warning
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore")
+                thresholds = np.nanpercentile(dot_products, percentile)
+            thresholds = np.nan_to_num(thresholds)
+            dot_products[dot_products < thresholds] = 0
+        dot_products[mask] = 0
+
+        nearest_templates = template_positions[nearest_mask]
+        for count in range(nb_weights):
+            unit_location[i, :2] += np.dot(dot_products[count], nearest_templates)
+
+        scalar_products = dot_products.sum(1)
+        unit_location[i, 2] = np.dot(z_factors, scalar_products)
+        with np.errstate(divide="ignore", invalid="ignore"):
+            unit_location[i] /= scalar_products.sum()
+    unit_location = np.nan_to_num(unit_location)
+
+    return unit_location
+
+
+def get_return_scaled(sorting_analyzer_or_templates):
+    if isinstance(sorting_analyzer_or_templates, Templates):
+        return_scaled = sorting_analyzer_or_templates.is_scaled
+    else:
+        return_scaled = sorting_analyzer_or_templates.return_scaled
+    return return_scaled
+
+
+def make_initial_guess_and_bounds(wf_data, local_contact_locations, max_distance_um, initial_z=20):
+    # constant for initial guess and bounds
+    ind_max = np.argmax(wf_data)
+    max_ptp = wf_data[ind_max]
+    max_alpha = max_ptp * max_distance_um
+
+    # initial guess is the center of mass
+    com = np.sum(wf_data[:, np.newaxis] * local_contact_locations, axis=0) / np.sum(wf_data)
+    x0 = np.zeros(4, dtype="float32")
+    x0[:2] = com
+    x0[2] = initial_z
+    initial_alpha = np.sqrt(np.sum((com - local_contact_locations[ind_max, :]) ** 2) + initial_z**2) * max_ptp
+    x0[3] = initial_alpha
+
+    # bounds depend on initial guess
+    bounds = (
+        [x0[0] - max_distance_um, x0[1] - max_distance_um, 1, 0],
+        [x0[0] + max_distance_um, x0[1] + max_distance_um, max_distance_um * 10, max_alpha],
+    )
+
+    return x0, bounds
+
+
+def solve_monopolar_triangulation(wf_data, local_contact_locations, max_distance_um, optimizer):
+    import scipy.optimize
+
+    x0, bounds = make_initial_guess_and_bounds(wf_data, local_contact_locations, max_distance_um)
+
+    if optimizer == "least_square":
+        args = (wf_data, local_contact_locations)
+        try:
+            output = scipy.optimize.least_squares(estimate_distance_error, x0=x0, bounds=bounds, args=args)
+            return tuple(output["x"])
+        except Exception as e:
+            print(f"scipy.optimize.least_squares error: {e}")
+            return (np.nan, np.nan, np.nan, np.nan)
+
+    if optimizer == "minimize_with_log_penality":
+        x0 = x0[:3]
+        bounds = [(bounds[0][0], bounds[1][0]), (bounds[0][1], bounds[1][1]), (bounds[0][2], bounds[1][2])]
+        max_data = wf_data.max()
+        args = (wf_data, local_contact_locations, max_data)
+        try:
+            output = scipy.optimize.minimize(estimate_distance_error_with_log, x0=x0, bounds=bounds, args=args)
+            # final alpha
+            q = data_at(*output["x"], 1.0, local_contact_locations)
+            alpha = (wf_data * q).sum() / np.square(q).sum()
+            return (*output["x"], alpha)
+        except Exception as e:
+            print(f"scipy.optimize.minimize error: {e}")
+            return (np.nan, np.nan, np.nan, np.nan)
+
+
+# ----
+# optimizer "least_square"
+
+
+def estimate_distance_error(vec, wf_data, local_contact_locations):
+    # vec dims are (x, y, z, amplitude_factor)
+    # given that for contact_location x=dim0, y=dim1 and z is orthogonal to the probe
+    dist = np.sqrt(((local_contact_locations - vec[np.newaxis, :2]) ** 2).sum(axis=1) + vec[2] ** 2)
+    data_estimated = vec[3] / dist
+    err = wf_data - data_estimated
+    return err
+
+
+# ----
+# optimizer "minimize_with_log_penality"
+
+
+def data_at(x, y, z, alpha, local_contact_locations):
+    return alpha / np.sqrt(
+        np.square(x - local_contact_locations[:, 0]) + np.square(y - local_contact_locations[:, 1]) + np.square(z)
+    )
+
+
+def estimate_distance_error_with_log(vec, wf_data, local_contact_locations, max_data):
+    x, y, z = vec
+    q = data_at(x, y, z, 1.0, local_contact_locations)
+    alpha = (q * wf_data / max_data).sum() / (q * q).sum()
+    err = (
+        np.square(wf_data / max_data - data_at(x, y, z, alpha, local_contact_locations)).mean()
+        - np.log1p(10.0 * z) / 10000.0
+    )
+    return err
+
+
+# ---
+# waveform cleaning for localization. could be moved to another file
+
+
+def make_shell(channel, geom, n_jumps=1):
+    """See make_shells"""
+    from scipy.spatial.distance import cdist
+
+    pt = geom[channel]
+    dists = cdist([pt], geom).ravel()
+    radius = np.unique(dists)[1 : n_jumps + 1][-1]
+    return np.setdiff1d(np.flatnonzero(dists <= radius + 1e-8), [channel])
+
+
+def make_shells(geom, n_jumps=1):
+    """Get the neighbors of a channel within a radius
+
+    That radius is found by figuring out the distance to the closest channel,
+    then the channel which is the next closest (but farther than the closest),
+    etc... for n_jumps.
+
+    So, if n_jumps is 1, it will return the indices of channels which are
+    as close as the closest channel. If n_jumps is 2, it will include those
+    and also the indices of the next-closest channels. And so on...
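+
+    For example (a hypothetical layout, not taken from the code): on a
+    square grid with 20 um pitch, n_jumps=1 returns the contacts 20 um
+    away, while n_jumps=2 also includes those at the next unique distance,
+    ~28.3 um for that geometry.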
+
+    Returns
+    -------
+    shell_neighbors : list
+        List of length geom.shape[0] (aka, the number of channels)
+        The ith entry in the list is an array with the indices of the neighbors
+        of the ith channel.
+        i is not included in these arrays (a channel is not in its own shell).
+    """
+    return [make_shell(c, geom, n_jumps=n_jumps) for c in range(geom.shape[0])]
+
+
+def make_radial_order_parents(geom, neighbours_mask, n_jumps_per_growth=1, n_jumps_parent=3):
+    """Pre-computes a helper data structure for enforce_decrease_shells"""
+    n_channels = len(geom)
+
+    # which channels should we consider as possible parents for each channel?
+    shells = make_shells(geom, n_jumps=n_jumps_parent)
+
+    radial_parents = []
+    for channel, neighbors in enumerate(neighbours_mask):
+        channel_parents = []
+
+        # convert from boolean mask to list of indices
+        neighbors = np.flatnonzero(neighbors)
+
+        # the closest shell will do nothing
+        already_seen = [channel]
+        shell0 = make_shell(channel, geom, n_jumps=n_jumps_per_growth)
+        already_seen += sorted(c for c in shell0 if c not in already_seen)
+
+        # so we start at the second jump
+        jumps = 2
+        while len(already_seen) < (neighbors < n_channels).sum():
+            # grow our search -- what are the next-closest channels?
+            new_shell = make_shell(channel, geom, n_jumps=jumps * n_jumps_per_growth)
+            new_shell = list(sorted(c for c in new_shell if (c not in already_seen) and (c in neighbors)))
+
+            # for each new channel, find the intersection of the channels
+            # from previous shells and that channel's shell in `shells`
+            for new_chan in new_shell:
+                parents = np.intersect1d(shells[new_chan], already_seen)
+                parents_rel = np.flatnonzero(np.isin(neighbors, parents))
+                if not len(parents_rel):
+                    # this can happen for some strange geometries. In that case, bail.
+                    continue
+                channel_parents.append((np.flatnonzero(neighbors == new_chan).item(), parents_rel))
+
+            # add this shell to what we have seen
+            already_seen += new_shell
+            jumps += 1
+
+        radial_parents.append(channel_parents)
+
+    return radial_parents
+
+
+def enforce_decrease_shells_data(wf_data, maxchan, radial_parents, in_place=False):
+    """Radial enforce decrease"""
+    (C,) = wf_data.shape
+
+    # allocate storage for decreasing version of data
+    decreasing_data = wf_data if in_place else wf_data.copy()
+
+    # loop to enforce data decrease from parent shells
+    for c, parents_rel in radial_parents[maxchan]:
+        if decreasing_data[c] > decreasing_data[parents_rel].max():
+            decreasing_data[c] *= decreasing_data[parents_rel].max() / decreasing_data[c]
+
+    return decreasing_data
+
+
+def get_grid_convolution_templates_and_weights(
+    contact_locations, radius_um=40, upsampling_um=5, margin_um=50, weight_method={"mode": "exponential_3d"}
+):
+    """Get an upsampled grid of artificial templates given a particular probe layout
+
+    Parameters
+    ----------
+    contact_locations: array
+        The positions of the channels
+    radius_um: float
+        Radius in um for channel sparsity.
+    upsampling_um: float
+        Upsampling resolution for the grid of templates
+    margin_um: float
+        The margin for the grid of fake templates
+    weight_method: dict
+        Parameter that should be provided to the get_convolution_weights() function
+        in order to know how to estimate the positions.
+        One argument is "mode", which can be either "gaussian_2d"
+        (KiloSort-like) or "exponential_3d" (the default)
+
+    Returns
+    -------
+    template_positions: array
+        The positions of the upsampled templates
+    weights: array
+        The weights of the templates, on a per channel basis
+    nearest_template_mask: array
+        A sparsity mask indicating which templates are close to the contact
+        locations, given the radius_um parameter
+    z_factors: array
+        The z_factors that have been used to generate the weights along the third dimension
+    """
+
+    import sklearn.metrics
+
+    x_min, x_max = contact_locations[:, 0].min(), contact_locations[:, 0].max()
+    y_min, y_max = contact_locations[:, 1].min(), contact_locations[:, 1].max()
+
+    x_min -= margin_um
+    x_max += margin_um
+    y_min -= margin_um
+    y_max += margin_um
+
+    eps = upsampling_um / 10
+
+    all_x, all_y = np.meshgrid(
+        np.arange(x_min, x_max + eps, upsampling_um), np.arange(y_min, y_max + eps, upsampling_um)
+    )
+
+    nb_templates = all_x.size
+
+    template_positions = np.zeros((nb_templates, 2))
+    template_positions[:, 0] = all_x.flatten()
+    template_positions[:, 1] = all_y.flatten()
+
+    # mask to get nearest template given a channel
+    dist = sklearn.metrics.pairwise_distances(contact_locations, template_positions)
+    nearest_template_mask = dist <= radius_um
+    weights, z_factors = get_convolution_weights(dist, **weight_method)
+
+    return template_positions, weights, nearest_template_mask, z_factors
+
+
+def get_convolution_weights(
+    distances,
+    z_list_um=np.linspace(0, 120.0, 5),
+    sigma_list_um=np.linspace(5, 25, 5),
+    sparsity_threshold=None,
+    sigma_3d=2.5,
+    mode="exponential_3d",
+):
+    """Get normalized weights for creating artificial templates, given some precomputed distances
+
+    Parameters
+    ----------
+    distances: 2D array
+        The distances between the source channels (real ones) and the upsampled ones (virtual ones)
+    sparsity_threshold: float, default None
+        The threshold below which weights are set to 0 (speeding up computations).
+        If None, then a default value of 0.5/sqrt(distances.shape[0]) is used
+    mode: exponential_3d | gaussian_2d
+        The inference scheme to be used to get the convolution weights
+        Keyword arguments for the chosen method:
+            "gaussian_2d" (similar to KiloSort):
+                * sigma_list_um: array, default np.linspace(5, 25, 5)
+                    The list of sigmas to consider for the decaying Gaussians
+            "exponential_3d" (default):
+                * z_list_um: array, default np.linspace(0, 120.0, 5)
+                    The list of z to consider for putative depth of the sources
+                * sigma_3d: float, default 2.5
+                    The scaling factor controlling the decay of the exponential
+
+    Returns
+    -------
+    weights: array
+        The weights of the templates, on a per channel basis
+    z_factors: array
+        The z_factors that have been used to generate the weights along the third dimension
+    """
+
+    if sparsity_threshold is not None:
+        assert 0 <= sparsity_threshold <= 1, "sparsity_threshold should be in [0, 1]"
+
+    if mode == "exponential_3d":
+        weights = np.zeros((len(z_list_um), distances.shape[0], distances.shape[1]), dtype=np.float32)
+        for count, z in enumerate(z_list_um):
+            dist_3d = np.sqrt(distances**2 + z**2)
+            weights[count] = np.exp(-dist_3d / sigma_3d)
+        z_factors = z_list_um
+    elif mode == "gaussian_2d":
+        weights = np.zeros((len(sigma_list_um), distances.shape[0], distances.shape[1]), dtype=np.float32)
+        for count, sigma in enumerate(sigma_list_um):
+            alpha = 2 * (sigma**2)
+            weights[count] = np.exp(-(distances**2) / alpha)
+        z_factors = sigma_list_um
+
+    # normalize so that the values lie in [0, 1]
+    with np.errstate(divide="ignore", invalid="ignore"):
+        norm = np.linalg.norm(weights, axis=1)[:, np.newaxis, :]
+        weights /= norm
+
+    weights[~np.isfinite(weights)] = 0.0
+
+    # If sparsity_threshold is None or non-zero, we prune the weights that are
+    # below the sparsification factor. This will speed up further computations
+    if sparsity_threshold is None:
+        sparsity_threshold = 0.5 / np.sqrt(distances.shape[0])
+    weights[weights < sparsity_threshold] = 0
+
+    # re-normalize to ensure we have unit norms
+    with np.errstate(divide="ignore", invalid="ignore"):
+        norm = np.linalg.norm(weights, axis=1)[:, np.newaxis, :]
+        weights /= norm
+
+    weights[~np.isfinite(weights)] = 0.0
+
+    return weights, z_factors
+
+
+if HAVE_NUMBA:
+    enforce_decrease_shells = numba.jit(enforce_decrease_shells_data, nopython=True)
diff --git a/src/spikeinterface/postprocessing/tests/test_correlograms.py b/src/spikeinterface/postprocessing/tests/test_correlograms.py
index eef4af10fc..66d84c9565 100644
--- a/src/spikeinterface/postprocessing/tests/test_correlograms.py
+++ b/src/spikeinterface/postprocessing/tests/test_correlograms.py
@@ -7,12 +7,18 @@
 except ModuleNotFoundError as err:
     HAVE_NUMBA = False

-
 from spikeinterface import NumpySorting, generate_sorting
 from spikeinterface.postprocessing.tests.common_extension_tests import AnalyzerExtensionCommonTestSuite
 from spikeinterface.postprocessing import ComputeCorrelograms
-from spikeinterface.postprocessing.correlograms import compute_correlograms_on_sorting, _make_bins
+from spikeinterface.postprocessing.correlograms import (
+    _compute_correlograms_on_sorting,
+    _make_bins,
+    compute_correlograms,
+)
 import pytest
+from pytest import param
+
+SKIP_NUMBA = pytest.mark.skipif(not HAVE_NUMBA, reason="Numba not available")


 class TestComputeCorrelograms(AnalyzerExtensionCommonTestSuite):
@@ -22,17 +28,37 @@ class TestComputeCorrelograms(AnalyzerExtensionCommonTestSuite):
         [
             dict(method="numpy"),
             dict(method="auto"),
-            pytest.param(dict(method="numba"), marks=pytest.mark.skipif(not HAVE_NUMBA, reason="Numba not available")),
+            param(dict(method="numba"), marks=SKIP_NUMBA),
         ],
     )
     def test_extension(self, params):
         self.run_extension_tests(ComputeCorrelograms, params)

+    @pytest.mark.parametrize("method", ["numpy", param("numba", marks=SKIP_NUMBA)])
+    def test_sortinganalyzer_correlograms(self, method):
+        """
+        Test the outputs when using a SortingAnalyzer against the outputs
+        from passing a sorting directly to `compute_correlograms`.
+        Passing a sorting to `compute_correlograms` is tested extensively
+        below, so if these match it means the `SortingAnalyzer` path is working.
+        """
+        sorting_analyzer = self._prepare_sorting_analyzer("memory", sparse=False, extension_class=ComputeCorrelograms)
+
+        params = dict(method=method, window_ms=100, bin_ms=6.5)
+        ext_numpy = sorting_analyzer.compute(ComputeCorrelograms.extension_name, **params)
+
+        result_sorting, bins_sorting = compute_correlograms(self.sorting, **params)
+
+        assert np.array_equal(result_sorting, ext_numpy.data["ccgs"])
+        assert np.array_equal(bins_sorting, ext_numpy.data["bins"])
+
+
+# Unit Tests
+############
 def test_make_bins():
     """
     Check the `_make_bins()` function that generates time bins (lags) for
-    the correllogram creates the expected number of bins.
+    the correlogram creates the expected number of bins.
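+    It also checks the generated bin edges themselves against `np.linspace`.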
""" sorting = generate_sorting(num_units=5, sampling_frequency=30000.0, durations=[10.325, 3.5], seed=0) @@ -45,96 +71,78 @@ def test_make_bins(): bin_ms = 2.0 bins, window_size, bin_size = _make_bins(sorting, window_ms, bin_ms) assert bins.size == np.floor(window_ms / bin_ms) + 1 + assert np.array_equal(bins, np.linspace(-30, 30, bins.size)) -def _test_correlograms(sorting, window_ms, bin_ms, methods): - for method in methods: - correlograms, bins = compute_correlograms_on_sorting(sorting, window_ms=window_ms, bin_ms=bin_ms, method=method) - if method == "numpy": - ref_bins = bins - else: - assert np.allclose(bins, ref_bins, atol=1e-10), f"Failed with method={method}" - - -def test_equal_results_correlograms(): - # compare that the 2 methods have same results - methods = ["numpy"] - if HAVE_NUMBA: - methods.append("numba") +@pytest.mark.skipif(not HAVE_NUMBA, reason="Numba not available") +@pytest.mark.parametrize("window_and_bin_ms", [(60.0, 2.0), (3.57, 1.6421)]) +def test_equal_results_correlograms(window_and_bin_ms): + """ + Test that the 2 methods have same results with some varied time bins + that are not tested in other tests. + """ + window_ms, bin_ms = window_and_bin_ms sorting = generate_sorting(num_units=5, sampling_frequency=30000.0, durations=[10.325, 3.5], seed=0) - _test_correlograms(sorting, window_ms=60.0, bin_ms=2.0, methods=methods) - _test_correlograms(sorting, window_ms=43.57, bin_ms=1.6421, methods=methods) + result_numpy, bins_numpy = _compute_correlograms_on_sorting( + sorting, window_ms=window_ms, bin_ms=bin_ms, method="numpy" + ) + result_numba, bins_numba = _compute_correlograms_on_sorting( + sorting, window_ms=window_ms, bin_ms=bin_ms, method="numba" + ) + + assert np.array_equal(result_numpy, result_numba) + assert np.array_equal(result_numpy, result_numba) -def test_flat_cross_correlogram(): +@pytest.mark.parametrize("method", ["numpy", param("numba", marks=SKIP_NUMBA)]) +def test_flat_cross_correlogram(method): """ Check that the correlogram (num_units x num_units x num_bins) does not vary too much across time bins (lags), for entries representing two different units. 
""" sorting = generate_sorting(num_units=2, sampling_frequency=10000.0, durations=[100000.0], seed=0) - methods = ["numpy"] - if HAVE_NUMBA: - methods.append("numba") + correlograms, bins = _compute_correlograms_on_sorting(sorting, window_ms=50.0, bin_ms=1.0, method=method) + cc = correlograms[0, 1, :].copy() + m = np.mean(cc) - for method in methods: - correlograms, bins = compute_correlograms_on_sorting(sorting, window_ms=50.0, bin_ms=1.0, method=method) - cc = correlograms[0, 1, :].copy() - m = np.mean(cc) - assert np.all(cc > (m * 0.90)) - assert np.all(cc < (m * 1.10)) + assert np.all(cc > (m * 0.90)) + assert np.all(cc < (m * 1.10)) -def test_auto_equal_cross_correlograms(): +@pytest.mark.parametrize("method", ["numpy", param("numba", marks=SKIP_NUMBA)]) +def test_auto_equal_cross_correlograms(method): """ - check if cross correlogram is the same as autocorrelogram + Check if cross correlogram is the same as autocorrelogram by removing n spike in bin zeros - numpy method: - * have problem for the left bin - * have problem on center """ - - methods = ["numpy"] - if HAVE_NUMBA: - methods.append("numba") - num_spike = 2000 spike_times = np.sort(np.unique(np.random.randint(0, 100000, num_spike))) num_spike = spike_times.size units_dict = {"1": spike_times, "2": spike_times} sorting = NumpySorting.from_unit_dict([units_dict], sampling_frequency=10000.0) - for method in methods: - correlograms, bins = compute_correlograms_on_sorting(sorting, window_ms=10.0, bin_ms=0.1, method=method) + correlograms, bins = _compute_correlograms_on_sorting(sorting, window_ms=10.0, bin_ms=0.1, method=method) - num_half_bins = correlograms.shape[2] // 2 + num_half_bins = correlograms.shape[2] // 2 - cc = correlograms[0, 1, :] - ac = correlograms[0, 0, :] - cc_corrected = cc.copy() - cc_corrected[num_half_bins] -= num_spike + cc = correlograms[0, 1, :] + ac = correlograms[0, 0, :] + cc_corrected = cc.copy() + cc_corrected[num_half_bins] -= num_spike - if method == "numpy": - # numpy method have some border effect on left - assert np.array_equal(cc_corrected[1:num_half_bins], ac[1:num_half_bins]) - # numpy method have some problem on center - assert np.array_equal(cc_corrected[num_half_bins + 1 :], ac[num_half_bins + 1 :]) - else: - assert np.array_equal(cc_corrected, ac) + assert np.array_equal(cc_corrected, ac) -def test_detect_injected_correlation(): +@pytest.mark.parametrize("method", ["numpy", param("numba", marks=SKIP_NUMBA)]) +def test_detect_injected_correlation(method): """ Inject 1.44 ms of correlation every 13 spikes and compute cross-correlation. Check that the time bin lag with the peak correlation lag is 1.44 ms (within tolerance of a sampling period). 
""" - methods = ["numpy"] - if HAVE_NUMBA: - methods.append("numba") - sampling_frequency = 10000.0 num_spike = 2000 rng = np.random.default_rng(seed=0) @@ -143,6 +151,7 @@ def test_detect_injected_correlation(): n = min(spike_times1.size, spike_times2.size) spike_times1 = spike_times1[:n] spike_times2 = spike_times2[:n] + # inject 1.44 ms correlation every 13 spikes injected_delta_ms = 1.44 spike_times2[::13] = spike_times1[::13] + int(injected_delta_ms / 1000 * sampling_frequency) @@ -151,15 +160,212 @@ def test_detect_injected_correlation(): units_dict = {"1": spike_times1, "2": spike_times2} sorting = NumpySorting.from_unit_dict([units_dict], sampling_frequency=sampling_frequency) - for method in methods: - correlograms, bins = compute_correlograms_on_sorting(sorting, window_ms=10.0, bin_ms=0.1, method=method) + correlograms, bins = _compute_correlograms_on_sorting(sorting, window_ms=10.0, bin_ms=0.1, method=method) + + cc_01 = correlograms[0, 1, :] + cc_10 = correlograms[1, 0, :] - cc_01 = correlograms[0, 1, :] - cc_10 = correlograms[1, 0, :] + peak_location_01_ms = bins[np.argmax(cc_01)] + peak_location_02_ms = bins[np.argmax(cc_10)] - peak_location_01_ms = bins[np.argmax(cc_01)] - peak_location_02_ms = bins[np.argmax(cc_10)] + sampling_period_ms = 1000.0 / sampling_frequency + assert abs(peak_location_01_ms) - injected_delta_ms < sampling_period_ms + assert abs(peak_location_02_ms) - injected_delta_ms < sampling_period_ms - sampling_period_ms = 1000.0 / sampling_frequency - assert abs(peak_location_01_ms) - injected_delta_ms < sampling_period_ms - assert abs(peak_location_02_ms) - injected_delta_ms < sampling_period_ms + +# Functional Tests +################### +@pytest.mark.parametrize("fill_all_bins", [True, False]) +@pytest.mark.parametrize("on_time_bin", [True, False]) +@pytest.mark.parametrize("multi_segment", [True, False]) +def test_compute_correlograms(fill_all_bins, on_time_bin, multi_segment): + """ + Test the entry function `compute_correlograms` under a variety of conditions. + For specifics of `fill_all_bins` and `on_time_bin` see `generate_correlogram_test_dataset()`. + + This function tests numpy and numba in one go, to avoid over-parameterising the method. + It tests both a single-segment and multi-segment dataset. The way that segments are + handled for the correlogram is to combine counts across all segments, therefore the + counts should double when two segments with identical spike times / labels are used. 
+ """ + sampling_frequency = 30000 + window_ms, bin_ms, spike_times, spike_unit_indices, expected_bins, expected_result_auto, expected_result_corr = ( + generate_correlogram_test_dataset(sampling_frequency, fill_all_bins, on_time_bin) + ) + + if multi_segment: + sorting = NumpySorting.from_times_labels( + times_list=[spike_times], labels_list=[spike_unit_indices], sampling_frequency=sampling_frequency + ) + else: + sorting = NumpySorting.from_times_labels( + times_list=[spike_times, spike_times], + labels_list=[spike_unit_indices, spike_unit_indices], + sampling_frequency=sampling_frequency, + ) + expected_result_auto *= 2 + expected_result_corr *= 2 + + result_numba, bins_numba = compute_correlograms(sorting, window_ms=window_ms, bin_ms=bin_ms, method="numba") + result_numpy, bins_numpy = compute_correlograms(sorting, window_ms=window_ms, bin_ms=bin_ms, method="numpy") + + for auto_idx in [(0, 0), (1, 1), (2, 2)]: + assert np.array_equal(expected_result_auto, result_numpy[auto_idx]) + assert np.array_equal(expected_result_auto, result_numba[auto_idx]) + + for auto_idx in [(1, 0), (0, 1), (0, 2), (2, 0), (1, 2), (2, 1)]: + assert np.array_equal(expected_result_corr, result_numpy[auto_idx]) + assert np.array_equal(expected_result_corr, result_numba[auto_idx]) + + +@pytest.mark.parametrize("method", ["numpy", param("numba", marks=SKIP_NUMBA)]) +def test_compute_correlograms_different_units(method): + """ + Make a supplementary test to `test_compute_correlograms` in which all + units had the same spike train. Test here a simpler and accessible + test case with only two neurons with different spike time differences + within and across units. + + This case is simple enough to validate by hand, for example for the + result[1, 1] case we are looking at the autocorrelogram of the unit '1'. + The spike times are 4 and 16 s, therefore we expect to see a count in + the +/- 10 to 15 s bin. + """ + sampling_frequency = 30000 + spike_times = np.array([0, 4, 8, 16]) / 1000 * sampling_frequency + spike_times.astype(int) + + spike_unit_indices = np.array([0, 1, 0, 1]) + + window_ms = 40 + bin_ms = 5 + + sorting = NumpySorting.from_times_labels( + times_list=[spike_times], labels_list=[spike_unit_indices], sampling_frequency=sampling_frequency + ) + + result, bins = compute_correlograms(sorting, window_ms=window_ms, bin_ms=bin_ms, method=method) + + assert np.array_equal(result[0, 0], np.array([0, 0, 1, 0, 0, 1, 0, 0])) + + assert np.array_equal(result[1, 1], np.array([0, 1, 0, 0, 0, 0, 1, 0])) + + assert np.array_equal(result[1, 0], np.array([0, 0, 0, 1, 1, 1, 0, 1])) + + assert np.array_equal(result[0, 1], np.array([1, 0, 1, 1, 1, 0, 0, 0])) + + +def generate_correlogram_test_dataset(sampling_frequency, fill_all_bins, hit_bin_edge): + """ + This generates a detailed correlogram test and expected outputs, for a number of + test cases: + + overflow edges : when there are counts expected in every measured bins, otherwise + counts are expected only in a (central) subset of bins. + hit_bin_edge : if `True`, the difference in spike times are created to land + exactly as multiples of the bin size, an edge case that caused + some problems in previous iterations of the algorithm. + + The approach used is to create a set of spike times which are + multiples of a 'base_diff_time'. When `hit_bin_edge` is `False` this is + set to 5.1 ms. So, we have spikes at: + 5.1 ms, 10.2 ms, 15.3 ms, ..., base_diff_time * num_filled_bins + + This means consecutive spike times are 5.1 ms apart. 
+    Then every two spike times are 10.2 ms apart. This gives predictable
+    bin counts that are maximal at the smaller bins (e.g. 5-10 ms) and
+    minimal at the later bins (e.g. 100-105 ms). Note that at more than
+    num_filled_bins the times will overflow into the next bin and the
+    test won't work. None of these parameters should be changed.
+
+    When `hit_bin_edge` is `False`, we expect that bin counts will increase from the
+    edges of the window to the middle, be maximal near the middle, be 0 in the exact
+    center (-5 to 0 ms, 0 to 5 ms) and then decrease again towards the other edge
+    of the window. For the autocorrelation, the zero-lag case is not included and
+    the two central bins will be zero.
+
+    Different units are tested by repeating the spike times. This means all
+    units' autocorrelation and cross-correlation results will be
+    identical, simplifying the tests. The only difference is that auto-correlation
+    does not count the zero-lag bins but cross-correlation does. Because the
+    spike times are identical, this means in the cross-correlation case we have
+    `num_filled_bins` in the central bin. By convention, this is always put
+    in the positive (i.e. 0-5 ms) not negative (-5 to 0 ms) bin. I guess it
+    could make sense to force it into both positive and negative bins?
+
+    Finally, the case when the time differences are exactly the bin
+    size is tested. In this case the spike times are [0, 5, 10, 15, ...]
+    with all diffs 5 and the `bin_ms` set to 5. By convention, when spike
+    diffs hit the bin edge they are set into the 'right' (i.e. positive)
+    bin. For positive bins this does not change, but for negative bins
+    all entries are shifted one place to the right.
+    """
+    num_units = 3
+
+    # These give us 61 bins, [-150, -145, ..., 0, ..., 145, 150]
+    window_ms = 300
+    bin_ms = 5
+
+    # If fill_all_bins, we will have a diff at every possible
+    # bin e.g. the counts will be [31, 30, ..., 30, 31]. If not,
+    # test the case where there are zero bins e.g. [0, 0, 9, 8, ..., 8, 9, 0, 0].
+    if fill_all_bins:
+        num_filled_bins = 60
+    else:
+        num_filled_bins = 10
+
+    # If we are on a time bin, make the time delays exactly
+    # the same as a time bin, testing this tricky edge case.
+    if hit_bin_edge:
+        base_diff_time = bin_ms / 1000
+    else:
+        base_diff_time = bin_ms / 1000 + 0.0001  # i.e. 0.0051 s
+
+    # Now, make a set of times that increase by `base_diff_time` e.g.
+    # if base_diff_time=0.0051 then our spike times are [0.0051, 0.0102, ...]
+    spike_times = np.repeat(np.arange(num_filled_bins), num_units) * base_diff_time
+    spike_unit_indices = np.tile(np.arange(num_units), int(spike_times.size / num_units))
+
+    spike_times *= sampling_frequency
+    spike_times = spike_times.astype(int)
+
+    # Here, generate the expected results. These are pretty much hard-coded
+    # to be as explicit as possible.
+
+    # Generate the expected bins
+    num_bins = int(window_ms / bin_ms)
+    assert window_ms == 300, "don't change the window_ms"
+    assert bin_ms == 5, "don't change the bin_ms"
+    expected_bins = np.linspace(-150, 150, num_bins + 1)
+
+    # In this case, all time bins are shifted to the right for the
+    # negative lags, due to the diffs lying on the bin edge.
+
+    # In this case, the counts in the negative bins are all shifted one
+    # place to the right because the diffs lie exactly on the bin edges.
+    # [30, 31, ..., 59, 0, 59, ..., 32, 31]
+    if fill_all_bins and hit_bin_edge:
+        expected_result_auto = np.r_[np.arange(30, 60), 0, np.flip(np.arange(31, 60))]
+
+    # In this case there are no edge effects and the bin counts
+    # [31, 32, ..., 59, 0, 0, 59, ..., 32, 31]
+    # are symmetrical
+    elif fill_all_bins and not hit_bin_edge:
+        forward = np.r_[np.arange(31, 60), 0]
+        expected_result_auto = np.r_[forward, np.flip(forward)]
+
+    # Here we have many zero bins, but the counts in the negative bins
+    # are shifted one place to the right
+    # [0, 0, ..., 1, 2, 3, ..., 9, 0, 9, ..., 3, 2, 1, ..., 0]
+    elif not fill_all_bins and hit_bin_edge:
+        forward = np.r_[np.zeros(19), np.arange(10)]
+        expected_result_auto = np.r_[0, forward, 0, np.flip(forward)]
+
+    # Here we have many zero bins and they are symmetrical
+    # [0, 0, ..., 1, 2, 3, ..., 9, 0, 0, 9, ..., 3, 2, 1, ..., 0, 0]
+    elif not fill_all_bins and not hit_bin_edge:
+        forward = np.r_[np.zeros(19), np.arange(10), 0]
+        expected_result_auto = np.r_[forward, np.flip(forward)]
+
+    # The zero-lag bins are only skipped in the autocorrelogram
+    # case.
+    expected_result_corr = expected_result_auto.copy()
+    expected_result_corr[int(num_bins / 2)] = num_filled_bins
+
+    return window_ms, bin_ms, spike_times, spike_unit_indices, expected_bins, expected_result_auto, expected_result_corr
diff --git a/src/spikeinterface/postprocessing/unit_locations.py b/src/spikeinterface/postprocessing/unit_locations.py
index 16d9955e58..9435030775 100644
--- a/src/spikeinterface/postprocessing/unit_locations.py
+++ b/src/spikeinterface/postprocessing/unit_locations.py
@@ -1,21 +1,14 @@
 from __future__ import annotations

-import warnings
-
 import numpy as np
-
-
-try:
-    import numba
-
-    HAVE_NUMBA = True
-except ImportError:
-    HAVE_NUMBA = False
+import warnings

 from ..core.sortinganalyzer import register_result_extension, AnalyzerExtension
-from ..core import compute_sparsity
-from ..core.template_tools import get_template_extremum_channel, _get_nbefore, get_dense_templates_array
-
+from .localization_tools import (
+    compute_center_of_mass,
+    compute_grid_convolution,
+    compute_monopolar_triangulation,
+)

 dtype_localize_by_method = {
     "center_of_mass": [("x", "float64"), ("y", "float64")],
@@ -90,592 +83,3 @@ def get_data(self, outputs="numpy"):

 register_result_extension(ComputeUnitLocations)
 compute_unit_locations = ComputeUnitLocations.function_factory()
-
-
-def make_initial_guess_and_bounds(wf_data, local_contact_locations, max_distance_um, initial_z=20):
-    # constant for initial guess and bounds
-    ind_max = np.argmax(wf_data)
-    max_ptp = wf_data[ind_max]
-    max_alpha = max_ptp * max_distance_um
-
-    # initial guess is the center of mass
-    com = np.sum(wf_data[:, np.newaxis] * local_contact_locations, axis=0) / np.sum(wf_data)
-    x0 = np.zeros(4, dtype="float32")
-    x0[:2] = com
-    x0[2] = initial_z
-    initial_alpha = np.sqrt(np.sum((com - local_contact_locations[ind_max, :]) ** 2) + initial_z**2) * max_ptp
-    x0[3] = initial_alpha
-
-    # bounds depend on initial guess
-    bounds = (
-        [x0[0] - max_distance_um, x0[1] - max_distance_um, 1, 0],
-        [x0[0] + max_distance_um, x0[1] + max_distance_um, max_distance_um * 10, max_alpha],
-    )
-
-    return x0, bounds
-
-
-def solve_monopolar_triangulation(wf_data, local_contact_locations, max_distance_um, optimizer):
-    import scipy.optimize
-
-    x0, bounds = make_initial_guess_and_bounds(wf_data, local_contact_locations, max_distance_um)
-
-    if optimizer == "least_square":
-        args = (wf_data, local_contact_locations)
-        try:
-            output = 
scipy.optimize.least_squares(estimate_distance_error, x0=x0, bounds=bounds, args=args) - return tuple(output["x"]) - except Exception as e: - print(f"scipy.optimize.least_squares error: {e}") - return (np.nan, np.nan, np.nan, np.nan) - - if optimizer == "minimize_with_log_penality": - x0 = x0[:3] - bounds = [(bounds[0][0], bounds[1][0]), (bounds[0][1], bounds[1][1]), (bounds[0][2], bounds[1][2])] - max_data = wf_data.max() - args = (wf_data, local_contact_locations, max_data) - try: - output = scipy.optimize.minimize(estimate_distance_error_with_log, x0=x0, bounds=bounds, args=args) - # final alpha - q = data_at(*output["x"], 1.0, local_contact_locations) - alpha = (wf_data * q).sum() / np.square(q).sum() - return (*output["x"], alpha) - except Exception as e: - print(f"scipy.optimize.minimize error: {e}") - return (np.nan, np.nan, np.nan, np.nan) - - -# ---- -# optimizer "least_square" - - -def estimate_distance_error(vec, wf_data, local_contact_locations): - # vec dims ar (x, y, z amplitude_factor) - # given that for contact_location x=dim0 + z=dim1 and y is orthogonal to probe - dist = np.sqrt(((local_contact_locations - vec[np.newaxis, :2]) ** 2).sum(axis=1) + vec[2] ** 2) - data_estimated = vec[3] / dist - err = wf_data - data_estimated - return err - - -# ---- -# optimizer "minimize_with_log_penality" - - -def data_at(x, y, z, alpha, local_contact_locations): - return alpha / np.sqrt( - np.square(x - local_contact_locations[:, 0]) + np.square(y - local_contact_locations[:, 1]) + np.square(z) - ) - - -def estimate_distance_error_with_log(vec, wf_data, local_contact_locations, max_data): - x, y, z = vec - q = data_at(x, y, z, 1.0, local_contact_locations) - alpha = (q * wf_data / max_data).sum() / (q * q).sum() - err = ( - np.square(wf_data / max_data - data_at(x, y, z, alpha, local_contact_locations)).mean() - - np.log1p(10.0 * z) / 10000.0 - ) - return err - - -def compute_monopolar_triangulation( - sorting_analyzer, - optimizer="minimize_with_log_penality", - radius_um=75, - max_distance_um=1000, - return_alpha=False, - enforce_decrease=False, - feature="ptp", -): - """ - Localize unit with monopolar triangulation. - This method is from Julien Boussard, Erdem Varol and Charlie Windolf - https://www.biorxiv.org/content/10.1101/2021.11.05.467503v1 - - There are 2 implementations of the 2 optimizer variants: - * https://github.com/int-brain-lab/spikes_localization_registration/blob/main/localization_pipeline/localizer.py - * https://github.com/cwindolf/spike-psvae/blob/main/spike_psvae/localization.py - - Important note about axis: - * x/y are dimmension on the probe plane (dim0, dim1) - * y is the depth by convention - * z it the orthogonal axis to the probe plan (dim2) - - Code from Erdem, Julien and Charlie do not use the same convention!!! 
- - - Parameters - ---------- - sorting_analyzer: SortingAnalyzer - A SortingAnalyzer object - method: "least_square" | "minimize_with_log_penality", default: "least_square" - The optimizer to use - radius_um: float, default: 75 - For channel sparsity - max_distance_um: float, default: 1000 - to make bounddary in x, y, z and also for alpha - return_alpha: bool, default: False - Return or not the alpha value - enforce_decrease : bool, default: False - Enforce spatial decreasingness for PTP vectors - feature: "ptp" | "energy" | "peak_voltage", default: "ptp" - The available features to consider for estimating the position via - monopolar triangulation are peak-to-peak amplitudes ("ptp", default), - energy ("energy", as L2 norm) or voltages at the center of the waveform - ("peak_voltage") - - Returns - ------- - unit_location: np.array - 3d or 4d, x, y, z, alpha - alpha is the amplitude at source estimation - """ - assert optimizer in ("least_square", "minimize_with_log_penality") - - assert feature in ["ptp", "energy", "peak_voltage"], f"{feature} is not a valid feature" - unit_ids = sorting_analyzer.unit_ids - - contact_locations = sorting_analyzer.get_channel_locations() - - sparsity = compute_sparsity(sorting_analyzer, method="radius", radius_um=radius_um) - templates = get_dense_templates_array(sorting_analyzer, return_scaled=sorting_analyzer.return_scaled) - nbefore = _get_nbefore(sorting_analyzer) - - if enforce_decrease: - neighbours_mask = np.zeros((templates.shape[0], templates.shape[2]), dtype=bool) - for i, unit_id in enumerate(unit_ids): - chan_inds = sparsity.unit_id_to_channel_indices[unit_id] - neighbours_mask[i, chan_inds] = True - enforce_decrease_radial_parents = make_radial_order_parents(contact_locations, neighbours_mask) - best_channels = get_template_extremum_channel(sorting_analyzer, outputs="index") - - unit_location = np.zeros((unit_ids.size, 4), dtype="float64") - for i, unit_id in enumerate(unit_ids): - chan_inds = sparsity.unit_id_to_channel_indices[unit_id] - local_contact_locations = contact_locations[chan_inds, :] - - # wf is (nsample, nchan) - chann is only nieghboor - wf = templates[i, :, :][:, chan_inds] - if feature == "ptp": - wf_data = wf.ptp(axis=0) - elif feature == "energy": - wf_data = np.linalg.norm(wf, axis=0) - elif feature == "peak_voltage": - wf_data = np.abs(wf[nbefore]) - - # if enforce_decrease: - # enforce_decrease_shells_data( - # wf_data, best_channels[unit_id], enforce_decrease_radial_parents, in_place=True - # ) - - unit_location[i] = solve_monopolar_triangulation(wf_data, local_contact_locations, max_distance_um, optimizer) - - if not return_alpha: - unit_location = unit_location[:, :3] - - return unit_location - - -def compute_center_of_mass(sorting_analyzer, peak_sign="neg", radius_um=75, feature="ptp"): - """ - Computes the center of mass (COM) of a unit based on the template amplitudes. 
- - Parameters - ---------- - sorting_analyzer: SortingAnalyzer - A SortingAnalyzer object - peak_sign: "neg" | "pos" | "both", default: "neg" - Sign of the template to compute best channels - radius_um: float - Radius to consider in order to estimate the COM - feature: "ptp" | "mean" | "energy" | "peak_voltage", default: "ptp" - Feature to consider for computation - - Returns - ------- - unit_location: np.array - """ - unit_ids = sorting_analyzer.unit_ids - - contact_locations = sorting_analyzer.get_channel_locations() - - assert feature in ["ptp", "mean", "energy", "peak_voltage"], f"{feature} is not a valid feature" - - sparsity = compute_sparsity(sorting_analyzer, peak_sign=peak_sign, method="radius", radius_um=radius_um) - templates = get_dense_templates_array(sorting_analyzer, return_scaled=sorting_analyzer.return_scaled) - nbefore = _get_nbefore(sorting_analyzer) - - unit_location = np.zeros((unit_ids.size, 2), dtype="float64") - for i, unit_id in enumerate(unit_ids): - chan_inds = sparsity.unit_id_to_channel_indices[unit_id] - local_contact_locations = contact_locations[chan_inds, :] - - wf = templates[i, :, :] - - if feature == "ptp": - wf_data = (wf[:, chan_inds]).ptp(axis=0) - elif feature == "mean": - wf_data = (wf[:, chan_inds]).mean(axis=0) - elif feature == "energy": - wf_data = np.linalg.norm(wf[:, chan_inds], axis=0) - elif feature == "peak_voltage": - wf_data = wf[nbefore, chan_inds] - - # center of mass - com = np.sum(wf_data[:, np.newaxis] * local_contact_locations, axis=0) / np.sum(wf_data) - unit_location[i, :] = com - - return unit_location - - -def compute_grid_convolution( - sorting_analyzer, - peak_sign="neg", - radius_um=40.0, - upsampling_um=5, - sigma_ms=0.25, - margin_um=50, - prototype=None, - percentile=5, - weight_method={}, -): - """ - Estimate the positions of the templates from a large grid of fake templates - - Parameters - ---------- - sorting_analyzer: SortingAnalyzer - A SortingAnalyzer object - peak_sign: "neg" | "pos" | "both", default: "neg" - Sign of the template to compute best channels - radius_um: float, default: 40.0 - Radius to consider for the fake templates - upsampling_um: float, default: 5 - Upsampling resolution for the grid of templates - sigma_ms: float, default: 0.25 - The temporal decay of the fake templates - margin_um: float, default: 50 - The margin for the grid of fake templates - prototype: np.array or None, default: None - Fake waveforms for the templates. If None, generated as Gaussian - percentile: float, default: 5 - The percentage in [0, 100] of the best scalar products kept to - estimate the position - weight_method: dict - Parameter that should be provided to the get_convolution_weights() function - in order to know how to estimate the positions. 
One argument is mode that could - be either gaussian_2d (KS like) or exponential_3d (default) - Returns - ------- - unit_location: np.array - """ - - contact_locations = sorting_analyzer.get_channel_locations() - unit_ids = sorting_analyzer.unit_ids - - templates = get_dense_templates_array(sorting_analyzer, return_scaled=sorting_analyzer.return_scaled) - nbefore = _get_nbefore(sorting_analyzer) - nafter = templates.shape[1] - nbefore - - fs = sorting_analyzer.sampling_frequency - percentile = 100 - percentile - assert 0 <= percentile <= 100, "Percentile should be in [0, 100]" - - time_axis = np.arange(-nbefore, nafter) * 1000 / fs - if prototype is None: - prototype = np.exp(-(time_axis**2) / (2 * (sigma_ms**2))) - if peak_sign == "neg": - prototype *= -1 - - prototype = prototype[:, np.newaxis] - - template_positions, weights, nearest_template_mask, z_factors = get_grid_convolution_templates_and_weights( - contact_locations, radius_um, upsampling_um, margin_um, weight_method - ) - - peak_channels = get_template_extremum_channel(sorting_analyzer, peak_sign, outputs="index") - - weights_sparsity_mask = weights > 0 - - nb_weights = weights.shape[0] - unit_location = np.zeros((unit_ids.size, 3), dtype="float64") - - for i, unit_id in enumerate(unit_ids): - main_chan = peak_channels[unit_id] - wf = templates[i, :, :] - nearest_mask = nearest_template_mask[main_chan, :] - channel_mask = np.sum(weights_sparsity_mask[:, :, nearest_mask], axis=(0, 2)) > 0 - num_templates = np.sum(nearest_mask) - sub_w = weights[:, channel_mask, :][:, :, nearest_mask] - global_products = (wf[:, channel_mask] * prototype).sum(axis=0) - - dot_products = np.zeros((nb_weights, num_templates), dtype=np.float32) - for count in range(nb_weights): - dot_products[count] = np.dot(global_products, sub_w[count]) - - mask = dot_products < 0 - if percentile > 0: - dot_products[mask] = np.nan - ## We need to catch warnings because some line can have only NaN, and - ## if so the nanpercentile function throws a warning - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - thresholds = np.nanpercentile(dot_products, percentile) - thresholds = np.nan_to_num(thresholds) - dot_products[dot_products < thresholds] = 0 - dot_products[mask] = 0 - - nearest_templates = template_positions[nearest_mask] - for count in range(nb_weights): - unit_location[i, :2] += np.dot(dot_products[count], nearest_templates) - - scalar_products = dot_products.sum(1) - unit_location[i, 2] = np.dot(z_factors, scalar_products) - with np.errstate(divide="ignore", invalid="ignore"): - unit_location[i] /= scalar_products.sum() - unit_location = np.nan_to_num(unit_location) - - return unit_location - - -# --- -# waveform cleaning for localization. could be moved to another file - - -def make_shell(channel, geom, n_jumps=1): - """See make_shells""" - from scipy.spatial.distance import cdist - - pt = geom[channel] - dists = cdist([pt], geom).ravel() - radius = np.unique(dists)[1 : n_jumps + 1][-1] - return np.setdiff1d(np.flatnonzero(dists <= radius + 1e-8), [channel]) - - -def make_shells(geom, n_jumps=1): - """Get the neighbors of a channel within a radius - - That radius is found by figuring out the distance to the closest channel, - then the channel which is the next closest (but farther than the closest), - etc... for n_jumps. - - So, if n_jumps is 1, it will return the indices of channels which are - as close as the closest channel. If n_jumps is 2, it will include those - and also the indices of the next-closest channels. And so on... 
- - Returns - ------- - shell_neighbors : list - List of length geom.shape[0] (aka, the number of channels) - The ith entry in the list is an array with the indices of the neighbors - of the ith channel. - i is not included in these arrays (a channel is not in its own shell). - """ - return [make_shell(c, geom, n_jumps=n_jumps) for c in range(geom.shape[0])] - - -def make_radial_order_parents(geom, neighbours_mask, n_jumps_per_growth=1, n_jumps_parent=3): - """Pre-computes a helper data structure for enforce_decrease_shells""" - n_channels = len(geom) - - # which channels should we consider as possible parents for each channel? - shells = make_shells(geom, n_jumps=n_jumps_parent) - - radial_parents = [] - for channel, neighbors in enumerate(neighbours_mask): - channel_parents = [] - - # convert from boolean mask to list of indices - neighbors = np.flatnonzero(neighbors) - - # the closest shell will do nothing - already_seen = [channel] - shell0 = make_shell(channel, geom, n_jumps=n_jumps_per_growth) - already_seen += sorted(c for c in shell0 if c not in already_seen) - - # so we start at the second jump - jumps = 2 - while len(already_seen) < (neighbors < n_channels).sum(): - # grow our search -- what are the next-closest channels? - new_shell = make_shell(channel, geom, n_jumps=jumps * n_jumps_per_growth) - new_shell = list(sorted(c for c in new_shell if (c not in already_seen) and (c in neighbors))) - - # for each new channel, find the intersection of the channels - # from previous shells and that channel's shell in `shells` - for new_chan in new_shell: - parents = np.intersect1d(shells[new_chan], already_seen) - parents_rel = np.flatnonzero(np.isin(neighbors, parents)) - if not len(parents_rel): - # this can happen for some strange geometries. in that case, bail. - continue - channel_parents.append((np.flatnonzero(neighbors == new_chan).item(), parents_rel)) - - # add this shell to what we have seen - already_seen += new_shell - jumps += 1 - - radial_parents.append(channel_parents) - - return radial_parents - - -def enforce_decrease_shells_data(wf_data, maxchan, radial_parents, in_place=False): - """Radial enforce decrease""" - (C,) = wf_data.shape - - # allocate storage for decreasing version of data - decreasing_data = wf_data if in_place else wf_data.copy() - - # loop to enforce data decrease from parent shells - for c, parents_rel in radial_parents[maxchan]: - if decreasing_data[c] > decreasing_data[parents_rel].max(): - decreasing_data[c] *= decreasing_data[parents_rel].max() / decreasing_data[c] - - return decreasing_data - - -def get_grid_convolution_templates_and_weights( - contact_locations, radius_um=40, upsampling_um=5, margin_um=50, weight_method={"mode": "exponential_3d"} -): - """Get a upsampled grid of artificial templates given a particular probe layout - - Parameters - ---------- - contact_locations: array - The positions of the channels - radius_um: float - Radius in um for channel sparsity. - upsampling_um: float - Upsampling resolution for the grid of templates - margin_um: float - The margin for the grid of fake templates - weight_method: dict - Parameter that should be provided to the get_convolution_weights() function - in order to know how to estimate the positions. 
One argument is mode that could - be either gaussian_2d (KS like) or exponential_3d (default) - - Returns - ------- - template_positions: array - The positions of the upsampled templates - weights: - The weights of the templates, on a per channel basis - nearest_template_mask: array - A sparsity mask to to know which template is close to the contact locations, given - the radius_um parameter - z_factors: array - The z_factors that have been used to generate the weights along the third dimension - """ - - import sklearn.metrics - - x_min, x_max = contact_locations[:, 0].min(), contact_locations[:, 0].max() - y_min, y_max = contact_locations[:, 1].min(), contact_locations[:, 1].max() - - x_min -= margin_um - x_max += margin_um - y_min -= margin_um - y_max += margin_um - - dx = np.abs(x_max - x_min) - dy = np.abs(y_max - y_min) - - eps = upsampling_um / 10 - - all_x, all_y = np.meshgrid( - np.arange(x_min, x_max + eps, upsampling_um), np.arange(y_min, y_max + eps, upsampling_um) - ) - - nb_templates = all_x.size - - template_positions = np.zeros((nb_templates, 2)) - template_positions[:, 0] = all_x.flatten() - template_positions[:, 1] = all_y.flatten() - - # mask to get nearest template given a channel - dist = sklearn.metrics.pairwise_distances(contact_locations, template_positions) - nearest_template_mask = dist <= radius_um - weights, z_factors = get_convolution_weights(dist, **weight_method) - - return template_positions, weights, nearest_template_mask, z_factors - - -def get_convolution_weights( - distances, - z_list_um=np.linspace(0, 120.0, 5), - sigma_list_um=np.linspace(5, 25, 5), - sparsity_threshold=None, - sigma_3d=2.5, - mode="exponential_3d", -): - """Get normalized weights for creating artificial templates, given some precomputed distances - - Parameters - ---------- - distances: 2D array - The distances between the source channels (real ones) and the upsampled one (virual ones) - sparsity_threshold: float, default None - The sparsity_threshold below which weights are set to 0 (speeding up computations). 
If None, - then a default value of 0.5/sqrt(distances.shape[0]) is set - mode: exponential_3d | gaussian_2d - The inference scheme to be used to get the convolution weights - Keyword arguments for the chosen method: - "gaussian_2d" (similar to KiloSort): - * sigma_list_um: array, default np.linspace(5, 25, 5) - The list of sigma to consider for decaying exponentials - "exponential_3d" (default): - * z_list_um: array, default np.linspace(0, 120.0, 5) - The list of z to consider for putative depth of the sources - * sigma_3d: float, default 2.5 - The scaling factor controling the decay of the exponential - - Returns - ------- - weights: - The weights of the templates, on a per channel basis - z_factors: array - The z_factors that have been used to generate the weights along the third dimension - """ - - if sparsity_threshold is not None: - assert 0 <= sparsity_threshold <= 1, "sparsity_threshold should be in [0, 1]" - - if mode == "exponential_3d": - weights = np.zeros((len(z_list_um), distances.shape[0], distances.shape[1]), dtype=np.float32) - for count, z in enumerate(z_list_um): - dist_3d = np.sqrt(distances**2 + z**2) - weights[count] = np.exp(-dist_3d / sigma_3d) - z_factors = z_list_um - elif mode == "gaussian_2d": - weights = np.zeros((len(sigma_list_um), distances.shape[0], distances.shape[1]), dtype=np.float32) - for count, sigma in enumerate(sigma_list_um): - alpha = 2 * (sigma**2) - weights[count] = np.exp(-(distances**2) / alpha) - z_factors = sigma_list_um - - # normalize to get normalized values in [0, 1] - with np.errstate(divide="ignore", invalid="ignore"): - norm = np.linalg.norm(weights, axis=1)[:, np.newaxis, :] - weights /= norm - - weights[~np.isfinite(weights)] = 0.0 - - # If sparsity is None or non zero, we are pruning weights that are below the - # sparsification factor. 
This will speed up furter computations - if sparsity_threshold is None: - sparsity_threshold = 0.5 / np.sqrt(distances.shape[0]) - weights[weights < sparsity_threshold] = 0 - - # re normalize to ensure we have unitary norms - with np.errstate(divide="ignore", invalid="ignore"): - norm = np.linalg.norm(weights, axis=1)[:, np.newaxis, :] - weights /= norm - - weights[~np.isfinite(weights)] = 0.0 - - return weights, z_factors - - -if HAVE_NUMBA: - enforce_decrease_shells = numba.jit(enforce_decrease_shells_data, nopython=True) diff --git a/src/spikeinterface/preprocessing/__init__.py b/src/spikeinterface/preprocessing/__init__.py index 38343f8804..5f9ac046e1 100644 --- a/src/spikeinterface/preprocessing/__init__.py +++ b/src/spikeinterface/preprocessing/__init__.py @@ -1,6 +1,6 @@ from .preprocessinglist import * -from .motion import correct_motion, load_motion_info +from .motion import correct_motion, load_motion_info, save_motion_info from .preprocessing_tools import get_spatial_interpolation_kernel from .detect_bad_channels import detect_bad_channels diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index c77745a4ff..82a89220bd 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -2,6 +2,7 @@ import numpy as np import json +import shutil from pathlib import Path import time @@ -18,8 +19,8 @@ method="locally_exclusive", peak_sign="neg", detect_threshold=8.0, - exclude_sweep_ms=0.1, - radius_um=50, + exclude_sweep_ms=0.8, + radius_um=80., ), "select_kwargs": dict(), "localize_peaks_kwargs": dict( @@ -34,16 +35,13 @@ "estimate_motion_kwargs": dict( method="decentralized", direction="y", - bin_s=2.0, + bin_s=1.0, rigid=False, bin_um=5.0, - margin_um=0.0, - # win_shape="gaussian", - # win_step_um=50.0, - # win_scale_um=150.0, + hist_margin_um=20.0, win_shape="gaussian", - win_step_um=100.0, - win_scale_um=200.0, + win_step_um=200.0, + win_scale_um=300.0, histogram_depth_smooth_um=5.0, histogram_time_smooth_s=None, pairwise_displacement_method="conv", @@ -77,13 +75,14 @@ method="locally_exclusive", peak_sign="neg", detect_threshold=8.0, - exclude_sweep_ms=0.5, - radius_um=50, + exclude_sweep_ms=0.8, + radius_um=80., ), "select_kwargs": dict(), "localize_peaks_kwargs": dict( method="grid_convolution", - radius_um=40.0, + # radius_um=40.0, + radius_um=80.0, upsampling_um=5.0, sigma_ms=0.25, margin_um=30.0, @@ -96,10 +95,7 @@ bin_s=2.0, rigid=False, bin_um=5.0, - margin_um=0.0, - # win_shape="gaussian", - # win_step_um=50.0, - # win_scale_um=150.0, + hist_margin_um=0.0, win_shape="gaussian", win_step_um=100.0, win_scale_um=200.0, @@ -182,7 +178,7 @@ rigid=False, win_step_um=50.0, win_scale_um=150.0, - margin_um=0, + hist_margin_um=0, win_shape="rect", ), "interpolate_motion_kwargs": dict( @@ -200,11 +196,13 @@ } + def correct_motion( recording, preset="nonrigid_accurate", folder=None, output_motion_info=False, + overwrite=False, detect_kwargs={}, select_kwargs={}, localize_peaks_kwargs={}, @@ -257,6 +255,8 @@ def correct_motion( If True, then the function returns a `motion_info` dictionary that contains variables to check intermediate steps (motion_histogram, non_rigid_windows, pairwise_displacement) This dictionary is the same when reloaded from the folder + overwrite : bool, default: False + If True and folder is given, overwrite the folder if it already exists detect_kwargs : dict Optional parameters to overwrite the ones in the preset for "detect" step. 
    select_kwargs : dict
@@ -315,11 +315,13 @@ def correct_motion(

     if folder is not None:
         folder = Path(folder)
-        folder.mkdir(exist_ok=True, parents=True)
+        if overwrite:
+            if folder.is_dir():
+                shutil.rmtree(folder)

-        (folder / "parameters.json").write_text(json.dumps(parameters, indent=4, cls=SIJsonEncoder), encoding="utf8")
-        if recording.check_serializability("json"):
-            recording.dump_to_json(folder / "recording.json")
+        else:
+            assert not folder.is_dir(), f"Folder {folder} already exists"

     if not do_selection:
         # maybe do this directly in the folder when not None, but might be slow on external storage
@@ -331,7 +333,7 @@ def correct_motion(

         node1 = ExtractDenseWaveforms(recording, parents=[node0], ms_before=0.1, ms_after=0.3)

-        # node nolcalize
+        # node detect + localize
        method = localize_peaks_kwargs.pop("method", "center_of_mass")
        method_class = localize_peak_methods[method]
        node2 = method_class(recording, parents=[node0, node1], return_output=True, **localize_peaks_kwargs)
@@ -370,9 +372,6 @@ def correct_motion(
         select_peaks=t2 - t1,
         localize_peaks=t3 - t2,
     )
-    if folder is not None:
-        np.save(folder / "peaks.npy", peaks)
-        np.save(folder / "peak_locations.npy", peak_locations)

     t0 = time.perf_counter()
     motion = estimate_motion(recording, peaks, peak_locations, **estimate_motion_kwargs)
@@ -381,18 +380,17 @@ def correct_motion(

     recording_corrected = InterpolateMotionRecording(recording, motion, **interpolate_motion_kwargs)

+    motion_info = dict(
+        parameters=parameters,
+        run_times=run_times,
+        peaks=peaks,
+        peak_locations=peak_locations,
+        motion=motion,
+    )
     if folder is not None:
-        (folder / "run_times.json").write_text(json.dumps(run_times, indent=4), encoding="utf8")
-        motion.save(folder / "motion")
+        save_motion_info(motion_info, folder, overwrite=overwrite)

     if output_motion_info:
-        motion_info = dict(
-            parameters=parameters,
-            run_times=run_times,
-            peaks=peaks,
-            peak_locations=peak_locations,
-            motion=motion,
-        )
         return recording_corrected, motion_info
     else:
         return recording_corrected
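# Editor's sketch (not part of the patch): the intended round trip for the refactored
# saving logic, mirroring the test added to test_motion.py further below; `rec` stands
# for any recording, e.g. from generate_recording().
from spikeinterface.preprocessing import correct_motion, save_motion_info, load_motion_info

rec_corrected, motion_info = correct_motion(rec, preset="kilosort_like", output_motion_info=True)
save_motion_info(motion_info, "motion_folder", overwrite=True)
motion_info_loaded = load_motion_info("motion_folder")
assert motion_info_loaded["motion"] == motion_info["motion"]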
@@ -408,6 +406,25 @@ def correct_motion(
 correct_motion.__doc__ = correct_motion.__doc__.format(_doc_presets, _shared_job_kwargs_doc)


+def save_motion_info(motion_info, folder, overwrite=False):
+    folder = Path(folder)
+    if folder.is_dir():
+        if not overwrite:
+            raise FileExistsError(f"Folder {folder} already exists. Use `overwrite=True` to overwrite.")
+        else:
+            shutil.rmtree(folder)
+    folder.mkdir(exist_ok=True, parents=True)
+
+    (folder / "parameters.json").write_text(
+        json.dumps(motion_info["parameters"], indent=4, cls=SIJsonEncoder), encoding="utf8"
+    )
+    (folder / "run_times.json").write_text(json.dumps(motion_info["run_times"], indent=4), encoding="utf8")
+
+    np.save(folder / "peaks.npy", motion_info["peaks"])
+    np.save(folder / "peak_locations.npy", motion_info["peak_locations"])
+    motion_info["motion"].save(folder / "motion")
+
+
 def load_motion_info(folder):
     from spikeinterface.sortingcomponents.motion import Motion
diff --git a/src/spikeinterface/preprocessing/preprocessinglist.py b/src/spikeinterface/preprocessing/preprocessinglist.py
index 1b28be9752..8f3729b49b 100644
--- a/src/spikeinterface/preprocessing/preprocessinglist.py
+++ b/src/spikeinterface/preprocessing/preprocessinglist.py
@@ -24,6 +24,8 @@
     CenterRecording,
     center,
 )
+from .scale import scale_to_uV
+
 from .whiten import WhitenRecording, whiten, compute_whitening_matrix
 from .rectify import RectifyRecording, rectify
 from .clip import BlankSaturationRecording, blank_staturation, ClipRecording, clip
diff --git a/src/spikeinterface/preprocessing/scale.py b/src/spikeinterface/preprocessing/scale.py
new file mode 100644
index 0000000000..bc77577ce0
--- /dev/null
+++ b/src/spikeinterface/preprocessing/scale.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import numpy as np
+
+from spikeinterface.core import BaseRecording
+from spikeinterface.preprocessing.basepreprocessor import BasePreprocessor
+
+
+def scale_to_uV(recording: BaseRecording) -> BasePreprocessor:
+    """
+    Scale raw traces to microvolts (µV).
+
+    This preprocessor uses the channel-specific gain and offset information
+    stored in the recording extractor to convert the raw traces to µV units.
+
+    Parameters
+    ----------
+    recording : BaseRecording
+        The recording extractor to be scaled. The recording extractor must
+        have gains and offsets, otherwise an error will be raised.
+
+    Raises
+    ------
+    RuntimeError
+        If the recording extractor does not have scaleable traces.
+    """
+    # To avoid a circular import
+    from spikeinterface.preprocessing import ScaleRecording
+
+    if not recording.has_scaleable_traces():
+        error_msg = "Recording must have gains and offsets set to be scaled to µV"
+        raise RuntimeError(error_msg)
+
+    gain = recording.get_channel_gains()
+    offset = recording.get_channel_offsets()
+
+    scaled_to_uV_recording = ScaleRecording(recording, gain=gain, offset=offset, dtype="float32")
+
+    # We do this so that when get_traces(return_scaled=True) is called, the return is the same.
+    scaled_to_uV_recording.set_channel_gains(gains=1.0)
+    scaled_to_uV_recording.set_channel_offsets(offsets=0.0)
+
+    return scaled_to_uV_recording
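# Editor's sketch (not part of the patch): typical use of the new scale_to_uV preprocessor.
from spikeinterface.core import generate_recording
from spikeinterface.preprocessing import scale_to_uV

rec = generate_recording(num_channels=4, durations=[1.0])
rec.set_channel_gains(0.195)  # e.g. an int16 acquisition gain in µV per bit
rec.set_channel_offsets(0.0)
rec_uv = scale_to_uV(recording=rec)
# get_traces() now returns float32 traces already in µV; because the wrapper resets its
# own gains/offsets to 1 and 0, get_traces(return_scaled=True) returns the same values.
traces_uv = rec_uv.get_traces(segment_index=0)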
diff --git a/src/spikeinterface/preprocessing/tests/test_motion.py b/src/spikeinterface/preprocessing/tests/test_motion.py
index a298b41d8f..baa7235263 100644
--- a/src/spikeinterface/preprocessing/tests/test_motion.py
+++ b/src/spikeinterface/preprocessing/tests/test_motion.py
@@ -1,10 +1,7 @@
 import shutil
-from pathlib import Path

-import numpy as np
-import pytest

 from spikeinterface.core import generate_recording
-from spikeinterface.preprocessing import correct_motion, load_motion_info
+from spikeinterface.preprocessing import correct_motion, load_motion_info, save_motion_info


 def test_estimate_and_correct_motion(create_cache_folder):
@@ -19,9 +16,16 @@ def test_estimate_and_correct_motion(create_cache_folder):
     rec_corrected = correct_motion(rec, folder=folder)
     print(rec_corrected)

+    # test reloading motion info
     motion_info = load_motion_info(folder)
     print(motion_info.keys())

+    # test saving motion info
+    save_folder = folder / "motion_info"
+    save_motion_info(motion_info=motion_info, folder=save_folder)
+    motion_info_loaded = load_motion_info(save_folder)
+    assert motion_info_loaded["motion"] == motion_info["motion"]
+

 if __name__ == "__main__":
     # print(correct_motion.__doc__)
diff --git a/src/spikeinterface/preprocessing/tests/test_scaling.py b/src/spikeinterface/preprocessing/tests/test_scaling.py
new file mode 100644
index 0000000000..321d7c9df2
--- /dev/null
+++ b/src/spikeinterface/preprocessing/tests/test_scaling.py
@@ -0,0 +1,70 @@
+import pytest
+import numpy as np
+from spikeinterface.core.testing_tools import generate_recording
+from spikeinterface.preprocessing import scale_to_uV, CenterRecording
+
+
+def test_scale_to_uV():
+    # Create a sample recording extractor with fake gains and offsets
+    num_channels = 4
+    sampling_frequency = 30_000.0
+    durations = [1.0, 1.0]  # seconds
+    recording = generate_recording(
+        num_channels=num_channels,
+        durations=durations,
+        sampling_frequency=sampling_frequency,
+    )
+
+    rng = np.random.default_rng(0)
+    gains = rng.random(size=(num_channels)).astype(np.float32)
+    offsets = rng.random(size=(num_channels)).astype(np.float32)
+    recording.set_channel_gains(gains)
+    recording.set_channel_offsets(offsets)
+
+    # Apply the preprocessor
+    scaled_recording = scale_to_uV(recording=recording)
+
+    # Check if the traces are indeed scaled
+    expected_traces = recording.get_traces(return_scaled=True, segment_index=0)
+    scaled_traces = scaled_recording.get_traces(segment_index=0)
+
+    np.testing.assert_allclose(scaled_traces, expected_traces)
+
+    # Test for the error when recording doesn't have scaleable traces
+    recording.set_channel_gains(None)  # Remove gains to make traces unscaleable
+    with pytest.raises(RuntimeError):
+        scale_to_uV(recording)
+
+
+def test_scaling_in_preprocessing_chain():
+
+    # Create a sample recording extractor with fake gains and offsets
+    num_channels = 4
+    sampling_frequency = 30_000.0
+    durations = [1.0]  # seconds
+    recording = generate_recording(
+        num_channels=num_channels,
+        durations=durations,
+        sampling_frequency=sampling_frequency,
+    )
+
+    rng = np.random.default_rng(0)
+    gains = rng.random(size=(num_channels)).astype(np.float32)
+    offsets = rng.random(size=(num_channels)).astype(np.float32)
+
+    recording.set_channel_gains(gains)
+    recording.set_channel_offsets(offsets)
+
+    centered_recording = CenterRecording(scale_to_uV(recording=recording))
+    traces_scaled_with_argument = centered_recording.get_traces(return_scaled=True)
+
+    # Chain preprocessors
+    centered_recording_scaled = CenterRecording(scale_to_uV(recording=recording))
+    traces_scaled_with_preprocessor = centered_recording_scaled.get_traces()
+
+    np.testing.assert_allclose(traces_scaled_with_argument, traces_scaled_with_preprocessor)
+
+    # Test if the scaling is not done twice
+    traces_scaled_with_preprocessor_and_argument = centered_recording_scaled.get_traces(return_scaled=True)
+
+    np.testing.assert_allclose(traces_scaled_with_preprocessor, traces_scaled_with_preprocessor_and_argument)
diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py
index cbb55aeb8b..433c04d248 100644
--- a/src/spikeinterface/qualitymetrics/misc_metrics.py
+++ b/src/spikeinterface/qualitymetrics/misc_metrics.py
@@ -241,7 +241,7 @@ def compute_isi_violations(sorting_analyzer, isi_threshold_ms=1.5, min_isi_ms=0,

     It computes several metrics related to isi violations:
         * isi_violations_ratio: the relative firing rate of the hypothetical neurons that are
-          generating the ISI violations. Described in [Hill]_. See Notes.
+          generating the ISI violations. See Notes.
         * isi_violation_count: number of ISI violations

     Parameters
@@ -261,22 +261,29 @@ def compute_isi_violations(sorting_analyzer, isi_threshold_ms=1.5, min_isi_ms=0,
     Returns
     -------
     isi_violations_ratio : dict
-        The isi violation ratio described in [Hill]_.
+        The isi violation ratio.
     isi_violation_count : dict
         Number of violations.

     Notes
     -----
-    You can interpret an ISI violations ratio value of 0.5 as meaning that contaminating spikes are
-    occurring at roughly half the rate of "true" spikes for that unit.
-    In cases of highly contaminated units, the ISI violations ratio can sometimes be greater than 1.
+    The returned ISI violations ratio approximates the fraction of spikes in each
+    unit which are contaminated. The formulation assumes that the contaminating spikes
+    are statistically independent from the other spikes in that cluster. This
+    approximation can break down in reality, especially for highly contaminated units.
+    See the discussion in Section 4.1 of [Llobet]_ for more details.
+
+    This method counts the number of spikes whose ISI is violated. If there are three
+    spikes within `isi_threshold_ms`, the first and second are violated. Hence there are two
+    spikes which have been violated. This is in contrast to `compute_refrac_period_violations`,
+    which counts the number of violations.

     References
     ----------
-    Based on metrics described in [Hill]_
+    Based on metrics originally implemented in Ultra Mega Sort [UMS]_.

-    Originally written in Matlab by Nick Steinmetz (https://github.com/cortex-lab/sortingQuality)
-    and converted to Python by Daniel Denman.
+    This implementation is based on one of the original implementations written in Matlab by Nick Steinmetz
+    (https://github.com/cortex-lab/sortingQuality) and converted to Python by Daniel Denman.
     """
     res = namedtuple("isi_violation", ["isi_violations_ratio", "isi_violations_count"])

@@ -324,7 +331,6 @@ def compute_refrac_period_violations(
     Calculate the number of refractory period violations. This is similar (but slightly different) to the ISI violations.
-    The key difference being that the violations are not only computed on consecutive spikes.
     This is required for some formulas (e.g. the ones from Llobet & Wyngaard 2022).
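# Editor's sketch (not part of the patch): the two counting conventions contrasted in
# these Notes, for three spikes that all fall within the threshold.
import numpy as np

spike_times_ms = np.array([0.0, 1.0, 2.0])
threshold_ms = 10.0
# compute_isi_violations counts consecutive ISIs below threshold: 2 here.
num_violated_isis = np.sum(np.diff(spike_times_ms) < threshold_ms)
# compute_refrac_period_violations counts every violating pair: 3 here.
i, j = np.triu_indices(spike_times_ms.size, k=1)
num_violating_pairs = np.sum((spike_times_ms[j] - spike_times_ms[i]) < threshold_ms)
assert num_violated_isis == 2 and num_violating_pairs == 3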
@@ -351,6 +357,12 @@ def compute_refrac_period_violations(
     -----
     Requires "numba" package

+    This method counts the number of violations which occur during the refractory period.
+    For example, if there are three spikes within `refractory_period_ms`, the second and third spikes
+    violate the first spike and the third spike violates the second spike. Hence there
+    are three violations. This is in contrast to `compute_isi_violations`, which
+    computes the number of spikes which have been violated.
+
     References
     ----------
     Based on metrics described in [Llobet]_
@@ -388,11 +400,11 @@ def compute_refrac_period_violations(
     nb_violations = {}
     rp_contamination = {}

-    for i, unit_id in enumerate(sorting.unit_ids):
+    for unit_index, unit_id in enumerate(sorting.unit_ids):
         if unit_id not in unit_ids:
             continue

-        nb_violations[unit_id] = n_v = nb_rp_violations[i]
+        nb_violations[unit_id] = n_v = nb_rp_violations[unit_index]
         N = num_spikes[unit_id]
         if N == 0:
             rp_contamination[unit_id] = np.nan
@@ -1085,10 +1097,10 @@ def compute_drift_metrics(
             spikes_in_bin = spikes_in_segment[i0:i1]
             spike_locations_in_bin = spike_locations_in_segment[i0:i1][direction]

-            for unit_ind in np.arange(len(unit_ids)):
-                mask = spikes_in_bin["unit_index"] == unit_ind
+            for i, unit_id in enumerate(unit_ids):
+                mask = spikes_in_bin["unit_index"] == sorting.id_to_index(unit_id)
                 if np.sum(mask) >= min_spikes_per_interval:
-                    median_positions[unit_ind, bin_index] = np.median(spike_locations_in_bin[mask])
+                    median_positions[i, bin_index] = np.median(spike_locations_in_bin[mask])
             if median_position_segments is None:
                 median_position_segments = median_positions
             else:
diff --git a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py
index 2d4eeb360b..aec8201f44 100644
--- a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py
+++ b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py
@@ -12,8 +12,12 @@

 from spikeinterface.qualitymetrics.utils import create_ground_truth_pc_distributions

+from spikeinterface.qualitymetrics.quality_metric_list import (
+    _misc_metric_name_to_func,
+)

 from spikeinterface.qualitymetrics import (
+    get_quality_metric_list,
     mahalanobis_metrics,
     lda_metrics,
     nearest_neighbors_metrics,
@@ -34,6 +38,7 @@
     compute_amplitude_cv_metrics,
     compute_sd_ratio,
     get_synchrony_counts,
+    compute_quality_metrics,
 )

 from spikeinterface.core.basesorting import minimum_spike_dtype

@@ -42,6 +47,125 @@

 job_kwargs = dict(n_jobs=2, progress_bar=True, chunk_duration="1s")


+def _small_sorting_analyzer():
+    recording, sorting = generate_ground_truth_recording(
+        durations=[2.0],
+        num_units=4,
+        seed=1205,
+    )
+
+    sorting = sorting.select_units([3, 2, 0], ["#3", "#9", "#4"])
+
+    sorting_analyzer = create_sorting_analyzer(recording=recording, sorting=sorting, format="memory")
+
+    extensions_to_compute = {
+        "random_spikes": {"seed": 1205},
+        "noise_levels": {"seed": 1205},
+        "waveforms": {},
+        "templates": {},
+        "spike_amplitudes": {},
+        "spike_locations": {},
+        "principal_components": {},
+    }
+
+    sorting_analyzer.compute(extensions_to_compute)
+
+    return sorting_analyzer
+
+
+@pytest.fixture(scope="module")
+def small_sorting_analyzer():
+    return _small_sorting_analyzer()
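# Editor's sketch (not part of the patch): the per-unit dict contract exercised by
# test_unit_structure_in_output below, shown for a single metric using the fixture above.
from spikeinterface.qualitymetrics import compute_firing_rates

analyzer = _small_sorting_analyzer()
rates_all = compute_firing_rates(sorting_analyzer=analyzer)
rates_sub = compute_firing_rates(sorting_analyzer=analyzer, unit_ids=["#4", "#9"])
assert list(rates_all.keys()) == ["#3", "#9", "#4"]  # dict keyed by unit id, in sorting order
assert rates_sub["#4"] == rates_all["#4"]  # restricting unit_ids does not change values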
{"bin_size_s": 1}, + "isi_violation": {"isi_threshold_ms": 10}, + "drift": {"interval_s": 1, "min_spikes_per_interval": 5}, + "sliding_rp_violation": {"max_ref_period_ms": 50, "bin_size_ms": 0.15}, + "rp_violation": {"refractory_period_ms": 10.0, "censored_period_ms": 0.0}, + } + + for metric_name in get_quality_metric_list(): + + try: + qm_param = qm_params[metric_name] + except: + qm_param = {} + + result_all = _misc_metric_name_to_func[metric_name](sorting_analyzer=small_sorting_analyzer, **qm_param) + result_sub = _misc_metric_name_to_func[metric_name]( + sorting_analyzer=small_sorting_analyzer, unit_ids=["#4", "#9"], **qm_param + ) + + if isinstance(result_all, dict): + assert list(result_all.keys()) == ["#3", "#9", "#4"] + assert list(result_sub.keys()) == ["#4", "#9"] + assert result_sub["#9"] == result_all["#9"] + assert result_sub["#4"] == result_all["#4"] + + else: + for result_ind, result in enumerate(result_sub): + + assert list(result_all[result_ind].keys()) == ["#3", "#9", "#4"] + assert result_sub[result_ind].keys() == set(["#4", "#9"]) + + assert result_sub[result_ind]["#9"] == result_all[result_ind]["#9"] + assert result_sub[result_ind]["#4"] == result_all[result_ind]["#4"] + + +def test_unit_id_order_independence(small_sorting_analyzer): + """ + Takes two almost-identical sorting_analyzers, whose unit_ids are in different orders and have different labels, + and checks that their calculated quality metrics are independent of the ordering and labelling. + """ + + recording = small_sorting_analyzer.recording + sorting = small_sorting_analyzer.sorting.select_units(["#4", "#9", "#3"], [0, 2, 3]) + + small_sorting_analyzer_2 = create_sorting_analyzer(recording=recording, sorting=sorting, format="memory") + + extensions_to_compute = { + "random_spikes": {"seed": 1205}, + "noise_levels": {"seed": 1205}, + "waveforms": {}, + "templates": {}, + "spike_amplitudes": {}, + "spike_locations": {}, + "principal_components": {}, + } + + small_sorting_analyzer_2.compute(extensions_to_compute) + + # need special params to get non-nan results on a short recording + qm_params = { + "presence_ratio": {"bin_duration_s": 0.1}, + "amplitude_cutoff": {"num_histogram_bins": 3}, + "amplitude_cv": {"average_num_spikes_per_bin": 7, "min_num_bins": 3}, + "firing_range": {"bin_size_s": 1}, + "isi_violation": {"isi_threshold_ms": 10}, + "drift": {"interval_s": 1, "min_spikes_per_interval": 5}, + "sliding_rp_violation": {"max_ref_period_ms": 50, "bin_size_ms": 0.15}, + } + + quality_metrics_1 = compute_quality_metrics( + small_sorting_analyzer, metric_names=get_quality_metric_list(), qm_params=qm_params + ) + quality_metrics_2 = compute_quality_metrics( + small_sorting_analyzer_2, metric_names=get_quality_metric_list(), qm_params=qm_params + ) + + for metric, metric_1_data in quality_metrics_1.items(): + assert quality_metrics_2[metric][3] == metric_1_data["#3"] + assert quality_metrics_2[metric][2] == metric_1_data["#9"] + assert quality_metrics_2[metric][0] == metric_1_data["#4"] + + def _sorting_analyzer_simple(): recording, sorting = generate_ground_truth_recording( durations=[ diff --git a/src/spikeinterface/sorters/internal/simplesorter.py b/src/spikeinterface/sorters/internal/simplesorter.py index 199352ab73..0f44e4079a 100644 --- a/src/spikeinterface/sorters/internal/simplesorter.py +++ b/src/spikeinterface/sorters/internal/simplesorter.py @@ -112,9 +112,12 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ms_before = params["waveforms"]["ms_before"] ms_after = 
params["waveforms"]["ms_after"] + nbefore = int(ms_before * sampling_frequency / 1000.0) + nafter = int(ms_after * sampling_frequency / 1000.0) # SVD for time compression - few_peaks = select_peaks(peaks, method="uniform", n_peaks=5000) + + few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=5000, margin=(nbefore, nafter)) few_wfs = extract_waveform_at_max_channel( recording, few_peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs ) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index b5df0f1059..45cc93d0b6 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -278,29 +278,30 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params["templates"] = templates matching_job_params = job_kwargs.copy() - for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: - if value in matching_job_params: - matching_job_params[value] = None - matching_job_params["chunk_duration"] = "100ms" + if matching_method is not None: + for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: + if value in matching_job_params: + matching_job_params[value] = None + matching_job_params["chunk_duration"] = "100ms" + + spikes = find_spikes_from_templates( + recording_w, matching_method, method_kwargs=matching_params, **matching_job_params + ) - spikes = find_spikes_from_templates( - recording_w, matching_method, method_kwargs=matching_params, **matching_job_params - ) + if params["debug"]: + fitting_folder = sorter_output_folder / "fitting" + fitting_folder.mkdir(parents=True, exist_ok=True) + np.save(fitting_folder / "spikes", spikes) - if params["debug"]: - fitting_folder = sorter_output_folder / "fitting" - fitting_folder.mkdir(parents=True, exist_ok=True) - np.save(fitting_folder / "spikes", spikes) - - if verbose: - print("We found %d spikes" % len(spikes)) - - ## And this is it! We have a spyking circus - sorting = np.zeros(spikes.size, dtype=minimum_spike_dtype) - sorting["sample_index"] = spikes["sample_index"] - sorting["unit_index"] = spikes["cluster_index"] - sorting["segment_index"] = spikes["segment_index"] - sorting = NumpySorting(sorting, sampling_frequency, unit_ids) + if verbose: + print("We found %d spikes" % len(spikes)) + + ## And this is it! 
We have a spyking circus
+            sorting = np.zeros(spikes.size, dtype=minimum_spike_dtype)
+            sorting["sample_index"] = spikes["sample_index"]
+            sorting["unit_index"] = spikes["cluster_index"]
+            sorting["segment_index"] = spikes["segment_index"]
+            sorting = NumpySorting(sorting, sampling_frequency, unit_ids)

         sorting_folder = sorter_output_folder / "sorting"
         if sorting_folder.exists():
diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_localization.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_localization.py
index 3eda5db3b6..05d142113b 100644
--- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_localization.py
+++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_localization.py
@@ -1,6 +1,6 @@
 from __future__ import annotations

-from spikeinterface.postprocessing.unit_locations import (
+from spikeinterface.postprocessing.localization_tools import (
     compute_center_of_mass,
     compute_monopolar_triangulation,
     compute_grid_convolution,
diff --git a/src/spikeinterface/sortingcomponents/clustering/circus.py b/src/spikeinterface/sortingcomponents/clustering/circus.py
index 65a89702c7..2bacf36ac9 100644
--- a/src/spikeinterface/sortingcomponents/clustering/circus.py
+++ b/src/spikeinterface/sortingcomponents/clustering/circus.py
@@ -90,7 +90,7 @@ def main_function(cls, recording, peaks, params):
         tmp_folder.mkdir(parents=True, exist_ok=True)

         # SVD for time compression
-        few_peaks = select_peaks(peaks, method="uniform", n_peaks=10000)
+        few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=10000, margin=(nbefore, nafter))
         few_wfs = extract_waveform_at_max_channel(
             recording, few_peaks, ms_before=ms_before, ms_after=ms_after, **params["job_kwargs"]
         )
diff --git a/src/spikeinterface/sortingcomponents/clustering/tdc.py b/src/spikeinterface/sortingcomponents/clustering/tdc.py
index 46a9f1d18a..13af5b0fab 100644
--- a/src/spikeinterface/sortingcomponents/clustering/tdc.py
+++ b/src/spikeinterface/sortingcomponents/clustering/tdc.py
@@ -73,8 +73,11 @@ def main_function(cls, recording, peaks, params):
         ms_before = params["waveforms"]["ms_before"]
         ms_after = params["waveforms"]["ms_after"]
+        nbefore = int(ms_before * sampling_frequency / 1000.0)
+        nafter = int(ms_after * sampling_frequency / 1000.0)
+
         # SVD for time compression
-        few_peaks = select_peaks(peaks, method="uniform", n_peaks=5000)
+        few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=5000, margin=(nbefore, nafter))
         few_wfs = extract_waveform_at_max_channel(
             recording, few_peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs
         )
diff --git a/src/spikeinterface/sortingcomponents/motion/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion/motion_interpolation.py
index 32bb7634e9..11ce11e1aa 100644
--- a/src/spikeinterface/sortingcomponents/motion/motion_interpolation.py
+++ b/src/spikeinterface/sortingcomponents/motion/motion_interpolation.py
@@ -27,6 +27,9 @@ def correct_motion_on_peaks(peaks, peak_locations, motion, recording):
     corrected_peak_locations: np.array
         Motion-corrected peak locations
     """
+    if recording is None:
+        raise ValueError("correct_motion_on_peaks requires a recording that is not None")
+
     corrected_peak_locations = peak_locations.copy()

     for segment_index in range(motion.num_segments):
diff --git a/src/spikeinterface/sortingcomponents/motion/motion_utils.py b/src/spikeinterface/sortingcomponents/motion/motion_utils.py
index de94b7a899..228110b7ec 100644
--- 
a/src/spikeinterface/sortingcomponents/motion/motion_utils.py +++ b/src/spikeinterface/sortingcomponents/motion/motion_utils.py @@ -94,8 +94,9 @@ def get_displacement_at_time_and_depth(self, times_s, locations_um, segment_inde locations_um: np.array Either this is a one-dimensional array (a vector of positions along self.dimension), or else a 2d array with the 2 or 3 spatial dimensions indexed along axis=1. - segment_index: int, optional - grid : bool + segment_index: int, default: None + The index of the segment to evaluate. If None, and there is only one segment, then that segment is used. + grid : bool, default: False If grid=False, the default, then times_s and locations_um should have the same one-dimensional shape, and the returned displacement[i] is the displacement at time times_s[i] and location locations_um[i]. @@ -155,6 +156,7 @@ def to_dict(self): temporal_bins_s=self.temporal_bins_s, spatial_bins_um=self.spatial_bins_um, interpolation_method=self.interpolation_method, + direction=self.direction, ) def save(self, folder): @@ -224,9 +226,10 @@ def __eq__(self, other): def copy(self): return Motion( - self.displacement.copy(), - self.temporal_bins_s.copy(), + [d.copy() for d in self.displacement], + [t.copy() for t in self.temporal_bins_s], self.spatial_bins_um.copy(), + direction=self.direction, interpolation_method=self.interpolation_method, ) diff --git a/src/spikeinterface/sortingcomponents/peak_detection.py b/src/spikeinterface/sortingcomponents/peak_detection.py index b6f7709d27..0d5c92ff28 100644 --- a/src/spikeinterface/sortingcomponents/peak_detection.py +++ b/src/spikeinterface/sortingcomponents/peak_detection.py @@ -23,7 +23,7 @@ base_peak_dtype, ) -from spikeinterface.postprocessing.unit_locations import get_convolution_weights +from spikeinterface.postprocessing.localization_tools import get_convolution_weights from .tools import make_multi_method_doc diff --git a/src/spikeinterface/sortingcomponents/peak_localization.py b/src/spikeinterface/sortingcomponents/peak_localization.py index 23faea2d79..6d2ad09239 100644 --- a/src/spikeinterface/sortingcomponents/peak_localization.py +++ b/src/spikeinterface/sortingcomponents/peak_localization.py @@ -24,8 +24,11 @@ from ..postprocessing.unit_locations import ( dtype_localize_by_method, possible_localization_methods, - solve_monopolar_triangulation, +) + +from ..postprocessing.localization_tools import ( make_radial_order_parents, + solve_monopolar_triangulation, enforce_decrease_shells_data, get_grid_convolution_templates_and_weights, ) @@ -66,6 +69,8 @@ def get_localization_pipeline_nodes( elif method == "grid_convolution": if "prototype" not in method_kwargs: assert isinstance(peak_source, (PeakRetriever, SpikeRetriever)) + # extract prototypes silently + job_kwargs["progress_bar"] = False method_kwargs["prototype"] = get_prototype_spike( recording, peak_source.peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs ) diff --git a/src/spikeinterface/sortingcomponents/peak_selection.py b/src/spikeinterface/sortingcomponents/peak_selection.py index 397f59dbd9..fed026b6a7 100644 --- a/src/spikeinterface/sortingcomponents/peak_selection.py +++ b/src/spikeinterface/sortingcomponents/peak_selection.py @@ -6,7 +6,9 @@ import numpy as np -def select_peaks(peaks, method="uniform", seed=None, return_indices=False, **method_kwargs): +def select_peaks( + peaks, recording=None, method="uniform", seed=None, return_indices=False, margin=None, **method_kwargs +): """ Method to select a subset of peaks from a set of peaks. 
Usually used for reducing the computational footprint of downstream methods.
@@ -28,6 +30,9 @@ def select_peaks(peaks, method="uniform", seed=None, return_indices=False, **met
         The seed for random generations
     return_indices: bool
         If True, return the indices of selection such that selected_peaks = peaks[selected_indices]
+    margin : tuple or None, default: None
+        Margin in timesteps, as a tuple (nbefore, nafter), preventing peaks from being selected
+        at the borders of the segments. A recording must be provided to get the segment durations.
     method_kwargs: dict of kwargs method
         Keyword arguments for the chosen method:
@@ -66,8 +71,26 @@ def select_peaks(peaks, method="uniform", seed=None, return_indices=False, **met
         return_indices is True.
     """

+    if margin is not None:
+        assert recording is not None, "recording should be provided if margin is not None"
+
     selected_indices = select_peak_indices(peaks, method=method, seed=seed, **method_kwargs)
     selected_peaks = peaks[selected_indices]
+    num_segments = len(np.unique(selected_peaks["segment_index"]))
+
+    if margin is not None:
+        to_keep = np.zeros(len(selected_peaks), dtype=bool)
+        for segment_index in range(num_segments):
+            num_samples_in_segment = recording.get_num_samples(segment_index)
+            i0, i1 = np.searchsorted(selected_peaks["segment_index"], [segment_index, segment_index + 1])
+            while selected_peaks["sample_index"][i0] <= margin[0]:
+                i0 += 1
+            while selected_peaks["sample_index"][i1 - 1] >= (num_samples_in_segment - margin[1]):
+                i1 -= 1
+            to_keep[i0:i1] = True
+        selected_indices = selected_indices[to_keep]
+        selected_peaks = peaks[selected_indices]
+
     if return_indices:
         return selected_peaks, selected_indices
     else:
@@ -260,7 +283,9 @@ def select_peak_indices(peaks, method, seed, **method_kwargs):
             )

         selected_indices = np.concatenate(selected_indices)
-    selected_indices = selected_indices[np.argsort(peaks[selected_indices]["sample_index"])]
+    selected_indices = selected_indices[
+        np.lexsort((peaks[selected_indices]["sample_index"], peaks[selected_indices]["segment_index"]))
+    ]
     return selected_indices
diff --git a/src/spikeinterface/sortingcomponents/tests/test_peak_selection.py b/src/spikeinterface/sortingcomponents/tests/test_peak_selection.py
index d133a0f9d2..83469a4017 100644
--- a/src/spikeinterface/sortingcomponents/tests/test_peak_selection.py
+++ b/src/spikeinterface/sortingcomponents/tests/test_peak_selection.py
@@ -45,6 +45,11 @@ def test_select_peaks():
         selected_peaks.size <= n_peaks
     ), "selected_peaks is not the right size when return_indices=False, select_per_channel=False"

+    selected_peaks = select_peaks(peaks, recording=recording, method=method, margin=(10, 10), **select_kwargs)
+    assert (
+        selected_peaks.size <= n_peaks
+    ), "selected_peaks is not the right size when a margin is used"
+
     selected_peaks = select_peaks(peaks, method=method, select_per_channel=True, **select_kwargs)
     assert selected_peaks.size <= (
         n_peaks * recording.get_num_channels()
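# Editor's sketch (not part of the patch): typical use of the new margin argument,
# mirroring the updated call sites in simplesorter.py, circus.py and tdc.py above;
# `recording` and `peaks` are assumed to come from a prior peak-detection step.
nbefore = int(0.5 * recording.sampling_frequency / 1000.0)
nafter = int(1.5 * recording.sampling_frequency / 1000.0)
few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=5000, margin=(nbefore, nafter))
# Every selected peak now lies at least nbefore samples after the start and nafter samples
# before the end of its segment, so full waveform snippets can be cut without padding.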
diff --git a/src/spikeinterface/sortingcomponents/tools.py b/src/spikeinterface/sortingcomponents/tools.py
index cc45dd3e40..0872a6066c 100644
--- a/src/spikeinterface/sortingcomponents/tools.py
+++ b/src/spikeinterface/sortingcomponents/tools.py
@@ -62,6 +62,7 @@ def extract_waveform_at_max_channel(rec, peaks, ms_before=0.5, ms_after=1.5, **j
         return_scaled=False,
         sparsity_mask=sparsity_mask,
         copy=True,
+        verbose=False,
         **job_kwargs,
     )

@@ -69,18 +70,18 @@ def extract_waveform_at_max_channel(rec, peaks, ms_before=0.5, ms_after=1.5, **j
 def get_prototype_spike(recording, peaks, ms_before=0.5, ms_after=0.5, nb_peaks=1000, **job_kwargs):
-    if peaks.size > nb_peaks:
-        idx = np.sort(np.random.choice(len(peaks), nb_peaks, replace=False))
-        some_peaks = peaks[idx]
-    else:
-        some_peaks = peaks
-
     nbefore = int(ms_before * recording.sampling_frequency / 1000.0)
+    nafter = int(ms_after * recording.sampling_frequency / 1000.0)
+
+    from spikeinterface.sortingcomponents.peak_selection import select_peaks
+
+    few_peaks = select_peaks(peaks, recording=recording, method="uniform", n_peaks=nb_peaks, margin=(nbefore, nafter))

     waveforms = extract_waveform_at_max_channel(
-        recording, some_peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs
+        recording, few_peaks, ms_before=ms_before, ms_after=ms_after, **job_kwargs
     )

-    prototype = np.nanmedian(waveforms[:, :, 0] / (np.abs(waveforms[:, nbefore, 0][:, np.newaxis])), axis=0)
+    with np.errstate(divide="ignore", invalid="ignore"):
+        prototype = np.median(waveforms[:, :, 0] / (np.abs(waveforms[:, nbefore, 0][:, np.newaxis])), axis=0)
     return prototype
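
To make the normalization in the rewritten get_prototype_spike() concrete: every max-channel waveform is divided by its absolute amplitude at the alignment sample, so the median prototype is unit-normalized whatever the spike amplitudes, and the errstate guard only silences the inf/nan warnings a zero amplitude would raise. A self-contained numpy sketch with made-up shapes:

import numpy as np

rng = np.random.default_rng(42)
num_peaks, num_samples = 1000, 60  # illustrative shapes
nbefore = 20  # alignment sample within each waveform
# fake negative-going waveforms on the max channel: (num_peaks, num_samples, 1)
waveforms = -np.abs(rng.normal(size=(num_peaks, num_samples, 1)))

with np.errstate(divide="ignore", invalid="ignore"):
    scaled = waveforms[:, :, 0] / np.abs(waveforms[:, nbefore, 0][:, np.newaxis])
prototype = np.median(scaled, axis=0)
assert prototype[nbefore] == -1.0  # unit-normalized at the alignment sample
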
diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py
index b94167d2b7..9566989d31 100644
--- a/src/spikeinterface/widgets/base.py
+++ b/src/spikeinterface/widgets/base.py
@@ -139,7 +139,7 @@ def check_extensions(sorting_analyzer, extensions):
         if not sorting_analyzer.has_extension(extension):
             raise_error = True
             error_msg += (
-                f"The {extension} waveform extension is required for this widget. "
+                f"The {extension} sorting analyzer extension is required for this widget. "
                 f"Run the `sorting_analyzer.compute('{extension}', ...)` to compute it.\n"
             )
     if raise_error:
diff --git a/src/spikeinterface/widgets/motion.py b/src/spikeinterface/widgets/motion.py
index 6f8bdc7a6e..81cda212b2 100644
--- a/src/spikeinterface/widgets/motion.py
+++ b/src/spikeinterface/widgets/motion.py
@@ -230,7 +230,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
         from matplotlib.colors import Normalize

         from .utils_matplotlib import make_mpl_figure
-        from spikeinterface.sortingcomponents.motion_interpolation import correct_motion_on_peaks
+        from spikeinterface.sortingcomponents.motion import correct_motion_on_peaks

         dp = to_attr(data_plot)
@@ -291,12 +291,10 @@ class MotionInfoWidget(BaseWidget):
     ----------
     motion_info : dict
         The motion info returned by correct_motion() or loaded back with load_motion_info().
+    recording : RecordingExtractor
+        The recording extractor object
     segment_index : int, default: None
         The segment index to display.
-    recording : RecordingExtractor, default: None
-        The recording extractor object (only used to get "real" times).
-    segment_index : int, default: 0
-        The segment index to display.
     sampling_frequency : float, default: None
         The sampling frequency (needed if recording is None).
     depth_lim : tuple or None, default: None
@@ -320,8 +318,8 @@ class MotionInfoWidget(BaseWidget):
     def __init__(
         self,
         motion_info: dict,
+        recording: BaseRecording,
         segment_index: int | None = None,
-        recording: BaseRecording | None = None,
         depth_lim: tuple[float, float] | None = None,
         motion_lim: tuple[float, float] | None = None,
         color_amplitude: bool = False,
@@ -366,7 +364,7 @@ def __init__(
     def plot_matplotlib(self, data_plot, **backend_kwargs):
         from .utils_matplotlib import make_mpl_figure

-        from spikeinterface.sortingcomponents.motion_interpolation import correct_motion_on_peaks
+        from spikeinterface.sortingcomponents.motion import correct_motion_on_peaks

         dp = to_attr(data_plot)
diff --git a/src/spikeinterface/widgets/sorting_summary.py b/src/spikeinterface/widgets/sorting_summary.py
index 24b4ca8022..6f60e9ab9a 100644
--- a/src/spikeinterface/widgets/sorting_summary.py
+++ b/src/spikeinterface/widgets/sorting_summary.py
@@ -188,6 +188,6 @@ def plot_spikeinterface_gui(self, data_plot, **backend_kwargs):
         import spikeinterface_gui

         app = spikeinterface_gui.mkQApp()
-        win = spikeinterface_gui.MainWindow(sorting_analyzer)
+        win = spikeinterface_gui.MainWindow(sorting_analyzer, curation=data_plot["curation"])
         win.show()
         app.exec_()
diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py
index b046e55fbf..59f91306ea 100644
--- a/src/spikeinterface/widgets/unit_waveforms.py
+++ b/src/spikeinterface/widgets/unit_waveforms.py
@@ -540,9 +540,7 @@ def _update_plot(self, change):

         if self.sorting_analyzer is not None:
             templates = self.templates_ext.get_templates(unit_ids=unit_ids, operator="average")
-            templates_shadings = self._get_template_shadings(
-                unit_ids, self.next_data_plot["templates_percentile_shading"]
-            )
+            templates_shadings = self._get_template_shadings(unit_ids, data_plot["templates_percentile_shading"])
             channel_locations = self.sorting_analyzer.get_channel_locations()
         else:
             unit_indices = [list(self.templates.unit_ids).index(unit_id) for unit_id in unit_ids]
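
On the widgets side, the net effect of these changes is that recording is now a required second argument of MotionInfoWidget. A hedged sketch of the updated call, assuming plot_motion_info is the usual convenience wrapper mirroring MotionInfoWidget.__init__, and that motion_info and recording come from a correct_motion() run:

from spikeinterface.widgets import plot_motion_info  # assumed wrapper around MotionInfoWidget

plot_motion_info(
    motion_info,  # dict returned by correct_motion() / load_motion_info()
    recording,  # now required, and listed before segment_index
    depth_lim=(0.0, 400.0),
    color_amplitude=True,
)
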